Merge branch 'master' into tq-master

This commit is contained in:
Taylor Blau 2016-12-16 10:12:43 -07:00 committed by GitHub
commit 1540e3066c
27 changed files with 1153 additions and 220 deletions

2
.gitignore vendored

@ -1,7 +1,7 @@
bin/
benchmark/
out/
resources.syso
resource.syso
# only allow man/*.\d.ronn files
man/*

@ -21,28 +21,24 @@ var (
ErrNoOperationGiven = errors.New("lfs/api: no operation provided in schema")
)
// EndpointSource is an interface which encapsulates the behavior of returning
// `config.Endpoint`s based on a particular operation.
type EndpointSource interface {
// Endpoint returns the `config.Endpoint` associated with a given
// operation.
Endpoint(operation string) config.Endpoint
}
// HttpLifecycle serves as the default implementation of the Lifecycle interface
// for HTTP requests. Internally, it leverages the *http.Client type to execute
// HTTP requests against a root *url.URL, as given in `NewHttpLifecycle`.
type HttpLifecycle struct {
endpoints EndpointSource
cfg *config.Configuration
}
var _ Lifecycle = new(HttpLifecycle)
// NewHttpLifecycle initializes a new instance of the *HttpLifecycle type with a
// new *http.Client, and the given root (see above).
func NewHttpLifecycle(endpoints EndpointSource) *HttpLifecycle {
// Passing a nil Configuration will use the global config
func NewHttpLifecycle(cfg *config.Configuration) *HttpLifecycle {
if cfg == nil {
cfg = config.Config
}
return &HttpLifecycle{
endpoints: endpoints,
cfg: cfg,
}
}
@ -78,7 +74,7 @@ func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) {
return nil, err
}
if _, err = auth.GetCreds(config.Config, req); err != nil {
if _, err = auth.GetCreds(l.cfg, req); err != nil {
return nil, err
}
@ -102,7 +98,7 @@ func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) {
// Otherwise, the api.Response is returned, along with no error, signaling that
// the request completed successfully.
func (l *HttpLifecycle) Execute(req *http.Request, into interface{}) (Response, error) {
resp, err := httputil.DoHttpRequestWithRedirects(config.Config, req, []*http.Request{}, true)
resp, err := httputil.DoHttpRequestWithRedirects(l.cfg, req, []*http.Request{}, true)
if err != nil {
return nil, err
}
@ -136,7 +132,7 @@ func (l *HttpLifecycle) absolutePath(operation Operation, path string) (*url.URL
return nil, ErrNoOperationGiven
}
root, err := url.Parse(l.endpoints.Endpoint(string(operation)).Url)
root, err := url.Parse(l.cfg.Endpoint(string(operation)).Url)
if err != nil {
return nil, err
}

@ -11,23 +11,16 @@ import (
"github.com/stretchr/testify/assert"
)
type NopEndpointSource struct {
Root string
func NewTestConfig() *config.Configuration {
c := config.NewFrom(config.Values{})
c.SetManualEndpoint(config.Endpoint{Url: "https://example.com"})
return c
}
func (e *NopEndpointSource) Endpoint(op string) config.Endpoint {
return config.Endpoint{Url: e.Root}
}
var (
source = &NopEndpointSource{"https://example.com"}
)
func TestHttpLifecycleMakesRequestsAgainstAbsolutePath(t *testing.T) {
SetupTestCredentialsFunc()
defer RestoreCredentialsFunc()
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
req, err := l.Build(&api.RequestSchema{
Path: "/foo",
Operation: api.DownloadOperation,
@ -41,7 +34,7 @@ func TestHttpLifecycleAttachesQueryParameters(t *testing.T) {
SetupTestCredentialsFunc()
defer RestoreCredentialsFunc()
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
req, err := l.Build(&api.RequestSchema{
Path: "/foo",
Operation: api.DownloadOperation,
@ -58,7 +51,7 @@ func TestHttpLifecycleAttachesBodyWhenPresent(t *testing.T) {
SetupTestCredentialsFunc()
defer RestoreCredentialsFunc()
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
req, err := l.Build(&api.RequestSchema{
Operation: api.DownloadOperation,
Body: struct {
@ -77,7 +70,7 @@ func TestHttpLifecycleDoesNotAttachBodyWhenEmpty(t *testing.T) {
SetupTestCredentialsFunc()
defer RestoreCredentialsFunc()
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
req, err := l.Build(&api.RequestSchema{
Operation: api.DownloadOperation,
})
@ -90,7 +83,7 @@ func TestHttpLifecycleErrsWithoutOperation(t *testing.T) {
SetupTestCredentialsFunc()
defer RestoreCredentialsFunc()
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
req, err := l.Build(&api.RequestSchema{
Path: "/foo",
})
@ -113,7 +106,7 @@ func TestHttpLifecycleExecutesRequestWithoutBody(t *testing.T) {
req, _ := http.NewRequest("GET", server.URL+"/path", nil)
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
_, err := l.Execute(req, nil)
assert.True(t, called)
@ -138,7 +131,7 @@ func TestHttpLifecycleExecutesRequestWithBody(t *testing.T) {
req, _ := http.NewRequest("GET", server.URL+"/path", nil)
l := api.NewHttpLifecycle(source)
l := api.NewHttpLifecycle(NewTestConfig())
resp := new(Response)
_, err := l.Execute(req, resp)

@ -4,8 +4,6 @@ import (
"fmt"
"strconv"
"time"
"github.com/git-lfs/git-lfs/config"
)
// LockService is an API service which encapsulates the Git LFS Locking API.
@ -144,15 +142,8 @@ type Committer struct {
Email string `json:"email"`
}
// CurrentCommitter returns a Committer instance populated with the same
// credentials as would be used to author a commit. In particular, the
// "user.name" and "user.email" configuration values are used from the
// config.Config singleton.
func CurrentCommitter() Committer {
name, _ := config.Config.Git.Get("user.name")
email, _ := config.Config.Git.Get("user.email")
return Committer{name, email}
func NewCommitter(name, email string) Committer {
return Committer{Name: name, Email: email}
}
// LockRequest encapsulates the payload sent across the API when a client would

@ -7,7 +7,7 @@ environment:
clone_folder: $(GOPATH)\src\github.com\git-lfs\git-lfs
install:
- echo $(GOPATH)
- echo %GOPATH%
- rd C:\Go /s /q
- appveyor DownloadFile https://storage.googleapis.com/golang/go1.7.4.windows-amd64.zip
- 7z x go1.7.4.windows-amd64.zip -oC:\ >nul
@ -16,7 +16,6 @@ install:
- set PATH="C:\Program Files (x86)\Inno Setup 5";%PATH%
build_script:
- C:\Ruby23\DevKit\mingw\bin\windres.exe script\windows-installer\resources.rc -o resources.syso
- bash --login -c 'GOARCH=386 script/bootstrap'
- mv bin\git-lfs.exe git-lfs-x86.exe
- bash --login -c 'GOARCH=amd64 script/bootstrap'

@ -6,60 +6,43 @@ import (
"path/filepath"
"strings"
"github.com/git-lfs/git-lfs/api"
"github.com/git-lfs/git-lfs/config"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/locking"
"github.com/spf13/cobra"
)
var (
lockRemote string
lockRemoteHelp = "specify which remote to use when interacting with locks"
// TODO(taylor): consider making this (and the above flag) a property of
// some parent-command, or another similarly less ugly way of handling
// this
setLockRemoteFor = func(c *config.Configuration) {
c.CurrentRemote = lockRemote
}
)
func lockCommand(cmd *cobra.Command, args []string) {
setLockRemoteFor(cfg)
if len(args) == 0 {
Print("Usage: git lfs lock <path>")
return
}
latest, err := git.CurrentRemoteRef()
if err != nil {
Error(err.Error())
Exit("Unable to determine lastest remote ref for branch.")
}
path, err := lockPath(args[0])
if err != nil {
Exit(err.Error())
}
s, resp := API.Locks.Lock(&api.LockRequest{
Path: path,
Committer: api.CurrentCommitter(),
LatestRemoteCommit: latest.Sha,
})
if _, err := API.Do(s); err != nil {
Error(err.Error())
Exit("Error communicating with LFS API.")
if len(lockRemote) > 0 {
cfg.CurrentRemote = lockRemote
}
if len(resp.Err) > 0 {
Error(resp.Err)
Exit("Server unable to create lock.")
lockClient, err := locking.NewClient(cfg)
if err != nil {
Exit("Unable to create lock system: %v", err.Error())
}
defer lockClient.Close()
lock, err := lockClient.LockFile(path)
if err != nil {
Exit("Lock failed: %v", err)
}
Print("\n'%s' was locked (%s)", args[0], resp.Lock.Id)
Print("\n'%s' was locked (%s)", args[0], lock.Id)
}
// lockPaths relativizes the given filepath such that it is relative to the root

@ -1,7 +1,7 @@
package commands
import (
"github.com/git-lfs/git-lfs/api"
"github.com/git-lfs/git-lfs/locking"
"github.com/spf13/cobra"
)
@ -10,45 +10,33 @@ var (
)
func locksCommand(cmd *cobra.Command, args []string) {
setLockRemoteFor(cfg)
filters, err := locksCmdFlags.Filters()
if err != nil {
Error(err.Error())
Exit("Error building filters: %v", err)
}
var locks []api.Lock
query := &api.LockSearchRequest{Filters: filters}
for {
s, resp := API.Locks.Search(query)
if _, err := API.Do(s); err != nil {
Error(err.Error())
Exit("Error communicating with LFS API.")
}
if resp.Err != "" {
Error(resp.Err)
}
locks = append(locks, resp.Locks...)
if locksCmdFlags.Limit > 0 && len(locks) > locksCmdFlags.Limit {
locks = locks[:locksCmdFlags.Limit]
break
}
if resp.NextCursor != "" {
query.Cursor = resp.NextCursor
} else {
break
}
if len(lockRemote) > 0 {
cfg.CurrentRemote = lockRemote
}
Print("\n%d lock(s) matched query:", len(locks))
lockClient, err := locking.NewClient(cfg)
if err != nil {
Exit("Unable to create lock system: %v", err.Error())
}
defer lockClient.Close()
var lockCount int
locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local)
// Print any we got before exiting
for _, lock := range locks {
Print("%s\t%s <%s>", lock.Path, lock.Committer.Name, lock.Committer.Email)
Print("%s\t%s <%s>", lock.Path, lock.Name, lock.Email)
lockCount++
}
if err != nil {
Exit("Error while retrieving locks: %v", err)
}
Print("\n%d lock(s) matched query.", lockCount)
}
// locksFlags wraps up and holds all of the flags that can be given to the
@ -63,13 +51,14 @@ type locksFlags struct {
// limit is an optional request parameter sent to the server used to
// limit the
Limit int
// local limits the scope of lock reporting to the locally cached record
// of locks for the current user & doesn't query the server
Local bool
}
// Filters produces a slice of api.Filter instances based on the internal state
// of this locksFlags instance. The return value of this method is capable (and
// recommend to be used with) the api.LockSearchRequest type.
func (l *locksFlags) Filters() ([]api.Filter, error) {
filters := make([]api.Filter, 0)
// Filters produces a filter based on locksFlags instance.
func (l *locksFlags) Filters() (map[string]string, error) {
filters := make(map[string]string)
if l.Path != "" {
path, err := lockPath(l.Path)
@ -77,10 +66,10 @@ func (l *locksFlags) Filters() ([]api.Filter, error) {
return nil, err
}
filters = append(filters, api.Filter{"path", path})
filters["path"] = path
}
if l.Id != "" {
filters = append(filters, api.Filter{"id", l.Id})
filters["id"] = l.Id
}
return filters, nil
@ -96,5 +85,6 @@ func init() {
cmd.Flags().StringVarP(&locksCmdFlags.Path, "path", "p", "", "filter locks results matching a particular path")
cmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID")
cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return")
cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks")
})
}

@ -1,20 +1,12 @@
package commands
import (
"errors"
"github.com/git-lfs/git-lfs/locking"
"github.com/git-lfs/git-lfs/api"
"github.com/spf13/cobra"
)
var (
// errNoMatchingLocks is an error returned when no matching locks were
// able to be resolved
errNoMatchingLocks = errors.New("lfs: no matching locks found")
// errLockAmbiguous is an error returned when multiple matching locks
// were found
errLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous")
unlockCmdFlags unlockFlags
)
@ -29,67 +21,36 @@ type unlockFlags struct {
}
func unlockCommand(cmd *cobra.Command, args []string) {
setLockRemoteFor(cfg)
var id string
if len(lockRemote) > 0 {
cfg.CurrentRemote = lockRemote
}
lockClient, err := locking.NewClient(cfg)
if err != nil {
Exit("Unable to create lock system: %v", err.Error())
}
defer lockClient.Close()
if len(args) != 0 {
path, err := lockPath(args[0])
if err != nil {
Error(err.Error())
Exit("Unable to determine path: %v", err.Error())
}
if id, err = lockIdFromPath(path); err != nil {
Error(err.Error())
err = lockClient.UnlockFile(path, unlockCmdFlags.Force)
if err != nil {
Exit("Unable to unlock: %v", err.Error())
}
} else if unlockCmdFlags.Id != "" {
id = unlockCmdFlags.Id
err := lockClient.UnlockFileById(unlockCmdFlags.Id, unlockCmdFlags.Force)
if err != nil {
Exit("Unable to unlock %v: %v", unlockCmdFlags.Id, err.Error())
}
} else {
Error("Usage: git lfs unlock (--id my-lock-id | <path>)")
}
s, resp := API.Locks.Unlock(id, unlockCmdFlags.Force)
if _, err := API.Do(s); err != nil {
Error(err.Error())
Exit("Error communicating with LFS API.")
}
if len(resp.Err) > 0 {
Error(resp.Err)
Exit("Server unable to unlock lock.")
}
Print("'%s' was unlocked (%s)", args[0], resp.Lock.Id)
}
// lockIdFromPath makes a call to the LFS API and resolves the ID for the lock
// located at the given path.
//
// If the API call failed, an error will be returned. If multiple locks matched
// the given path (should not happen during real-world usage), an error will be
// returned. If no locks matched the given path, an error will be returned.
//
// If the API call is successful, and only one lock matches the given filepath,
// then its ID will be returned, along with a value of "nil" for the error.
func lockIdFromPath(path string) (string, error) {
s, resp := API.Locks.Search(&api.LockSearchRequest{
Filters: []api.Filter{
{"path", path},
},
})
if _, err := API.Do(s); err != nil {
return "", err
}
switch len(resp.Locks) {
case 0:
return "", errNoMatchingLocks
case 1:
return resp.Locks[0].Id, nil
default:
return "", errLockAmbiguous
}
Print("'%s' was unlocked", args[0])
}
func init() {

@ -11,7 +11,6 @@ import (
"strings"
"time"
"github.com/git-lfs/git-lfs/api"
"github.com/git-lfs/git-lfs/config"
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/filepathfilter"
@ -26,10 +25,6 @@ import (
//go:generate go run ../docs/man/mangen.go
var (
// API is a package-local instance of the API client for use within
// various command implementations.
API = api.NewClient(nil)
Debugging = false
ErrorBuffer = &bytes.Buffer{}
ErrorWriter = io.MultiWriter(os.Stderr, ErrorBuffer)

@ -492,3 +492,12 @@ func (c *Configuration) loadGitConfig() bool {
return false
}
// CurrentCommitter returns the name/email that would be used to author a commit
// with this configuration. In particular, the "user.name" and "user.email"
// configuration values are used
func (c *Configuration) CurrentCommitter() (name, email string) {
name, _ = c.Git.Get("user.name")
email, _ = c.Git.Get("user.email")
return
}

@ -31,7 +31,7 @@ that the client has configured. If omitted, the `basic` transfer adapter MUST
be assumed by the server.
* `objects` - An Array of objects to download.
* `oid` - String OID of the LFS object.
* `size` - Integer byte size of the LFS object.
* `size` - Integer byte size of the LFS object. Must be at least zero.
Note: Git LFS currently only supports the `basic` transfer adapter. This
property was added for future compatibility with some experimental transfer
@ -70,7 +70,7 @@ client will use the `basic` transfer adapter if the `transfer` property is
omitted.
* `objects` - An Array of objects to download.
* `oid` - String OID of the LFS object.
* `size` - Integer byte size of the LFS object.
* `size` - Integer byte size of the LFS object. Must be at least zero.
* `authenticated` - Optional boolean specifying whether the request for this
specific object is authenticated. If omitted or false, Git LFS will attempt
to [find credentials for this URL](./authentication.md).

@ -21,7 +21,8 @@
"type": "string"
},
"size": {
"type": "number"
"type": "number",
"minimum": 0
},
"authenticated": {
"type": "boolean"

@ -78,6 +78,7 @@ func TestFilterAllows(t *testing.T) {
filterTest{true, []string{"test/fil*"}, nil},
filterTest{false, []string{"test/g*"}, nil},
filterTest{true, []string{"tes*/*"}, nil},
filterTest{true, []string{"[Tt]est/[Ff]ilename.dat"}, nil},
// Exclusion
filterTest{false, nil, []string{"*.dat"}},
filterTest{false, nil, []string{"file*.dat"}},
@ -96,6 +97,7 @@ func TestFilterAllows(t *testing.T) {
filterTest{false, nil, []string{"test/fil*"}},
filterTest{true, nil, []string{"test/g*"}},
filterTest{false, nil, []string{"tes*/*"}},
filterTest{false, nil, []string{"[Tt]est/[Ff]ilename.dat"}},
// Both
filterTest{true, []string{"test/filename.dat"}, []string{"test/notfilename.dat"}},

@ -1,3 +1,5 @@
//go:generate goversioninfo -icon=script/windows-installer/git-lfs-logo.ico
package main
import (

101
locking/cache.go Normal file

@ -0,0 +1,101 @@
package locking
import (
"strings"
"github.com/git-lfs/git-lfs/tools/kv"
)
const (
// We want to use a single cache file for integrity, but to make it easy to
// list all locks, prefix the id->path map in a way we can identify (something
// that won't be in a path)
idKeyPrefix string = "*id*://"
)
type LockCache struct {
kv *kv.Store
}
// NewLockCache creates a LockCache backed by a kv.Store persisted at the
// given file path. An error is returned if the store cannot be opened.
func NewLockCache(filepath string) (*LockCache, error) {
	store, err := kv.NewStore(filepath)
	if err != nil {
		return nil, err
	}
	return &LockCache{kv: store}, nil
}
// encodeIdKey maps a lock id onto the key used to store it, namespacing it
// with idKeyPrefix so id entries can be told apart from path entries.
// Ids that already carry the prefix are returned unchanged (safety against
// accidental double-encoding).
func (c *LockCache) encodeIdKey(id string) string {
	if c.isIdKey(id) {
		return id
	}
	return idKeyPrefix + id
}
// decodeIdKey recovers a lock id from its storage key by stripping the
// idKeyPrefix namespace. Keys without the prefix are returned unchanged
// (safety against accidental double-decoding).
func (c *LockCache) decodeIdKey(key string) string {
	return strings.TrimPrefix(key, idKeyPrefix)
}
// isIdKey reports whether key is an id-keyed entry (as opposed to a
// path-keyed one) by checking for the idKeyPrefix namespace.
func (c *LockCache) isIdKey(key string) bool {
	return strings.HasPrefix(key, idKeyPrefix)
}
// Add caches a successfully-acquired lock for faster local lookup later.
// The lock is stored in both directions: under its path, and under its
// prefix-encoded id, so either can be used to find it again.
func (c *LockCache) Add(l Lock) error {
	for _, key := range []string{l.Path, c.encodeIdKey(l.Id)} {
		c.kv.Set(key, &l)
	}
	return nil
}
// RemoveByPath removes a cached lock, looked up by file path, because it has
// been relinquished. Both the path-keyed and id-keyed entries are deleted;
// a path with no cached lock is silently a no-op.
func (c *LockCache) RemoveByPath(filePath string) error {
	ilock := c.kv.Get(filePath)
	if lock, ok := ilock.(*Lock); ok && lock != nil {
		c.kv.Remove(lock.Path)
		// Id as key is encoded
		c.kv.Remove(c.encodeIdKey(lock.Id))
	}
	return nil
}
// RemoveById removes a cached lock, looked up by id, because it has been
// relinquished. Both the id-keyed and path-keyed entries are deleted; an id
// with no cached lock is silently a no-op.
func (c *LockCache) RemoveById(id string) error {
	// Id as key is encoded
	idkey := c.encodeIdKey(id)
	ilock := c.kv.Get(idkey)
	if lock, ok := ilock.(*Lock); ok && lock != nil {
		c.kv.Remove(idkey)
		c.kv.Remove(lock.Path)
	}
	return nil
}
// Locks returns every lock currently in the cache, in unspecified order.
// Only the path-keyed entries are reported, so each lock appears exactly
// once despite being stored under two keys.
func (c *LockCache) Locks() []Lock {
	var locks []Lock
	c.kv.Visit(func(key string, val interface{}) bool {
		// Only report file->id entries not reverse. Guard the type
		// assertion so an unexpected stored value cannot panic the
		// walk (matches the ok-checks in RemoveByPath/RemoveById).
		if !c.isIdKey(key) {
			if lock, ok := val.(*Lock); ok && lock != nil {
				locks = append(locks, *lock)
			}
		}
		return true // continue
	})
	return locks
}
// Clear removes every entry (both path- and id-keyed) from the cache.
func (c *LockCache) Clear() {
	c.kv.RemoveAll()
}
// Save persists the cache to its backing file via the kv store.
func (c *LockCache) Save() error {
	return c.kv.Save()
}

61
locking/cache_test.go Normal file

@ -0,0 +1,61 @@
package locking
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestLockCache exercises the LockCache round trip: adding locks, listing
// them via Locks, and removing them both by path and by id.
func TestLockCache(t *testing.T) {
	var err error

	tmpf, err := ioutil.TempFile("", "testCacheLock")
	assert.Nil(t, err)
	defer func() {
		os.Remove(tmpf.Name())
	}()
	tmpf.Close()

	cache, err := NewLockCache(tmpf.Name())
	assert.Nil(t, err)

	testLocks := []Lock{
		Lock{Path: "folder/test1.dat", Id: "101"},
		Lock{Path: "folder/test2.dat", Id: "102"},
		Lock{Path: "root.dat", Id: "103"},
	}
	for _, l := range testLocks {
		err = cache.Add(l)
		assert.Nil(t, err)
	}

	// All added locks should be listed, each exactly once.
	locks := cache.Locks()
	for _, l := range testLocks {
		assert.Contains(t, locks, l)
	}
	assert.Equal(t, len(testLocks), len(locks))

	err = cache.RemoveByPath("folder/test2.dat")
	assert.Nil(t, err)

	locks = cache.Locks()
	// delete item at index 1 from test locks, mirroring the removal above
	testLocks = append(testLocks[:1], testLocks[2:]...)
	for _, l := range testLocks {
		assert.Contains(t, locks, l)
	}
	assert.Equal(t, len(testLocks), len(locks))

	err = cache.RemoveById("101")
	assert.Nil(t, err)
	locks = cache.Locks()
	testLocks = testLocks[1:]
	for _, l := range testLocks {
		assert.Contains(t, locks, l)
	}
	assert.Equal(t, len(testLocks), len(locks))
}

276
locking/locks.go Normal file

@ -0,0 +1,276 @@
package locking
import (
"errors"
"fmt"
"os"
"path/filepath"
"time"
"github.com/git-lfs/git-lfs/api"
"github.com/git-lfs/git-lfs/config"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/tools/kv"
)
var (
// ErrNoMatchingLocks is an error returned when no matching locks were
// able to be resolved
ErrNoMatchingLocks = errors.New("lfs: no matching locks found")
// ErrLockAmbiguous is an error returned when multiple matching locks
// were found
ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous")
)
// Client is the main interface object for the locking package
type Client struct {
cfg *config.Configuration
apiClient *api.Client
cache *LockCache
}
// NewClient creates a new locking client with the given configuration.
// You must call the returned object's `Close` method when you are finished
// with it, so the on-disk lock cache is saved.
func NewClient(cfg *config.Configuration) (*Client, error) {
	// Ensure the cache directory under the local git storage dir exists.
	lockDir := filepath.Join(config.LocalGitStorageDir, "lfs")
	if err := os.MkdirAll(lockDir, 0755); err != nil {
		return nil, err
	}

	cache, err := NewLockCache(filepath.Join(lockDir, "lockcache.db"))
	if err != nil {
		return nil, err
	}

	return &Client{
		cfg:       cfg,
		apiClient: api.NewClient(api.NewHttpLifecycle(cfg)),
		cache:     cache,
	}, nil
}
// Close this client instance; must be called to dispose of resources.
// In particular it flushes the local lock cache to disk via Save.
func (c *Client) Close() error {
	return c.cache.Save()
}
// LockFile attempts to lock a file on the current remote.
// path must be relative to the root of the repository.
// Returns the new Lock record if successful, or an error.
// (The original comment said "lock id"; the function returns the full Lock.)
func (c *Client) LockFile(path string) (Lock, error) {
	// TODO: this is not really the constraint we need to avoid merges, improve as per proposal
	latest, err := git.CurrentRemoteRef()
	if err != nil {
		return Lock{}, err
	}

	s, resp := c.apiClient.Locks.Lock(&api.LockRequest{
		Path:               path,
		Committer:          api.NewCommitter(c.cfg.CurrentCommitter()),
		LatestRemoteCommit: latest.Sha,
	})

	if _, err := c.apiClient.Do(s); err != nil {
		return Lock{}, fmt.Errorf("Error communicating with LFS API: %v", err)
	}

	if len(resp.Err) > 0 {
		return Lock{}, fmt.Errorf("Server unable to create lock: %v", resp.Err)
	}

	// Record the new lock locally so local-only searches can report it.
	lock := c.newLockFromApi(*resp.Lock)
	if err := c.cache.Add(lock); err != nil {
		return Lock{}, fmt.Errorf("Error caching lock information: %v", err)
	}

	return lock, nil
}
// UnlockFile attempts to unlock a file on the current remote.
// path must be relative to the root of the repository.
// Force causes the file to be unlocked from other users as well.
func (c *Client) UnlockFile(path string, force bool) error {
	// Resolve the server-side lock id for this path, then delegate.
	id, err := c.lockIdFromPath(path)
	if err != nil {
		return fmt.Errorf("Unable to get lock id: %v", err)
	}

	return c.UnlockFileById(id, force)
}
// UnlockFileById attempts to unlock a lock with a given id on the current
// remote. Force causes the file to be unlocked from other users as well.
// On success the lock is also dropped from the local cache.
func (c *Client) UnlockFileById(id string, force bool) error {
	schema, resp := c.apiClient.Locks.Unlock(id, force)

	if _, err := c.apiClient.Do(schema); err != nil {
		return fmt.Errorf("Error communicating with LFS API: %v", err)
	}

	if resp.Err != "" {
		return fmt.Errorf("Server unable to unlock lock: %v", resp.Err)
	}

	if err := c.cache.RemoveById(id); err != nil {
		return fmt.Errorf("Error caching unlock information: %v", err)
	}

	return nil
}
// Lock is a record of a locked file
type Lock struct {
	// Id is the unique identifier corresponding to this particular Lock. It
	// must be consistent with the local copy, and the server's copy.
	Id string
	// Path is the path to the file that is locked as a part of this lock.
	// NOTE(review): the original comment said "absolute path", but callers
	// (LockFile/UnlockFile) document repository-relative paths — confirm
	// before relying on either.
	Path string
	// Name is the name of the person holding this lock
	Name string
	// Email address of the person holding this lock
	Email string
	// LockedAt is the time at which this lock was acquired.
	LockedAt time.Time
}
// newLockFromApi converts an api.Lock wire record into this package's Lock
// representation, flattening the committer's name and email.
func (c *Client) newLockFromApi(a api.Lock) Lock {
	l := Lock{
		Id:       a.Id,
		Path:     a.Path,
		LockedAt: a.LockedAt,
	}
	l.Name = a.Committer.Name
	l.Email = a.Committer.Email
	return l
}
// SearchLocks returns the locks which match the given name/value filter.
// If limit > 0 then search stops at that number of locks.
// If localOnly = true, don't query the server & report only own local locks.
func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool) (locks []Lock, err error) {
	if localOnly {
		return c.searchCachedLocks(filter, limit)
	}
	return c.searchRemoteLocks(filter, limit)
}
// searchCachedLocks filters the locally cached locks by the optional "path"
// and "id" filter keys, returning at most limit results when limit > 0.
// It never returns an error (the signature mirrors searchRemoteLocks).
func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) {
	cached := c.cache.Locks()
	wantPath, byPath := filter["path"]
	wantId, byId := filter["id"]

	matches := make([]Lock, 0, len(cached))
	for _, l := range cached {
		// Manually filter by Path/Id when those keys were supplied.
		if byPath && wantPath != l.Path {
			continue
		}
		if byId && wantId != l.Id {
			continue
		}
		matches = append(matches, l)
		if limit > 0 && len(matches) >= limit {
			break
		}
	}
	return matches, nil
}
// searchRemoteLocks queries the LFS API for locks matching the given
// name/value filter, following pagination cursors until the server reports
// no further pages or (when limit > 0) limit locks have been collected.
// Locks gathered before an API error are returned alongside that error.
func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) {
	locks := make([]Lock, 0, limit)
	// Convert the map filter into the slice form the API expects.
	apifilters := make([]api.Filter, 0, len(filter))
	for k, v := range filter {
		apifilters = append(apifilters, api.Filter{k, v})
	}
	query := &api.LockSearchRequest{Filters: apifilters}
	for {
		s, resp := c.apiClient.Locks.Search(query)
		if _, err := c.apiClient.Do(s); err != nil {
			return locks, fmt.Errorf("Error communicating with LFS API: %v", err)
		}
		if resp.Err != "" {
			return locks, fmt.Errorf("Error response from LFS API: %v", resp.Err)
		}
		for _, l := range resp.Locks {
			locks = append(locks, c.newLockFromApi(l))
			if limit > 0 && len(locks) >= limit {
				// Exit outer loop too
				return locks, nil
			}
		}
		if resp.NextCursor != "" {
			query.Cursor = resp.NextCursor
		} else {
			break
		}
	}
	return locks, nil
}
// lockIdFromPath makes a call to the LFS API and resolves the ID for the lock
// located at the given path.
//
// If the API call failed, an error will be returned. If multiple locks matched
// the given path (should not happen during real-world usage), an error will be
// returned. If no locks matched the given path, an error will be returned.
//
// If the API call is successful, and only one lock matches the given filepath,
// then its ID will be returned, along with a value of "nil" for the error.
func (c *Client) lockIdFromPath(path string) (string, error) {
	s, resp := c.apiClient.Locks.Search(&api.LockSearchRequest{
		Filters: []api.Filter{
			{"path", path},
		},
	})

	if _, err := c.apiClient.Do(s); err != nil {
		return "", err
	}

	switch len(resp.Locks) {
	case 0:
		return "", ErrNoMatchingLocks
	case 1:
		return resp.Locks[0].Id, nil
	default:
		return "", ErrLockAmbiguous
	}
}
// refreshLockCache fetches locked files for the current committer and caches
// them locally. This can be used to sync up locked files when moving machines.
// The entire local cache is replaced; only locks whose Email matches the
// configured user.email are kept.
func (c *Client) refreshLockCache() error {
	// TODO: filters don't seem to currently define how to search for a
	// committer's email. Is it "committer.email"? For now, just iterate
	locks, err := c.SearchLocks(nil, 0, false)
	if err != nil {
		return err
	}

	// We're going to overwrite the entire local cache
	c.cache.Clear()

	_, email := c.cfg.CurrentCommitter()
	for _, l := range locks {
		if l.Email == email {
			c.cache.Add(l)
		}
	}

	return nil
}
func init() {
	// Register the Lock type with the kv store's serialization machinery so
	// cached locks can be persisted and reloaded. (Exact mechanism is defined
	// in tools/kv — presumably gob registration; confirm there.)
	kv.RegisterTypeForStorage(&Lock{})
}

98
locking/locks_test.go Normal file

@ -0,0 +1,98 @@
package locking
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"os"
"sort"
"testing"
"time"
"github.com/git-lfs/git-lfs/api"
"github.com/git-lfs/git-lfs/config"
"github.com/stretchr/testify/assert"
)
// TestLifecycle is a stub api.Lifecycle used to exercise the locking client
// without a real server; its Execute returns a fixed set of locks.
type TestLifecycle struct {
}

// Build returns a dummy request; it is never actually sent anywhere.
func (l *TestLifecycle) Build(schema *api.RequestSchema) (*http.Request, error) {
	return http.NewRequest("GET", "http://dummy", nil)
}
// Execute ignores the real request and decodes a canned list of locks (held
// by several different committers) into `into`, mimicking a successful
// 200 response from the server.
func (l *TestLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) {
	// Return test data including other users
	locks := api.LockList{Locks: []api.Lock{
		api.Lock{Id: "99", Path: "folder/test3.dat", Committer: api.Committer{Name: "Alice", Email: "alice@wonderland.com"}},
		api.Lock{Id: "101", Path: "folder/test1.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}},
		api.Lock{Id: "102", Path: "folder/test2.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}},
		api.Lock{Id: "103", Path: "root.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}},
		api.Lock{Id: "199", Path: "other/test1.dat", Committer: api.Committer{Name: "Charles", Email: "charles@incharge.com"}},
	}}
	locksJson, _ := json.Marshal(locks)
	r := &http.Response{
		Status:     "200 OK",
		StatusCode: 200,
		Proto:      "HTTP/1.0",
		Body:       ioutil.NopCloser(bytes.NewReader(locksJson)),
	}
	if into != nil {
		decoder := json.NewDecoder(r.Body)
		if err := decoder.Decode(into); err != nil {
			return nil, err
		}
	}
	return api.WrapHttpResponse(r), nil
}
// Cleanup closes the (in-memory) response body, satisfying the Lifecycle
// contract.
func (l *TestLifecycle) Cleanup(resp api.Response) error {
	return resp.Body().Close()
}
// LocksById implements sort.Interface over a slice of Locks, ordering by the
// Id string so tests can compare slices deterministically.
type LocksById []Lock

func (a LocksById) Len() int           { return len(a) }
func (a LocksById) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
func (a LocksById) Less(i, j int) bool { return a[i].Id < a[j].Id }
// TestRefreshCache verifies that refreshLockCache pulls locks from the API
// (stubbed via TestLifecycle) and caches only those owned by the configured
// committer ("Fred"), replacing any prior local state.
func TestRefreshCache(t *testing.T) {
	var err error
	// Redirect the lock cache storage to a temp dir for the test.
	oldStore := config.LocalGitStorageDir
	config.LocalGitStorageDir, err = ioutil.TempDir("", "testCacheLock")
	assert.Nil(t, err)
	defer func() {
		os.RemoveAll(config.LocalGitStorageDir)
		config.LocalGitStorageDir = oldStore
	}()

	cfg := config.NewFrom(config.Values{
		Git: map[string]string{"user.name": "Fred", "user.email": "fred@bloggs.com"}})
	client, err := NewClient(cfg)
	assert.Nil(t, err)
	// Override api client for testing
	client.apiClient = api.NewClient(&TestLifecycle{})

	// Should start with no cached items
	locks, err := client.SearchLocks(nil, 0, true)
	assert.Nil(t, err)
	assert.Empty(t, locks)

	// Should load from test data, just Fred's
	err = client.refreshLockCache()
	assert.Nil(t, err)

	locks, err = client.SearchLocks(nil, 0, true)
	assert.Nil(t, err)
	// Need to include zero time in structure for equal to work
	zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)
	// Sort locks for stable comparison
	sort.Sort(LocksById(locks))
	assert.Equal(t, []Lock{
		Lock{Path: "folder/test1.dat", Id: "101", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime},
		Lock{Path: "folder/test2.dat", Id: "102", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime},
		Lock{Path: "root.dat", Id: "103", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime},
	}, locks)
}

@ -14,6 +14,14 @@ if [ -z "$GOPATH" ]; then
ln -s "$GOPATH" src/github.com/git-lfs/git-lfs
fi
if uname -s | grep -q "_NT-"; then
echo "Installing goversioninfo to embed resources into Windows executables..."
go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo
export PATH=$PATH:$GOPATH/bin/windows_386
echo "Creating the resource.syso version information file..."
go generate
fi
script/fmt
rm -rf bin/*
GO15VENDOREXPERIMENT=1 go run script/*.go -cmd build "$@"

21
script/update-version Executable file

@ -0,0 +1,21 @@
#!/usr/bin/env bash
#
# update-version: rewrite the release version everywhere it is recorded.
#
# Usage: script/update-version <major.minor[.patch[.build]]>
#
# Touches config/version.go, the RPM spec, and versioninfo.json (used to
# embed Windows version resources). Missing patch/build components default
# to zero.

# Abort on the first failing command (e.g. a missing target file) instead of
# continuing and leaving the version files half-updated.
set -e

VERSION_STRING=$1

# Without an argument the sed substitutions below would blank out every
# version field, so validate up front.
if [ -z "$VERSION_STRING" ]; then
  echo "Usage: $0 <major.minor[.patch[.build]]>" >&2
  exit 1
fi

# Split "1.5.2.0" into its dotted components.
VERSION_ARRAY=( ${VERSION_STRING//./ } )
VERSION_MAJOR=${VERSION_ARRAY[0]}
VERSION_MINOR=${VERSION_ARRAY[1]}
VERSION_PATCH=${VERSION_ARRAY[2]:-0}
VERSION_BUILD=${VERSION_ARRAY[3]:-0}

# Update the version number git-lfs is reporting.
sed -i "s,\(Version = \"\).*\(\"\),\1$VERSION_STRING\2," config/version.go

# Update the version number in the RPM package.
sed -i "s,\(Version:[[:space:]]*\).*,\1$VERSION_STRING," rpm/SPECS/git-lfs.spec

# Update the version numbers in the Windows installer.
sed -i "s,\(\"Major\": \).*\,,\1$VERSION_MAJOR\,," versioninfo.json
sed -i "s,\(\"Minor\": \).*\,,\1$VERSION_MINOR\,," versioninfo.json
sed -i "s,\(\"Patch\": \).*\,,\1$VERSION_PATCH\,," versioninfo.json
sed -i "s,\(\"Build\": \).*,\1$VERSION_BUILD," versioninfo.json
sed -i "s,\(\"ProductVersion\": \"\).*\(\"\),\1$VERSION_STRING\2," versioninfo.json

@ -1,8 +1,10 @@
#define MyAppName "Git LFS"
; Misuse RemoveFileExt to strip the 4th patch-level version number.
; Arbitrarily choose the x86 executable here as both have the version embedded.
#define MyAppVersion RemoveFileExt(GetFileVersion("..\..\git-lfs-x86.exe"))
#define MyVersionInfoVersion GetFileVersion("..\..\git-lfs-x86.exe")
; Misuse RemoveFileExt to strip the 4th patch-level version number.
#define MyAppVersion RemoveFileExt(MyVersionInfoVersion)
#define MyAppPublisher "GitHub, Inc."
#define MyAppURL "https://git-lfs.github.com/"
@ -15,7 +17,7 @@
AppId={{286391DE-F778-44EA-9375-1B21AAA04FF0}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
;AppVerName={#MyAppName} {#MyAppVersion}
AppCopyright=GitHub, Inc. and Git LFS contributors
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
@ -32,6 +34,7 @@ DisableReadyPage=True
ArchitecturesInstallIn64BitMode=x64
ChangesEnvironment=yes
SetupIconFile=git-lfs-logo.ico
VersionInfoVersion={#MyVersionInfoVersion}
WizardImageFile=git-lfs-wizard-image.bmp
WizardSmallImageFile=git-lfs-logo.bmp

@ -1,20 +0,0 @@
main ICON "git-lfs-logo.ico"
1 VERSIONINFO
FILEVERSION 1,5,0,0
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904b0" /* LANG_ENGLISH/SUBLANG_ENGLISH_US, Unicode CP */
BEGIN
VALUE "FileDescription", "Git LFS\0"
VALUE "ProductName", "Git Large File Storage (LFS)\0"
VALUE "ProductVersion", "1.5.0\0"
VALUE "LegalCopyright", "GitHub, Inc. and Git LFS contributors\0"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1200
END
END

@ -88,3 +88,48 @@ begin_test "list locks with pagination"
grep "4 lock(s) matched query" locks.log
)
end_test
begin_test "cached locks"
(
set -e
# Use a dedicated repo so lock state doesn't leak between tests.
reponame="cached_locks"
setup_remote_repo "remote_$reponame"
clone_repo "remote_$reponame" "clone_$reponame"
git lfs track "*.dat"
# Commit two lockable files plus the .gitattributes entry (hence "3 files").
echo "foo" > "cached1.dat"
echo "bar" > "cached2.dat"
git add "cached1.dat" "cached2.dat" ".gitattributes"
git commit -m "add files" | tee commit.log
grep "3 files changed" commit.log
grep "create mode 100644 cached1.dat" commit.log
grep "create mode 100644 cached2.dat" commit.log
grep "create mode 100644 .gitattributes" commit.log
git push origin master 2>&1 | tee push.log
grep "master -> master" push.log
# Take locks on both files and confirm the server recorded them; the lock id
# is extracted from the parenthesised portion of the command output.
GITLFSLOCKSENABLED=1 git lfs lock "cached1.dat" | tee lock.log
assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")"
GITLFSLOCKSENABLED=1 git lfs lock "cached2.dat" | tee lock.log
assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")"
# --local should answer from the cached lock records, not the server.
GITLFSLOCKSENABLED=1 git lfs locks --local | tee locks.log
grep "2 lock(s) matched query" locks.log
# delete the remote to prove we're using the local records
git remote remove origin
# Filtering and limiting must also work against the local cache.
GITLFSLOCKSENABLED=1 git lfs locks --local --path "cached1.dat" | tee locks.log
grep "1 lock(s) matched query" locks.log
grep "cached1.dat" locks.log
GITLFSLOCKSENABLED=1 git lfs locks --local --limit 1 | tee locks.log
grep "1 lock(s) matched query" locks.log
)
end_test

230
tools/kv/keyvaluestore.go Normal file

@ -0,0 +1,230 @@
package kv
import (
"encoding/gob"
"fmt"
"io"
"os"
"sync"
)
// Store provides an in-memory key/value store which is persisted to
// a file. The file handle itself is not kept locked for the duration; it is
// only locked during load and save, to make it concurrency friendly. When
// saving, the store uses optimistic locking to determine whether the db on disk
// has been modified by another process; in which case it loads the latest
// version and re-applies modifications made during this session. This means
// the Lost Update db concurrency issue is possible; so don't use this if you
// need more DB integrity than Read Committed isolation levels.
type Store struct {
	// mu locks the entire store: RLock for reads, Lock for mutations.
	mu sync.RWMutex
	// filename is the path of the file the store is persisted to.
	filename string
	// log records unsaved Set/Remove operations so they can be replayed
	// if Save() has to merge with a newer on-disk version.
	log []change

	// This is the persistent data
	// version for optimistic locking, this field is incremented with every Save()
	// (zero means the store has never been saved).
	version int64
	db map[string]interface{}
}

// Type of operation; set or remove
type operation int

const (
	// Set a value for a key
	setOperation = operation(iota)
	// Removed a value for a key
	removeOperation = operation(iota)
)

// change is one replayable entry in the unsaved-changes log.
type change struct {
	op op operation's kind: set or remove
	key string
	value interface{}
}
// NewStore creates a new key/value store and initialises it with contents
// from the named file, if that file exists.
func NewStore(filepath string) (*Store, error) {
	s := &Store{
		filename: filepath,
		db:       make(map[string]interface{}),
	}
	// NOTE: the store is returned even when the initial load fails, so
	// callers receive a usable (empty) store alongside the error.
	err := s.loadAndMergeIfNeeded()
	return s, err
}
// Set updates the key/value store in memory only; nothing is persisted
// until Save() is called.
func (k *Store) Set(key string, value interface{}) {
	k.mu.Lock()
	defer k.mu.Unlock()

	// Apply to the live map, then record the change for later replay.
	k.db[key] = value
	k.logChange(setOperation, key, value)
}
// Remove deletes the key and its value from the in-memory store; nothing is
// persisted until Save() is called.
func (k *Store) Remove(key string) {
	k.mu.Lock()
	defer k.mu.Unlock()

	// Drop the entry, then record the removal for later replay.
	delete(k.db, key)
	k.logChange(removeOperation, key, nil)
}
// RemoveAll removes all entries from the store in memory.
// These changes are not persisted until you call Save().
func (k *Store) RemoveAll() {
	k.mu.Lock()
	defer k.mu.Unlock()

	// Log a removal per key so Save() can replay them if it has to merge
	// with a newer on-disk version.
	// (Idiom fix: drop the redundant blank identifier from the range.)
	for key := range k.db {
		k.logChange(removeOperation, key, nil)
	}
	k.db = make(map[string]interface{})
}
// Visit walks through the entire store via a function; return false from
// your visitor function to halt the walk. Iteration order over the map is
// unspecified.
func (k *Store) Visit(cb func(string, interface{}) bool) {
	// Read-only lock
	k.mu.RLock()
	defer k.mu.RUnlock()

	// Fix: the original loop variables shadowed the receiver `k`; use
	// distinct names so the receiver stays addressable inside the loop.
	for key, value := range k.db {
		if !cb(key, value) {
			break
		}
	}
}
// logChange appends one entry to the unsaved-changes log so Save() can
// replay it after a merge. The caller must already hold the write lock.
func (k *Store) logChange(op operation, key string, value interface{}) {
	entry := change{op: op, key: key, value: value}
	k.log = append(k.log, entry)
}
// Get retrieves the value stored under key, or nil if it is not present.
func (k *Store) Get(key string) interface{} {
	k.mu.RLock()
	defer k.mu.RUnlock()

	// A missing key yields the zero value of interface{}, which is nil —
	// exactly the "not present" answer we want.
	return k.db[key]
}
// Save persists the changes made since the last load or Save to disk.
// If another process has saved in the meantime, its version is merged first
// and this session's changes are re-applied on top (last writer wins for
// conflicting keys).
func (k *Store) Save() error {
	k.mu.Lock()
	defer k.mu.Unlock()

	// Short-circuit if we have no changes
	if len(k.log) == 0 {
		return nil
	}

	// firstly peek at version; open read/write to keep lock between check & write
	f, err := os.OpenFile(k.filename, os.O_RDWR|os.O_CREATE, 0664)
	if err != nil {
		return err
	}
	defer f.Close()

	// Only try to merge if > 0 bytes, ignore empty files (decoder will fail).
	// Fix: check the Stat error too — the original ignored it and would
	// nil-panic on stat.Size() if Stat failed.
	if stat, err := f.Stat(); err == nil && stat.Size() > 0 {
		// Merge errors are deliberately ignored: a corrupt or unreadable
		// file is simply overwritten with our current state below.
		k.loadAndMergeReaderIfNeeded(f)
		// Now rewind and truncate so we overwrite rather than append.
		// (io.SeekStart replaces the deprecated os.SEEK_SET; errors are
		// now propagated instead of silently dropped.)
		if _, err := f.Seek(0, io.SeekStart); err != nil {
			return err
		}
		if err := f.Truncate(0); err != nil {
			return err
		}
	}

	// Bump the optimistic-lock version, then write version + data in order.
	k.version++

	enc := gob.NewEncoder(f)
	if err := enc.Encode(k.version); err != nil {
		return fmt.Errorf("Error while writing version data to %v: %v", k.filename, err)
	}
	if err := enc.Encode(k.db); err != nil {
		return fmt.Errorf("Error while writing new key/value data to %v: %v", k.filename, err)
	}

	// Clear log now that it's saved
	k.log = nil
	return nil
}
// loadAndMergeIfNeeded reads as little as possible from the backing file to
// determine whether its contents differ from the version already held. If so,
// it reads the contents and merges them with any outstanding changes; if not,
// it stops early without reading the rest of the file.
func (k *Store) loadAndMergeIfNeeded() error {
	stat, err := os.Stat(k.filename)
	if err != nil {
		if os.IsNotExist(err) {
			return nil // missing is OK
		}
		return err
	}
	// Do nothing if empty file; the gob decoder would fail on it.
	if stat.Size() == 0 {
		return nil
	}

	// Idiom fix: early return instead of else-after-return, and os.Open
	// instead of OpenFile(O_RDONLY, perm) — the permission bits are unused
	// without O_CREATE, so behavior is identical.
	f, err := os.Open(k.filename)
	if err != nil {
		return err
	}
	defer f.Close()

	return k.loadAndMergeReaderIfNeeded(f)
}
// loadAndMergeReaderIfNeeded behaves like loadAndMergeIfNeeded but lets the
// caller decide how to manage file handles: it decodes only the version
// field from f, and reads the full data set just when that version differs
// from the one already held, replaying unsaved local changes on top.
func (k *Store) loadAndMergeReaderIfNeeded(f io.Reader) error {
	dec := gob.NewDecoder(f)

	// gob serializes struct fields in order, so the version is always the
	// first value in the stream; decoding just that detects concurrent
	// modification without reading the rest of the file.
	var diskVersion int64
	if err := dec.Decode(&diskVersion); err != nil {
		return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
	}

	// A totally uninitialised store has version 0; saved versions are >= 1.
	if diskVersion == k.version {
		return nil
	}

	// Someone else saved since our last load: take their data as the base
	// and re-apply our unsaved changes on top of it.
	var diskDb map[string]interface{}
	if err := dec.Decode(&diskDb); err != nil {
		return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
	}
	k.reapplyChanges(diskDb)
	k.version = diskVersion
	return nil
}
// reapplyChanges replays every change made since the last load onto baseDb
// and adopts the result as the current data set. The change log is left
// intact here — it is only cleared by Save(), since it lists unsaved changes.
func (k *Store) reapplyChanges(baseDb map[string]interface{}) {
	for _, entry := range k.log {
		switch entry.op {
		case setOperation:
			baseDb[entry.key] = entry.value
		case removeOperation:
			delete(baseDb, entry.key)
		}
	}
	k.db = baseDb
}
// RegisterTypeForStorage registers a custom type (e.g. a struct) for use in
// the key/value store. This is required before passing custom structs to
// Store.Set(), since gob must be told about concrete types that will travel
// inside interface{} values.
func RegisterTypeForStorage(val interface{}) {
	gob.Register(val)
}

@ -0,0 +1,187 @@
package kv
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
// TestStoreSimple exercises the basic Set/Get/Remove/Save round-trip,
// including persistence of primitive values and registered custom structs.
func TestStoreSimple(t *testing.T) {
	tmpf, err := ioutil.TempFile("", "lfstest1")
	assert.Nil(t, err)
	filename := tmpf.Name()
	defer os.Remove(filename)
	tmpf.Close()

	kvs, err := NewStore(filename)
	assert.Nil(t, err)

	// We'll include storing custom structs
	type customData struct {
		Val1 string
		Val2 int
	}
	// Needed to store custom struct
	RegisterTypeForStorage(&customData{})

	kvs.Set("stringVal", "This is a string value")
	kvs.Set("intVal", 3)
	kvs.Set("floatVal", 3.142)
	kvs.Set("structVal", &customData{"structTest", 20})

	s := kvs.Get("stringVal")
	assert.Equal(t, "This is a string value", s)
	i := kvs.Get("intVal")
	assert.Equal(t, 3, i)
	f := kvs.Get("floatVal")
	assert.Equal(t, 3.142, f)
	c := kvs.Get("structVal")
	assert.Equal(t, c, &customData{"structTest", 20})
	n := kvs.Get("noValue")
	assert.Nil(t, n)

	kvs.Remove("stringVal")
	s = kvs.Get("stringVal")
	assert.Nil(t, s)
	// Set the string value again before saving
	kvs.Set("stringVal", "This is a string value")

	err = kvs.Save()
	assert.Nil(t, err)
	kvs = nil

	// Now confirm that we can read it all back
	kvs2, err := NewStore(filename)
	assert.Nil(t, err)
	s = kvs2.Get("stringVal")
	assert.Equal(t, "This is a string value", s)
	i = kvs2.Get("intVal")
	assert.Equal(t, 3, i)
	f = kvs2.Get("floatVal")
	assert.Equal(t, 3.142, f)
	c = kvs2.Get("structVal")
	assert.Equal(t, c, &customData{"structTest", 20})
	n = kvs2.Get("noValue")
	assert.Nil(t, n)

	// Test remove all
	kvs2.RemoveAll()
	s = kvs2.Get("stringVal")
	assert.Nil(t, s)
	i = kvs2.Get("intVal")
	assert.Nil(t, i)
	f = kvs2.Get("floatVal")
	assert.Nil(t, f)
	c = kvs2.Get("structVal")
	assert.Nil(t, c)

	err = kvs2.Save()
	assert.Nil(t, err)
	kvs2 = nil

	// Now confirm that we can read blank & get nothing
	kvs, err = NewStore(filename)
	// Fix: this error was previously never checked.
	assert.Nil(t, err)
	kvs.Visit(func(k string, v interface{}) bool {
		// Should not be called
		assert.Fail(t, "Should be no entries")
		return true
	})
}
// TestStoreOptimisticConflict verifies that two stores writing to the same
// file detect each other's saves, merge the on-disk data, and that the last
// writer wins on conflicting keys.
func TestStoreOptimisticConflict(t *testing.T) {
	tmpf, err := ioutil.TempFile("", "lfstest2")
	assert.Nil(t, err)
	filename := tmpf.Name()
	defer os.Remove(filename)
	tmpf.Close()

	kvs1, err := NewStore(filename)
	assert.Nil(t, err)
	kvs1.Set("key1", "value1")
	kvs1.Set("key2", "value2")
	kvs1.Set("key3", "value3")
	err = kvs1.Save()
	assert.Nil(t, err)

	// Open a second store on the same file and modify it independently.
	kvs2, err := NewStore(filename)
	assert.Nil(t, err)
	kvs2.Set("key4", "value4_fromkvs2") // new key
	kvs2.Set("key5", "value5_fromkvs2") // new key
	kvs2.Set("key1", "value1_fromkvs2") // modify an existing key too
	err = kvs2.Save()
	assert.Nil(t, err)

	// Saving the first store now must detect the optimistic-lock conflict,
	// merge in kvs2's data, and replay kvs1's unsaved changes on top.
	kvs1.Set("key10", "value10") // brand new item
	kvs1.Set("key4", "value4")   // overlaps kvs2's key; saved last, so wins
	err = kvs1.Save()
	assert.Nil(t, err)

	got := kvs1.Get("key1")
	assert.Equal(t, "value1_fromkvs2", got) // modified by kvs2, merged in
	got = kvs1.Get("key2")
	assert.Equal(t, "value2", got)
	got = kvs1.Get("key3")
	assert.Equal(t, "value3", got)
	got = kvs1.Get("key4")
	assert.Equal(t, "value4", got) // our overwrite beats the merged value
	got = kvs1.Get("key5")
	assert.Equal(t, "value5_fromkvs2", got)
}
// TestStoreReduceSize checks that removing keys and re-saving rewrites the
// backing file in place (rather than appending), so the file shrinks.
func TestStoreReduceSize(t *testing.T) {
	tmpf, err := ioutil.TempFile("", "lfstest3")
	assert.Nil(t, err)
	filename := tmpf.Name()
	defer os.Remove(filename)
	tmpf.Close()

	store, err := NewStore(filename)
	assert.Nil(t, err)
	store.Set("key1", "I woke up in a Soho doorway")
	store.Set("key2", "A policeman knew my name")
	store.Set("key3", "He said 'You can go sleep at home tonight")
	store.Set("key4", "If you can get up and walk away'")
	for _, key := range []string{"key1", "key2", "key3", "key4"} {
		assert.NotNil(t, store.Get(key))
	}
	assert.Nil(t, store.Save())
	before, _ := os.Stat(filename)

	// Remove all but one key and save the smaller data set.
	store.Remove("key2")
	store.Remove("key3")
	store.Remove("key4")
	assert.Nil(t, store.Save())

	// Reload from scratch and prove only key1 survived.
	store = nil
	store, err = NewStore(filename)
	assert.Nil(t, err)
	assert.NotNil(t, store.Get("key1"))
	assert.Nil(t, store.Get("key2"))
	assert.Nil(t, store.Get("key3"))
	assert.Nil(t, store.Get("key4"))

	after, _ := os.Stat(filename)
	assert.True(t, after.Size() < before.Size(), "Size should have reduced, was %d now %d", before.Size(), after.Size())
}

@ -1,17 +0,0 @@
#!/bin/bash
VERSION_STRING=$1
VERSION_ARRAY=( ${VERSION_STRING//./ } )
VERSION_MAJOR=${VERSION_ARRAY[0]}
VERSION_MINOR=${VERSION_ARRAY[1]}
VERSION_PATCH=${VERSION_ARRAY[2]}
# Update the version number git-lfs is reporting.
sed -i "s,\(Version = \"\).*\(\"\),\1$VERSION_STRING\2," config/version.go
# Update the version number in the RPM package.
sed -i "s,\(Version:[[:space:]]*\).*,\1$VERSION_STRING," rpm/SPECS/git-lfs.spec
# Update the version numbers in the Windows installer.
sed -i "s,\(FILEVERSION \).*,\1$VERSION_MAJOR\,$VERSION_MINOR\,$VERSION_PATCH\,0," script/windows-installer/resources.rc
sed -i "s,\([[:space:]]*VALUE \"ProductVersion\"\, \"\).*\(\\\\0\"\),\1$VERSION_STRING\2," script/windows-installer/resources.rc

18
versioninfo.json Normal file

@ -0,0 +1,18 @@
{
"FixedFileInfo":
{
"FileVersion": {
"Major": 1,
"Minor": 5,
"Patch": 0,
"Build": 0
}
},
"StringFileInfo":
{
"FileDescription": "Git LFS",
"LegalCopyright": "GitHub, Inc. and Git LFS contributors",
"ProductName": "Git Large File Storage (LFS)",
"ProductVersion": "1.5.0"
}
}