diff --git a/commands/command_locks.go b/commands/command_locks.go index 6b8e8958..da9eeaee 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -1,7 +1,7 @@ package commands import ( - "encoding/json" + "io" "os" "sort" "strings" @@ -44,11 +44,41 @@ func locksCommand(cmd *cobra.Command, args []string) { } } - locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local, locksCmdFlags.Cached) + if locksCmdFlags.Verify { + if len(filters) > 0 { + Exit("--verify option can't be combined with filters") + } + if locksCmdFlags.Local { + Exit("--verify option can't be combined with --local") + } + } + + var locks []locking.Lock + var locksOwned map[locking.Lock]bool + var jsonWriteFunc func(io.Writer) error + if locksCmdFlags.Verify { + var ourLocks, theirLocks []locking.Lock + ourLocks, theirLocks, err = lockClient.SearchLocksVerifiable(locksCmdFlags.Limit, locksCmdFlags.Cached) + jsonWriteFunc = func(writer io.Writer) error { + return lockClient.EncodeLocksVerifiable(ourLocks, theirLocks, writer) + } + + locks = append(ourLocks, theirLocks...) 
+ locksOwned = make(map[locking.Lock]bool) + for _, lock := range ourLocks { + locksOwned[lock] = true + } + } else { + locks, err = lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local, locksCmdFlags.Cached) + jsonWriteFunc = func(writer io.Writer) error { + return lockClient.EncodeLocks(locks, writer) + } + } + // Print any we got before exiting if locksCmdFlags.JSON { - if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil { + if err := jsonWriteFunc(os.Stdout); err != nil { Error(err.Error()) } return @@ -77,7 +107,16 @@ func locksCommand(cmd *cobra.Command, args []string) { pathPadding := tools.MaxInt(maxPathLen-len(lock.Path), 0) namePadding := tools.MaxInt(maxNameLen-len(ownerName), 0) - Print("%s%s\t%s%s\tID:%s", lock.Path, strings.Repeat(" ", pathPadding), + kind := "" + if locksOwned != nil { + if locksOwned[lock] { + kind = "O " + } else { + kind = " " + } + } + + Print("%s%s%s\t%s%s\tID:%s", kind, lock.Path, strings.Repeat(" ", pathPadding), ownerName, strings.Repeat(" ", namePadding), lock.Id, ) @@ -108,6 +147,9 @@ type locksFlags struct { // for non-local queries, report cached query results from the last query // instead of actually querying the server again Cached bool + // for non-local queries, verify lock owner on server and + // denote our locks in output + Verify bool } // Filters produces a filter based on locksFlags instance. 
@@ -137,6 +179,7 @@ func init() { cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return") cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks") cmd.Flags().BoolVarP(&locksCmdFlags.Cached, "cached", "", false, "list cached lock information from the last remote query, instead of actually querying the server") + cmd.Flags().BoolVarP(&locksCmdFlags.Verify, "verify", "", false, "verify lock owner on server and mark own locks by 'O'") cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } diff --git a/commands/lockverifier.go b/commands/lockverifier.go index 60a322ce..9365c33b 100644 --- a/commands/lockverifier.go +++ b/commands/lockverifier.go @@ -46,12 +46,17 @@ type lockVerifier struct { } func (lv *lockVerifier) Verify(ref *git.Ref) { + if ref == nil { + panic("no ref specified for verification") + } + if lv.verifyState == verifyStateDisabled || lv.verifiedRefs[ref.Refspec()] { return } lockClient := newLockClient() - ours, theirs, err := lockClient.VerifiableLocks(ref, 0) + lockClient.RemoteRef = ref + ours, theirs, err := lockClient.SearchLocksVerifiable(0, false) if err != nil { if errors.IsNotImplementedError(err) { disableFor(lv.endpoint.Url) diff --git a/docs/man/git-lfs-locks.1.ronn b/docs/man/git-lfs-locks.1.ronn index 49d96562..99ffd6d9 100644 --- a/docs/man/git-lfs-locks.1.ronn +++ b/docs/man/git-lfs-locks.1.ronn @@ -29,6 +29,16 @@ Lists current locks from the Git LFS server. last known locks in case you are offline. There is no guarantee that locks on the server have not changed in the meanwhile. +* `--verify`: + Verifies the lock owner on the server and marks our own locks by 'O'. + Own locks are actually held by us and corresponding files can be updated for + the next push. All other locks are held by someone else. 
+ Contrary to --local, this option will also detect locks which are held by us + despite no local lock information being available (e.g. because the file had + been locked from a different clone); + it will also detect 'broken' locks (e.g. if someone else has forcefully + unlocked our files). + * `-l ` `--limit=`: Specifies number of results to return. diff --git a/locking/api_test.go b/locking/api_test.go index 8ae32f58..9acbe480 100644 --- a/locking/api_test.go +++ b/locking/api_test.go @@ -171,7 +171,7 @@ func TestAPISearch(t *testing.T) { assert.Equal(t, "2", locks.Locks[1].Id) } -func TestAPIVerifiableLocks(t *testing.T) { +func TestAPISearchVerifiable(t *testing.T) { require.NotNil(t, verifyResSchema) srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/locking/locks.go b/locking/locks.go index e89629c5..762b0d64 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -3,6 +3,7 @@ package locking import ( "encoding/json" "fmt" + "io" "net/http" "os" "path/filepath" @@ -218,21 +219,11 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool return []Lock{}, errors.New("can't search cached locks when filter or limit is set") } - cacheFile, err := c.prepareCacheDirectory() - if err != nil { - return []Lock{}, err - } - - _, err = os.Stat(cacheFile) - if err != nil { - if os.IsNotExist(err) { - return []Lock{}, errors.New("no cached locks present") - } - - return []Lock{}, err - } - - return c.readLocksFromCacheFile(cacheFile) + locks := []Lock{} + err := c.readLocksFromCacheFile("remote", func(decoder *json.Decoder) error { + return decoder.Decode(&locks) + }) + return locks, err } else { locks, err := c.searchRemoteLocks(filter, limit) if err != nil { @@ -240,78 +231,95 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool } if len(filter) == 0 && limit == 0 { - cacheFile, err := c.prepareCacheDirectory() - if err != nil { - return locks, err - } - - err = 
c.writeLocksToCacheFile(cacheFile, locks) + err = c.writeLocksToCacheFile("remote", func(writer io.Writer) error { + return c.EncodeLocks(locks, writer) + }) } return locks, err } } -func (c *Client) VerifiableLocks(ref *git.Ref, limit int) (ourLocks, theirLocks []Lock, err error) { - if ref == nil { - ref = c.RemoteRef - } - +func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ourLocks, theirLocks []Lock, err error) { ourLocks = make([]Lock, 0, limit) theirLocks = make([]Lock, 0, limit) - body := &lockVerifiableRequest{ - Ref: &lockRef{Name: ref.Refspec()}, - Limit: limit, + + if cached { + if limit != 0 { + return []Lock{}, []Lock{}, errors.New("can't search cached locks when limit is set") + } + + locks := &lockVerifiableList{} + err := c.readLocksFromCacheFile("verifiable", func(decoder *json.Decoder) error { + return decoder.Decode(&locks) + }) + return locks.Ours, locks.Theirs, err + } else { + var requestRef *lockRef + if c.RemoteRef != nil { + requestRef = &lockRef{Name: c.RemoteRef.Refspec()} + } + + body := &lockVerifiableRequest{ + Ref: requestRef, + Limit: limit, + } + + c.cache.Clear() + + for { + list, res, err := c.client.SearchVerifiable(c.Remote, body) + if res != nil { + switch res.StatusCode { + case http.StatusNotFound, http.StatusNotImplemented: + return ourLocks, theirLocks, errors.NewNotImplementedError(err) + case http.StatusForbidden: + return ourLocks, theirLocks, errors.NewAuthError(err) + } + } + + if err != nil { + return ourLocks, theirLocks, err + } + + if list.Message != "" { + if len(list.RequestID) > 0 { + tracerx.Printf("Server Request ID: %s", list.RequestID) + } + return ourLocks, theirLocks, fmt.Errorf("Server error searching locks: %s", list.Message) + } + + for _, l := range list.Ours { + c.cache.Add(l) + ourLocks = append(ourLocks, l) + if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { + return ourLocks, theirLocks, nil + } + } + + for _, l := range list.Theirs { + c.cache.Add(l) + theirLocks = 
append(theirLocks, l) + if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { + return ourLocks, theirLocks, nil + } + } + + if list.NextCursor != "" { + body.Cursor = list.NextCursor + } else { + break + } + } + + if limit == 0 { + err = c.writeLocksToCacheFile("verifiable", func(writer io.Writer) error { + return c.EncodeLocksVerifiable(ourLocks, theirLocks, writer) + }) + } + + return ourLocks, theirLocks, err } - - c.cache.Clear() - - for { - list, res, err := c.client.SearchVerifiable(c.Remote, body) - if res != nil { - switch res.StatusCode { - case http.StatusNotFound, http.StatusNotImplemented: - return ourLocks, theirLocks, errors.NewNotImplementedError(err) - case http.StatusForbidden: - return ourLocks, theirLocks, errors.NewAuthError(err) - } - } - - if err != nil { - return ourLocks, theirLocks, err - } - - if list.Message != "" { - if len(list.RequestID) > 0 { - tracerx.Printf("Server Request ID: %s", list.RequestID) - } - return ourLocks, theirLocks, fmt.Errorf("Server error searching locks: %s", list.Message) - } - - for _, l := range list.Ours { - c.cache.Add(l) - ourLocks = append(ourLocks, l) - if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { - return ourLocks, theirLocks, nil - } - } - - for _, l := range list.Theirs { - c.cache.Add(l) - theirLocks = append(theirLocks, l) - if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit { - return ourLocks, theirLocks, nil - } - } - - if list.NextCursor != "" { - body.Cursor = list.NextCursor - } else { - break - } - } - - return ourLocks, theirLocks, nil } func (c *Client) searchLocalLocks(filter map[string]string, limit int) ([]Lock, error) { @@ -426,7 +434,7 @@ func init() { kv.RegisterTypeForStorage(&Lock{}) } -func (c *Client) prepareCacheDirectory() (string, error) { +func (c *Client) prepareCacheDirectory(kind string) (string, error) { cacheDir := filepath.Join(c.cacheDir, "locks") if c.RemoteRef != nil { cacheDir = filepath.Join(cacheDir, c.RemoteRef.Refspec()) @@ -446,39 +454,57 
@@ func (c *Client) prepareCacheDirectory() (string, error) { return cacheDir, errors.Wrap(err, "init cache directory "+cacheDir+" failed") } - return filepath.Join(cacheDir, "remote"), nil + return filepath.Join(cacheDir, kind), nil } -func (c *Client) readLocksFromCacheFile(path string) ([]Lock, error) { - file, err := os.Open(path) +func (c *Client) readLocksFromCacheFile(kind string, decoder func(*json.Decoder) error) error { + cacheFile, err := c.prepareCacheDirectory(kind) if err != nil { - return []Lock{}, err + return err + } + + _, err = os.Stat(cacheFile) + if err != nil { + if os.IsNotExist(err) { + return errors.New("no cached locks present") + } + + return err + } + + file, err := os.Open(cacheFile) + if err != nil { + return err } defer file.Close() - - locks := []Lock{} - err = json.NewDecoder(file).Decode(&locks) - if err != nil { - return []Lock{}, err - } - - return locks, nil + return decoder(json.NewDecoder(file)) } -func (c *Client) writeLocksToCacheFile(path string, locks []Lock) error { - file, err := os.Create(path) +func (c *Client) EncodeLocks(locks []Lock, writer io.Writer) error { + return json.NewEncoder(writer).Encode(locks) +} + +func (c *Client) EncodeLocksVerifiable(ourLocks, theirLocks []Lock, writer io.Writer) error { + return json.NewEncoder(writer).Encode(&lockVerifiableList{ + Ours: ourLocks, + Theirs: theirLocks, + }) +} + +func (c *Client) writeLocksToCacheFile(kind string, writer func(io.Writer) error) error { + cacheFile, err := c.prepareCacheDirectory(kind) if err != nil { return err } - err = json.NewEncoder(file).Encode(locks) + file, err := os.Create(cacheFile) if err != nil { - file.Close() return err } - return file.Close() + defer file.Close() + return writer(file) } type nilLockCacher struct{} diff --git a/locking/locks_test.go b/locking/locks_test.go index 4a1fe748..39cb22c7 100644 --- a/locking/locks_test.go +++ b/locking/locks_test.go @@ -63,7 +63,7 @@ func TestRemoteLocksWithCache(t *testing.T) { assert.Nil(t, 
client.SetupFileCache(tempDir)) client.RemoteRef = &git.Ref{Name: "refs/heads/master"} - cacheFile, err := client.prepareCacheDirectory() + cacheFile, err := client.prepareCacheDirectory("remote") assert.Nil(t, err) // Cache file should not exist @@ -173,7 +173,8 @@ func TestRefreshCache(t *testing.T) { assert.Nil(t, err) assert.Empty(t, locks) - _, _, err = client.VerifiableLocks(nil, 100) + client.RemoteRef = &git.Ref{Name: "refs/heads/master"} + _, _, err = client.SearchLocksVerifiable(100, false) assert.Nil(t, err) locks, err = client.SearchLocks(nil, 0, true, false) @@ -192,8 +193,15 @@ func TestRefreshCache(t *testing.T) { }, locks) } -func TestGetVerifiableLocks(t *testing.T) { +func TestSearchLocksVerifiableWithCache(t *testing.T) { + var err error + tempDir, err := ioutil.TempDir("", "testCacheLock") + assert.Nil(t, err) + + remoteQueries := 0 srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + remoteQueries++ + assert.Equal(t, "POST", r.Method) assert.Equal(t, "/api/locks/verify", r.URL.Path) @@ -237,26 +245,72 @@ func TestGetVerifiableLocks(t *testing.T) { require.Nil(t, err) client, err := NewClient("", lfsclient, config.New()) + assert.Nil(t, client.SetupFileCache(tempDir)) + + client.RemoteRef = &git.Ref{Name: "refs/heads/master"} + cacheFile, err := client.prepareCacheDirectory("verifiable") assert.Nil(t, err) - ourLocks, theirLocks, err := client.VerifiableLocks(nil, 0) + // Cache file should not exist + fi, err := os.Stat(cacheFile) + assert.True(t, os.IsNotExist(err)) + + // Querying non-existing cache file will report nothing + ourLocks, theirLocks, err := client.SearchLocksVerifiable(0, true) + assert.NotNil(t, err) + assert.Empty(t, ourLocks) + assert.Empty(t, theirLocks) + assert.Equal(t, 0, remoteQueries) + + // REMOTE QUERY: No cache file will be created when querying with a limit + ourLocks, theirLocks, err = client.SearchLocksVerifiable(1, false) assert.Nil(t, err) + // Just make sure we have 
received anything, content doesn't matter + assert.Equal(t, 1, len(ourLocks)) + assert.Equal(t, 0, len(theirLocks)) + assert.Equal(t, 1, remoteQueries) + + fi, err = os.Stat(cacheFile) + assert.True(t, os.IsNotExist(err)) + + // REMOTE QUERY: locks will be reported and cache file should be created + ourLocks, theirLocks, err = client.SearchLocksVerifiable(0, false) + assert.Nil(t, err) + assert.Equal(t, 3, remoteQueries) + + fi, err = os.Stat(cacheFile) + assert.Nil(t, err) + const size int64 = 478 + assert.Equal(t, size, fi.Size()) // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) // Sort locks for stable comparison - sort.Sort(LocksById(ourLocks)) - assert.Equal(t, []Lock{ + expectedOurLocks := []Lock{ Lock{Path: "folder/0/test1.dat", Id: "101", LockedAt: zeroTime}, Lock{Path: "folder/0/test2.dat", Id: "102", LockedAt: zeroTime}, Lock{Path: "folder/1/test1.dat", Id: "111", LockedAt: zeroTime}, - }, ourLocks) + } - sort.Sort(LocksById(theirLocks)) - assert.Equal(t, []Lock{ + expectedTheirLocks := []Lock{ Lock{Path: "folder/0/test3.dat", Id: "103", LockedAt: zeroTime}, Lock{Path: "folder/1/test2.dat", Id: "112", LockedAt: zeroTime}, Lock{Path: "folder/1/test3.dat", Id: "113", LockedAt: zeroTime}, - }, theirLocks) + } + + sort.Sort(LocksById(ourLocks)) + assert.Equal(t, expectedOurLocks, ourLocks) + sort.Sort(LocksById(theirLocks)) + assert.Equal(t, expectedTheirLocks, theirLocks) + + // Querying cache file should report same locks + ourLocks, theirLocks, err = client.SearchLocksVerifiable(0, true) + assert.Nil(t, err) + assert.Equal(t, 3, remoteQueries) + + sort.Sort(LocksById(ourLocks)) + assert.Equal(t, expectedOurLocks, ourLocks) + sort.Sort(LocksById(theirLocks)) + assert.Equal(t, expectedTheirLocks, theirLocks) }