2016-11-28 15:12:47 +00:00
|
|
|
package locking
|
|
|
|
|
|
|
|
import (
|
2018-10-07 10:36:15 +00:00
|
|
|
"encoding/json"
|
2016-11-28 15:12:47 +00:00
|
|
|
"fmt"
|
2019-03-28 09:12:33 +00:00
|
|
|
"io"
|
2017-02-16 00:24:00 +00:00
|
|
|
"net/http"
|
2016-12-05 09:47:29 +00:00
|
|
|
"os"
|
|
|
|
"path/filepath"
|
2016-12-19 16:47:22 +00:00
|
|
|
"sync"
|
2016-11-28 17:01:06 +00:00
|
|
|
"time"
|
2016-11-28 15:12:47 +00:00
|
|
|
|
2018-12-05 16:15:52 +00:00
|
|
|
"github.com/git-lfs/git-lfs/config"
|
2016-12-22 23:59:54 +00:00
|
|
|
"github.com/git-lfs/git-lfs/errors"
|
2016-12-19 16:47:22 +00:00
|
|
|
"github.com/git-lfs/git-lfs/filepathfilter"
|
2017-08-11 18:32:40 +00:00
|
|
|
"github.com/git-lfs/git-lfs/git"
|
2016-12-22 23:59:54 +00:00
|
|
|
"github.com/git-lfs/git-lfs/lfsapi"
|
2016-12-19 17:41:53 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tools"
|
2016-12-05 09:47:29 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tools/kv"
|
2017-01-16 12:06:21 +00:00
|
|
|
"github.com/rubyist/tracerx"
|
2016-11-28 15:12:47 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// ErrNoMatchingLocks is an error returned when no matching locks were
	// able to be resolved; returned by lockIdFromPath when the server
	// reports zero locks for a path.
	ErrNoMatchingLocks = errors.New("lfs: no matching locks found")

	// ErrLockAmbiguous is an error returned when multiple matching locks
	// were found; returned by lockIdFromPath when the server reports more
	// than one lock for a path.
	ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous")
)
|
|
|
|
|
2017-01-03 21:13:59 +00:00
|
|
|
// LockCacher is a local store of lock records known to this client.
// Client starts with the no-op nilLockCacher and swaps in a persistent
// implementation via SetupFileCache.
type LockCacher interface {
	// Add records the given lock in the cache.
	Add(l Lock) error
	// RemoveByPath removes the cached lock for the given file path, if any.
	RemoveByPath(filePath string) error
	// RemoveById removes the cached lock with the given id, if any.
	RemoveById(id string) error
	// Locks returns all locks currently held in the cache.
	Locks() []Lock
	// Clear discards all cached locks.
	Clear()
	// Save persists the cache contents; invoked from Client.Close.
	Save() error
}
|
|
|
|
|
2016-12-05 09:47:29 +00:00
|
|
|
// Client is the main interface object for the locking package
|
|
|
|
// Client is the main interface object for the locking package
type Client struct {
	// Remote names the remote that lock/unlock/search API calls target.
	Remote string
	// RemoteRef, when non-nil, supplies the refspec sent with lock API
	// requests (see LockFile, SearchLocksVerifiable, searchRemoteLocks).
	RemoteRef *git.Ref
	// client performs the actual lock API transport calls.
	client *lockClient
	// cache holds locally-known locks; a nilLockCacher until
	// SetupFileCache installs a persistent LockCache.
	cache LockCacher
	// cacheDir is the root directory for cached lock-search results,
	// set by SetupFileCache and consumed by prepareCacheDirectory.
	cacheDir string
	cfg      *config.Configuration

	// lockablePatterns, lockableFilter and lockableMutex presumably guard
	// the lockable-pattern state; their usage is not visible in this
	// chunk — confirm against the rest of the package.
	lockablePatterns []string
	lockableFilter   *filepathfilter.Filter
	lockableMutex    sync.Mutex

	LocalWorkingDir string
	LocalGitDir     string
	// SetLockableFilesReadOnly, when true, causes UnlockFileById to clear
	// the write flag on unlocked files that match the lockable filter.
	SetLockableFilesReadOnly bool

	// ModifyIgnoredFiles controls whether ignored files that match the
	// lockable patterns have their permissions modified; initialized from
	// the lfs.lockignoredfiles git config (default false).
	ModifyIgnoredFiles bool
}
|
|
|
|
|
2016-12-05 11:25:02 +00:00
|
|
|
// NewClient creates a new locking client with the given configuration
|
2016-12-05 15:01:25 +00:00
|
|
|
// You must call the returned object's `Close` method when you are finished with
|
|
|
|
// it
|
2018-12-05 16:15:52 +00:00
|
|
|
func NewClient(remote string, lfsClient *lfsapi.Client, cfg *config.Configuration) (*Client, error) {
|
2017-01-03 21:13:59 +00:00
|
|
|
return &Client{
|
locking: add flag to control modification of ignored files
In a727fea2 ("locking: remove write permission for ignored files",
2018-08-20), we learned to set and clear the read-only attribute on
ignored files that match the lockable pattern, since "git lfs lock"
operates on pathspecs, not actual files.
However, in doing so, we caused a regression for people who have ignored
files (such as build files) which match the lockable pattern, but should
not have their permissions modified. Since there is no easy way for us
to know if the user would like their files modified, add a config option
to control this behavior, and leave it at the historical (i.e.,
pre-a727fea2) default, off. Add documentation accordingly.
Choosing the historical behavior is compatible with the most existing
use cases and preserves the behavior people expect with Git, which is
that it does not touch ignored files.
2018-12-03 22:13:55 +00:00
|
|
|
Remote: remote,
|
|
|
|
client: &lockClient{Client: lfsClient},
|
|
|
|
cache: &nilLockCacher{},
|
|
|
|
cfg: cfg,
|
|
|
|
ModifyIgnoredFiles: lfsClient.GitEnv().Bool("lfs.lockignoredfiles", false),
|
2017-01-03 21:13:59 +00:00
|
|
|
}, nil
|
|
|
|
}
|
2016-12-05 09:47:29 +00:00
|
|
|
|
2017-01-03 21:13:59 +00:00
|
|
|
func (c *Client) SetupFileCache(path string) error {
|
|
|
|
stat, err := os.Stat(path)
|
2016-12-06 10:06:46 +00:00
|
|
|
if err != nil {
|
2017-01-03 21:13:59 +00:00
|
|
|
return errors.Wrap(err, "init lock cache")
|
|
|
|
}
|
|
|
|
|
|
|
|
lockFile := path
|
|
|
|
if stat.IsDir() {
|
|
|
|
lockFile = filepath.Join(path, "lockcache.db")
|
2016-12-06 10:06:46 +00:00
|
|
|
}
|
2017-01-03 21:13:59 +00:00
|
|
|
|
2016-12-13 14:48:17 +00:00
|
|
|
cache, err := NewLockCache(lockFile)
|
2016-12-05 09:47:29 +00:00
|
|
|
if err != nil {
|
2017-01-03 21:13:59 +00:00
|
|
|
return errors.Wrap(err, "init lock cache")
|
2016-12-05 09:47:29 +00:00
|
|
|
}
|
2017-01-06 10:10:39 +00:00
|
|
|
|
2017-01-03 21:13:59 +00:00
|
|
|
c.cache = cache
|
2018-10-07 10:36:15 +00:00
|
|
|
c.cacheDir = filepath.Join(path, "cache")
|
2017-01-03 21:13:59 +00:00
|
|
|
return nil
|
2016-12-05 09:47:29 +00:00
|
|
|
}
|
|
|
|
|
2016-12-05 15:01:25 +00:00
|
|
|
// Close this client instance; must be called to dispose of resources
func (c *Client) Close() error {
	// Persist any cached lock state (a no-op for nilLockCacher).
	return c.cache.Save()
}
|
|
|
|
|
2016-12-05 11:25:02 +00:00
|
|
|
// LockFile attempts to lock a file on the current remote
// path must be relative to the root of the repository
// Returns the lock id if successful, or an error
func (c *Client) LockFile(path string) (Lock, error) {
	// Ask the server to create the lock, scoped to the remote ref.
	lockRes, _, err := c.client.Lock(c.Remote, &lockRequest{
		Path: path,
		Ref:  &lockRef{Name: c.RemoteRef.Refspec()},
	})
	if err != nil {
		return Lock{}, errors.Wrap(err, "api")
	}

	// A non-empty Message means the server refused the lock.
	if len(lockRes.Message) > 0 {
		if len(lockRes.RequestID) > 0 {
			tracerx.Printf("Server Request ID: %s", lockRes.RequestID)
		}
		return Lock{}, fmt.Errorf("Server unable to create lock: %s", lockRes.Message)
	}

	// Remember the new lock locally so searches/unlocks can find it.
	lock := *lockRes.Lock
	if err := c.cache.Add(lock); err != nil {
		return Lock{}, errors.Wrap(err, "lock cache")
	}

	abs, err := getAbsolutePath(path)
	if err != nil {
		return Lock{}, errors.Wrap(err, "make lockpath absolute")
	}

	// Ensure writeable on return
	if err := tools.SetFileWriteFlag(abs, true); err != nil {
		return Lock{}, err
	}

	return lock, nil
}
|
|
|
|
|
2017-08-11 18:32:40 +00:00
|
|
|
// getAbsolutePath takes a repository-relative path and makes it absolute.
|
|
|
|
//
|
|
|
|
// For instance, given a repository in /usr/local/src/my-repo and a file called
|
|
|
|
// dir/foo/bar.txt, getAbsolutePath will return:
|
|
|
|
//
|
|
|
|
// /usr/local/src/my-repo/dir/foo/bar.txt
|
|
|
|
func getAbsolutePath(p string) (string, error) {
|
|
|
|
root, err := git.RootDir()
|
|
|
|
if err != nil {
|
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
|
|
|
return filepath.Join(root, p), nil
|
|
|
|
}
|
|
|
|
|
2016-12-05 11:25:02 +00:00
|
|
|
// UnlockFile attempts to unlock a file on the current remote
// path must be relative to the root of the repository
// Force causes the file to be unlocked from other users as well
func (c *Client) UnlockFile(path string, force bool) error {
	// Resolve the path to a lock id via a server-side search, then
	// delegate to UnlockFileById.
	id, err := c.lockIdFromPath(path)
	if err != nil {
		return fmt.Errorf("Unable to get lock id: %v", err)
	}

	return c.UnlockFileById(id, force)
}
|
|
|
|
|
2016-12-05 11:25:02 +00:00
|
|
|
// UnlockFileById attempts to unlock a lock with a given id on the current remote
// Force causes the file to be unlocked from other users as well
func (c *Client) UnlockFileById(id string, force bool) error {
	unlockRes, _, err := c.client.Unlock(c.RemoteRef, c.Remote, id, force)
	if err != nil {
		return errors.Wrap(err, "api")
	}

	// A non-empty Message means the server refused the unlock.
	if len(unlockRes.Message) > 0 {
		if len(unlockRes.RequestID) > 0 {
			tracerx.Printf("Server Request ID: %s", unlockRes.RequestID)
		}
		return fmt.Errorf("Server unable to unlock: %s", unlockRes.Message)
	}

	// Drop the lock from the local cache now that the server released it.
	if err := c.cache.RemoveById(id); err != nil {
		return fmt.Errorf("Error caching unlock information: %v", err)
	}

	if unlockRes.Lock != nil {
		abs, err := getAbsolutePath(unlockRes.Lock.Path)
		if err != nil {
			return errors.Wrap(err, "make lockpath absolute")
		}

		// Make non-writeable if required
		if c.SetLockableFilesReadOnly && c.IsFileLockable(unlockRes.Lock.Path) {
			return tools.SetFileWriteFlag(abs, false)
		}
	}

	return nil
}
|
|
|
|
|
2016-11-28 17:01:06 +00:00
|
|
|
// Lock is a record of a locked file
type Lock struct {
	// Id is the unique identifier corresponding to this particular Lock. It
	// must be consistent with the local copy, and the server's copy.
	Id string `json:"id"`
	// Path is the path to the file that is locked as a part of this lock.
	// NOTE(review): elsewhere in this package lock paths are treated as
	// repository-relative (see getAbsolutePath callers) — the historical
	// "absolute path" wording here looks inaccurate; confirm.
	Path string `json:"path"`
	// Owner is the identity of the user that created this lock.
	Owner *User `json:"owner,omitempty"`
	// LockedAt is the time at which this lock was acquired.
	LockedAt time.Time `json:"locked_at"`
}
|
|
|
|
|
|
|
|
// SearchLocks returns a channel of locks which match the given name/value filter
|
|
|
|
// If limit > 0 then search stops at that number of locks
|
2016-12-05 18:16:30 +00:00
|
|
|
// If localOnly = true, don't query the server & report only own local locks
|
2018-10-08 07:32:43 +00:00
|
|
|
func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool, cached bool) ([]Lock, error) {
|
2016-12-05 18:16:30 +00:00
|
|
|
if localOnly {
|
2018-10-06 08:55:14 +00:00
|
|
|
return c.searchLocalLocks(filter, limit)
|
2018-10-08 07:32:43 +00:00
|
|
|
} else if cached {
|
|
|
|
if len(filter) > 0 || limit != 0 {
|
|
|
|
return []Lock{}, errors.New("can't search cached locks when filter or limit is set")
|
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
locks := []Lock{}
|
|
|
|
err := c.readLocksFromCacheFile("remote", func(decoder *json.Decoder) error {
|
|
|
|
return decoder.Decode(&locks)
|
|
|
|
})
|
|
|
|
return locks, err
|
2016-12-05 18:16:30 +00:00
|
|
|
} else {
|
2018-10-07 10:36:15 +00:00
|
|
|
locks, err := c.searchRemoteLocks(filter, limit)
|
|
|
|
if err != nil {
|
|
|
|
return locks, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(filter) == 0 && limit == 0 {
|
2019-03-28 09:12:41 +00:00
|
|
|
err = c.writeLocksToCacheFile("remote", func(writer io.Writer) error {
|
|
|
|
return c.EncodeLocks(locks, writer)
|
|
|
|
})
|
2018-10-07 10:36:15 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return locks, err
|
2016-12-05 18:16:30 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
// SearchLocksVerifiable queries the server's "verify" endpoint, splitting the
// result into locks owned by the current user (ourLocks) and locks owned by
// others (theirLocks). If limit > 0 the search stops once that many locks
// (across both lists) have been collected. If cached is true, the result of
// the last unlimited search is read back from the cache file instead of
// querying the server (limit must then be 0).
func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ourLocks, theirLocks []Lock, err error) {
	ourLocks = make([]Lock, 0, limit)
	theirLocks = make([]Lock, 0, limit)

	if cached {
		// The cache only ever holds a complete, unlimited result set.
		if limit != 0 {
			return []Lock{}, []Lock{}, errors.New("can't search cached locks when limit is set")
		}

		// NOTE: &locks is a **lockVerifiableList; encoding/json follows
		// the double indirection, so this decodes as intended.
		locks := &lockVerifiableList{}
		err := c.readLocksFromCacheFile("verifiable", func(decoder *json.Decoder) error {
			return decoder.Decode(&locks)
		})
		return locks.Ours, locks.Theirs, err
	} else {
		var requestRef *lockRef
		if c.RemoteRef != nil {
			requestRef = &lockRef{Name: c.RemoteRef.Refspec()}
		}

		body := &lockVerifiableRequest{
			Ref:   requestRef,
			Limit: limit,
		}

		// The local cache is rebuilt from scratch as results stream in.
		c.cache.Clear()

		// Page through results using the server-provided cursor.
		for {
			list, res, err := c.client.SearchVerifiable(c.Remote, body)
			if res != nil {
				// Map HTTP statuses to typed errors callers can detect.
				switch res.StatusCode {
				case http.StatusNotFound, http.StatusNotImplemented:
					return ourLocks, theirLocks, errors.NewNotImplementedError(err)
				case http.StatusForbidden:
					return ourLocks, theirLocks, errors.NewAuthError(err)
				}
			}

			if err != nil {
				return ourLocks, theirLocks, err
			}

			// A non-empty Message means the server refused the search.
			if list.Message != "" {
				if len(list.RequestID) > 0 {
					tracerx.Printf("Server Request ID: %s", list.RequestID)
				}
				return ourLocks, theirLocks, fmt.Errorf("Server error searching locks: %s", list.Message)
			}

			for _, l := range list.Ours {
				c.cache.Add(l)
				ourLocks = append(ourLocks, l)
				if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit {
					return ourLocks, theirLocks, nil
				}
			}

			for _, l := range list.Theirs {
				c.cache.Add(l)
				theirLocks = append(theirLocks, l)
				if limit > 0 && (len(ourLocks)+len(theirLocks)) >= limit {
					return ourLocks, theirLocks, nil
				}
			}

			if list.NextCursor != "" {
				body.Cursor = list.NextCursor
			} else {
				break
			}
		}

		// Cache only complete (unlimited) result sets.
		if limit == 0 {
			err = c.writeLocksToCacheFile("verifiable", func(writer io.Writer) error {
				return c.EncodeLocksVerifiable(ourLocks, theirLocks, writer)
			})
		}

		return ourLocks, theirLocks, err
	}
}
|
2017-01-31 00:21:56 +00:00
|
|
|
|
2018-10-06 08:55:14 +00:00
|
|
|
func (c *Client) searchLocalLocks(filter map[string]string, limit int) ([]Lock, error) {
|
2016-12-14 09:44:58 +00:00
|
|
|
cachedlocks := c.cache.Locks()
|
2016-12-05 18:16:30 +00:00
|
|
|
path, filterByPath := filter["path"]
|
|
|
|
id, filterById := filter["id"]
|
|
|
|
lockCount := 0
|
2016-12-12 14:56:40 +00:00
|
|
|
locks := make([]Lock, 0, len(cachedlocks))
|
|
|
|
for _, l := range cachedlocks {
|
2016-12-05 18:16:30 +00:00
|
|
|
// Manually filter by Path/Id
|
|
|
|
if (filterByPath && path != l.Path) ||
|
|
|
|
(filterById && id != l.Id) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
locks = append(locks, l)
|
|
|
|
lockCount++
|
|
|
|
if limit > 0 && lockCount >= limit {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return locks, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// searchRemoteLocks queries the server for locks matching the given
// name/value filter, paging through results with the server-provided cursor
// and stopping at limit locks when limit > 0.
func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) {
	locks := make([]Lock, 0, limit)

	// Translate the map filter into the API's list-of-filters form.
	apifilters := make([]lockFilter, 0, len(filter))
	for k, v := range filter {
		apifilters = append(apifilters, lockFilter{Property: k, Value: v})
	}

	query := &lockSearchRequest{
		Filters: apifilters,
		Limit:   limit,
		Refspec: c.RemoteRef.Refspec(),
	}

	// Page through results until the server stops returning a cursor.
	for {
		list, _, err := c.client.Search(c.Remote, query)
		if err != nil {
			return locks, errors.Wrap(err, "locking")
		}

		// A non-empty Message means the server refused the search.
		if list.Message != "" {
			if len(list.RequestID) > 0 {
				tracerx.Printf("Server Request ID: %s", list.RequestID)
			}
			return locks, fmt.Errorf("Server error searching for locks: %s", list.Message)
		}

		for _, l := range list.Locks {
			locks = append(locks, l)
			if limit > 0 && len(locks) >= limit {
				// Exit outer loop too
				return locks, nil
			}
		}

		if list.NextCursor != "" {
			query.Cursor = list.NextCursor
		} else {
			break
		}
	}

	return locks, nil
}
|
|
|
|
|
|
|
|
// lockIdFromPath makes a call to the LFS API and resolves the ID for the locked
|
|
|
|
// locked at the given path.
|
|
|
|
//
|
|
|
|
// If the API call failed, an error will be returned. If multiple locks matched
|
|
|
|
// the given path (should not happen during real-world usage), an error will be
|
2019-07-24 07:17:40 +00:00
|
|
|
// returned. If no locks matched the given path, an error will be returned.
|
2016-11-28 15:12:47 +00:00
|
|
|
//
|
|
|
|
// If the API call is successful, and only one lock matches the given filepath,
|
|
|
|
// then its ID will be returned, along with a value of "nil" for the error.
|
2016-12-05 09:47:29 +00:00
|
|
|
func (c *Client) lockIdFromPath(path string) (string, error) {
|
2016-12-22 23:59:54 +00:00
|
|
|
list, _, err := c.client.Search(c.Remote, &lockSearchRequest{
|
|
|
|
Filters: []lockFilter{
|
|
|
|
{Property: "path", Value: path},
|
2016-11-28 15:12:47 +00:00
|
|
|
},
|
|
|
|
})
|
|
|
|
|
2016-12-22 23:59:54 +00:00
|
|
|
if err != nil {
|
2016-11-28 15:12:47 +00:00
|
|
|
return "", err
|
|
|
|
}
|
|
|
|
|
2016-12-22 23:59:54 +00:00
|
|
|
switch len(list.Locks) {
|
2016-11-28 15:12:47 +00:00
|
|
|
case 0:
|
2016-11-28 17:09:49 +00:00
|
|
|
return "", ErrNoMatchingLocks
|
2016-11-28 15:12:47 +00:00
|
|
|
case 1:
|
2016-12-22 23:59:54 +00:00
|
|
|
return list.Locks[0].Id, nil
|
2016-11-28 15:12:47 +00:00
|
|
|
default:
|
2016-11-28 17:09:49 +00:00
|
|
|
return "", ErrLockAmbiguous
|
2016-11-28 15:12:47 +00:00
|
|
|
}
|
|
|
|
}
|
2016-12-05 09:47:29 +00:00
|
|
|
|
2016-12-19 16:47:22 +00:00
|
|
|
// IsFileLockedByCurrentCommitter returns whether a file is locked by the
|
2017-02-03 17:48:31 +00:00
|
|
|
// current user, as cached locally
|
2016-12-19 16:47:22 +00:00
|
|
|
func (c *Client) IsFileLockedByCurrentCommitter(path string) bool {
|
|
|
|
filter := map[string]string{"path": path}
|
2018-10-06 08:55:14 +00:00
|
|
|
locks, err := c.searchLocalLocks(filter, 1)
|
2016-12-19 16:47:22 +00:00
|
|
|
if err != nil {
|
|
|
|
tracerx.Printf("Error searching cached locks: %s\nForcing remote search", err)
|
|
|
|
locks, _ = c.searchRemoteLocks(filter, 1)
|
|
|
|
}
|
|
|
|
return len(locks) > 0
|
|
|
|
}
|
|
|
|
|
2016-12-05 18:16:30 +00:00
|
|
|
// init registers the Lock type with the kv store so cached locks can be
// serialized and deserialized by the lock cache.
func init() {
	kv.RegisterTypeForStorage(&Lock{})
}
|
2017-01-03 21:13:59 +00:00
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
func (c *Client) prepareCacheDirectory(kind string) (string, error) {
|
2018-10-07 10:36:15 +00:00
|
|
|
cacheDir := filepath.Join(c.cacheDir, "locks")
|
|
|
|
if c.RemoteRef != nil {
|
|
|
|
cacheDir = filepath.Join(cacheDir, c.RemoteRef.Refspec())
|
|
|
|
}
|
|
|
|
|
|
|
|
stat, err := os.Stat(cacheDir)
|
|
|
|
if err == nil {
|
|
|
|
if !stat.IsDir() {
|
|
|
|
return cacheDir, errors.New("init cache directory " + cacheDir + " failed: already exists, but is no directory")
|
|
|
|
}
|
|
|
|
} else if os.IsNotExist(err) {
|
2018-12-05 16:15:52 +00:00
|
|
|
err = tools.MkdirAll(cacheDir, c.cfg)
|
2018-10-07 10:36:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return cacheDir, errors.Wrap(err, "init cache directory "+cacheDir+" failed: directory creation failed")
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return cacheDir, errors.Wrap(err, "init cache directory "+cacheDir+" failed")
|
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
return filepath.Join(cacheDir, kind), nil
|
2018-10-07 10:36:15 +00:00
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
func (c *Client) readLocksFromCacheFile(kind string, decoder func(*json.Decoder) error) error {
|
|
|
|
cacheFile, err := c.prepareCacheDirectory(kind)
|
2018-10-08 07:32:43 +00:00
|
|
|
if err != nil {
|
2019-03-28 09:12:41 +00:00
|
|
|
return err
|
2018-10-08 07:32:43 +00:00
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
_, err = os.Stat(cacheFile)
|
|
|
|
if err != nil {
|
|
|
|
if os.IsNotExist(err) {
|
|
|
|
return errors.New("no cached locks present")
|
|
|
|
}
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
2018-10-08 07:32:43 +00:00
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
file, err := os.Open(cacheFile)
|
2018-10-08 07:32:43 +00:00
|
|
|
if err != nil {
|
2019-03-28 09:12:41 +00:00
|
|
|
return err
|
2018-10-08 07:32:43 +00:00
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
defer file.Close()
|
|
|
|
return decoder(json.NewDecoder(file))
|
2018-10-08 07:32:43 +00:00
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:33 +00:00
|
|
|
func (c *Client) EncodeLocks(locks []Lock, writer io.Writer) error {
|
|
|
|
return json.NewEncoder(writer).Encode(locks)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *Client) EncodeLocksVerifiable(ourLocks, theirLocks []Lock, writer io.Writer) error {
|
|
|
|
return json.NewEncoder(writer).Encode(&lockVerifiableList{
|
|
|
|
Ours: ourLocks,
|
|
|
|
Theirs: theirLocks,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
func (c *Client) writeLocksToCacheFile(kind string, writer func(io.Writer) error) error {
|
|
|
|
cacheFile, err := c.prepareCacheDirectory(kind)
|
2018-10-07 10:36:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
file, err := os.Create(cacheFile)
|
2018-10-07 10:36:15 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-03-28 09:12:41 +00:00
|
|
|
defer file.Close()
|
|
|
|
return writer(file)
|
2018-10-07 10:36:15 +00:00
|
|
|
}
|
|
|
|
|
2017-01-03 21:13:59 +00:00
|
|
|
// nilLockCacher is a no-op LockCacher. It is the default cache installed by
// NewClient until SetupFileCache replaces it; it stores nothing and every
// operation succeeds.
type nilLockCacher struct{}

// Add discards the lock and reports success.
func (c *nilLockCacher) Add(l Lock) error {
	return nil
}

// RemoveByPath does nothing and reports success.
func (c *nilLockCacher) RemoveByPath(filePath string) error {
	return nil
}

// RemoveById does nothing and reports success.
func (c *nilLockCacher) RemoveById(id string) error {
	return nil
}

// Locks always returns nil, as nothing is ever stored.
func (c *nilLockCacher) Locks() []Lock {
	return nil
}

// Clear does nothing.
func (c *nilLockCacher) Clear() {}

// Save does nothing and reports success.
func (c *nilLockCacher) Save() error {
	return nil
}
|