// git-lfs/locking/locks.go

package locking
import (
"fmt"
"os"
"path/filepath"
"sync"
"time"
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/filepathfilter"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/lfsapi"
"github.com/git-lfs/git-lfs/tools"
"github.com/git-lfs/git-lfs/tools/kv"
"github.com/rubyist/tracerx"
)
var (
// ErrNoMatchingLocks is an error returned when no matching locks were
// able to be resolved
ErrNoMatchingLocks = errors.New("lfs: no matching locks found")
// ErrLockAmbiguous is an error returned when multiple matching locks
// were found
ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous")
)
type LockCacher interface {
Add(l Lock) error
RemoveByPath(filePath string) error
RemoveById(id string) error
Locks() []Lock
Clear()
Save() error
}
// Client is the main interface object for the locking package
type Client struct {
Remote string
client *lockClient
cache LockCacher
lockablePatterns []string
lockableFilter *filepathfilter.Filter
lockableMutex sync.Mutex
LocalWorkingDir string
LocalGitDir string
SetLockableFilesReadOnly bool
}
// NewClient creates a new locking client with the given configuration
// You must call the returned object's `Close` method when you are finished with
// it
func NewClient(remote string, lfsClient *lfsapi.Client) (*Client, error) {
return &Client{
Remote: remote,
client: &lockClient{Client: lfsClient},
cache: &nilLockCacher{},
}, nil
}
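// SetupFileCache configures a persistent, file-backed lock cache for this
// client. path may be an existing file, or a directory in which a
// "lockcache.db" file will be created.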
func (c *Client) SetupFileCache(path string) error {
stat, err := os.Stat(path)
if err != nil {
return errors.Wrap(err, "init lock cache")
}
lockFile := path
if stat.IsDir() {
lockFile = filepath.Join(path, "lockcache.db")
}
cache, err := NewLockCache(lockFile)
if err != nil {
return errors.Wrap(err, "init lock cache")
}
c.cache = cache
return nil
}
// Close this client instance; must be called to dispose of resources
func (c *Client) Close() error {
return c.cache.Save()
}
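// A minimal lifecycle sketch for callers of this package (illustrative only;
// the remote name, cache path, and the previously constructed lfsClient are
// assumptions, not values mandated by this package):
//
//	client, err := locking.NewClient("origin", lfsClient)
//	if err != nil {
//		return err
//	}
//	defer client.Close()
//	if err := client.SetupFileCache(".git/lfs"); err != nil {
//		return err
//	}
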
// LockFile attempts to lock a file on the current remote
// path must be relative to the root of the repository
// Returns the Lock record if successful, or an error
func (c *Client) LockFile(path string) (Lock, error) {
// TODO: this is not really the constraint we need to avoid merges, improve as per proposal
latest, err := git.CurrentRemoteRef()
if err != nil {
return Lock{}, err
}
lockReq := &lockRequest{
Path: path,
LatestRemoteCommit: latest.Sha,
Committer: NewCommitter(c.client.CurrentUser()),
}
lockRes, _, err := c.client.Lock(c.Remote, lockReq)
if err != nil {
return Lock{}, errors.Wrap(err, "api")
}
if len(lockRes.Err) > 0 {
return Lock{}, fmt.Errorf("Server unable to create lock: %v", lockRes.Err)
}
lock := *lockRes.Lock
if err := c.cache.Add(lock); err != nil {
return Lock{}, errors.Wrap(err, "lock cache")
}
// Ensure writeable on return
if err := tools.SetFileWriteFlag(path, true); err != nil {
return Lock{}, err
}
return lock, nil
}
// UnlockFile attempts to unlock a file on the current remote
// path must be relative to the root of the repository
// Force causes the file to be unlocked even if it is held by another user
func (c *Client) UnlockFile(path string, force bool) error {
id, err := c.lockIdFromPath(path)
if err != nil {
return fmt.Errorf("Unable to get lock id: %v", err)
}
err = c.UnlockFileById(id, force)
if err != nil {
return err
}
// Make non-writeable if required
if c.SetLockableFilesReadOnly && c.IsFileLockable(path) {
return tools.SetFileWriteFlag(path, false)
}
return nil
}
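// A hedged sketch of the lock/unlock round trip, assuming client is a *Client
// built as in the sketch above; the file path is purely illustrative and must
// be relative to the repository root:
//
//	lock, err := client.LockFile("docs/design.md")
//	if err != nil {
//		return err
//	}
//	tracerx.Printf("acquired lock %s", lock.Id)
//	// ... edit the file ...
//	if err := client.UnlockFile("docs/design.md", false); err != nil {
//		return err
//	}
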
// UnlockFileById attempts to unlock a lock with a given id on the current remote
// Force causes the file to be unlocked even if it is held by another user
func (c *Client) UnlockFileById(id string, force bool) error {
unlockRes, _, err := c.client.Unlock(c.Remote, id, force)
if err != nil {
return errors.Wrap(err, "api")
}
if len(unlockRes.Err) > 0 {
return fmt.Errorf("Server unable to unlock: %s", unlockRes.Err)
}
if err := c.cache.RemoveById(id); err != nil {
return fmt.Errorf("Error caching unlock information: %v", err)
}
return nil
}
// Lock is a record of a locked file
type Lock struct {
// Id is the unique identifier corresponding to this particular Lock. It
// must be consistent with both the local copy and the server's copy.
Id string `json:"id"`
// Path is the repository-relative path to the file that is locked as part
// of this lock.
Path string `json:"path"`
// Committer is the identity of the person who holds the ownership of
// this lock.
Committer *Committer `json:"committer"`
// LockedAt is the time at which this lock was acquired.
LockedAt time.Time `json:"locked_at"`
}
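// For reference, a Lock marshals to JSON roughly as follows (all values are
// illustrative, not taken from a real server response):
//
//	{
//	  "id": "some-lock-id",
//	  "path": "docs/design.md",
//	  "committer": {"name": "Jane Doe", "email": "jane@example.com"},
//	  "locked_at": "2017-01-02T15:04:05Z"
//	}
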
// SearchLocks returns the locks which match the given name/value filter
// If limit > 0 then search stops at that number of locks
// If localOnly = true, don't query the server and report only the locally cached locks
func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool) (locks []Lock, err error) {
if localOnly {
return c.searchCachedLocks(filter, limit)
} else {
return c.searchRemoteLocks(filter, limit)
}
}
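// For example, a caller might fetch up to ten locks on a single path from the
// server (the "path" filter key mirrors the manual filtering below; the file
// name is an assumption for illustration):
//
//	locks, err := client.SearchLocks(map[string]string{"path": "docs/design.md"}, 10, false)
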
func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) {
cachedlocks := c.cache.Locks()
path, filterByPath := filter["path"]
id, filterById := filter["id"]
lockCount := 0
locks := make([]Lock, 0, len(cachedlocks))
for _, l := range cachedlocks {
// Manually filter by Path/Id
if (filterByPath && path != l.Path) ||
(filterById && id != l.Id) {
continue
}
locks = append(locks, l)
lockCount++
if limit > 0 && lockCount >= limit {
break
}
}
return locks, nil
}
func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) {
locks := make([]Lock, 0, limit)
apifilters := make([]lockFilter, 0, len(filter))
for k, v := range filter {
apifilters = append(apifilters, lockFilter{Property: k, Value: v})
}
query := &lockSearchRequest{Filters: apifilters}
for {
list, _, err := c.client.Search(c.Remote, query)
if err != nil {
return locks, errors.Wrap(err, "locking")
}
if list.Err != "" {
// Surface the error message reported by the server; err is nil here.
return locks, errors.Wrap(errors.New(list.Err), "locking")
}
for _, l := range list.Locks {
locks = append(locks, l)
if limit > 0 && len(locks) >= limit {
// Exit outer loop too
return locks, nil
}
}
if list.NextCursor != "" {
query.Cursor = list.NextCursor
} else {
break
}
}
return locks, nil
}
// lockIdFromPath makes a call to the LFS API and resolves the ID for the lock
// at the given path.
//
// If the API call fails, an error will be returned. If multiple locks match
// the given path (should not happen during real-world usage), an error will be
// returned. If no locks match the given path, an error will be returned.
//
// If the API call is successful, and only one lock matches the given filepath,
// then its ID will be returned, along with a value of "nil" for the error.
func (c *Client) lockIdFromPath(path string) (string, error) {
list, _, err := c.client.Search(c.Remote, &lockSearchRequest{
Filters: []lockFilter{
{Property: "path", Value: path},
},
})
if err != nil {
return "", err
}
switch len(list.Locks) {
case 0:
return "", ErrNoMatchingLocks
case 1:
return list.Locks[0].Id, nil
default:
return "", ErrLockAmbiguous
}
}
// Fetch locked files for the current committer and cache them locally
// This can be used to sync up locked files when moving machines
func (c *Client) refreshLockCache() error {
// TODO: filters don't seem to currently define how to search for a
// committer's email. Is it "committer.email"? For now, just iterate
locks, err := c.SearchLocks(nil, 0, false)
if err != nil {
return err
}
// We're going to overwrite the entire local cache
c.cache.Clear()
_, email := c.client.CurrentUser()
for _, l := range locks {
if l.Committer.Email == email {
c.cache.Add(l)
}
}
return nil
}
// IsFileLockedByCurrentCommitter returns whether a file is locked by the
// current committer, as cached locally
func (c *Client) IsFileLockedByCurrentCommitter(path string) bool {
filter := map[string]string{"path": path}
locks, err := c.searchCachedLocks(filter, 1)
if err != nil {
tracerx.Printf("Error searching cached locks: %s\nForcing remote search", err)
locks, _ = c.searchRemoteLocks(filter, 1)
}
return len(locks) > 0
}
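// An illustrative guard before modifying a lockable file (the path and the
// error message are assumptions, not part of this package):
//
//	if !client.IsFileLockedByCurrentCommitter("docs/design.md") {
//		return errors.New("file is not locked by the current user")
//	}
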
func init() {
kv.RegisterTypeForStorage(&Lock{})
}
type nilLockCacher struct{}
func (c *nilLockCacher) Add(l Lock) error {
return nil
}
func (c *nilLockCacher) RemoveByPath(filePath string) error {
return nil
}
func (c *nilLockCacher) RemoveById(id string) error {
return nil
}
func (c *nilLockCacher) Locks() []Lock {
return nil
}
func (c *nilLockCacher) Clear() {}
func (c *nilLockCacher) Save() error {
return nil
}