From 83828f6042f103a092235f358344b5b0df4d04c7 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 7 Nov 2016 17:43:26 +0000
Subject: [PATCH 01/68] First pass at an in-memory, optimistically locked
 key/value store

---
 tools/keyvaluestore.go | 164 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 164 insertions(+)
 create mode 100644 tools/keyvaluestore.go

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
new file mode 100644
index 00000000..e21833db
--- /dev/null
+++ b/tools/keyvaluestore.go
@@ -0,0 +1,164 @@
+package tools
+
+import (
+	"encoding/gob"
+	"fmt"
+	"os"
+	"sync"
+)
+
+// KeyValueStore provides an in-memory key/value store which is persisted to
+// a file. The file handle itself is not kept locked for the duration; it is
+// only locked during load and save, to make it concurrency friendly. When
+// saving, the store uses optimistic locking to determine whether the db on disk
+// has been modified by another process; in which case it loads the latest
+// version and re-applies modifications made during this session. This means
+// the Lost Update db concurrency issue is possible; so don't use this if you
+// need more DB integrity than Read Committed isolation levels.
+type KeyValueStore struct {
+	mu       sync.RWMutex
+	filename string
+	data     *keyValueStoreData
+	log      []keyValueChange
+}
+
+type keyValueOperation int
+
+const (
+	// Set a value for a key
+	keyValueSetOperation = keyValueOperation(iota)
+	// Removed a value for a key
+	keyValueRemoveOperation = keyValueOperation(iota)
+)
+
+type keyValueChange struct {
+	operation keyValueOperation
+	key       string
+	value     interface{}
+}
+
+// Actual data that we persist
+type keyValueStoreData struct {
+	// version for optimistic locking, this field is incremented with every Save()
+	// MUST BE FIRST in the struct, gob serializes in order and we need to peek
+	version int64
+	db      map[string]interface{}
+}
+
+// NewKeyValueStore creates a new store and initialises it with contents from
+// the named file, if it exists
+func NewKeyValueStore(filepath string) (*KeyValueStore, error) {
+
+	var kvdata *keyValueStoreData
+	f, err := os.OpenFile(filepath, os.O_RDONLY, 0664)
+	if err == nil {
+		defer f.Close()
+		dec := gob.NewDecoder(f)
+		err := dec.Decode(&kvdata)
+		if err != nil {
+			return nil, fmt.Errorf("Problem loading key/value data from %v: %v", filepath, err)
+		}
+	} else if !os.IsNotExist(err) {
+		return nil, err
+	}
+	if kvdata == nil {
+		kvdata = &keyValueStoreData{1, make(map[string]interface{}, 100)}
+	}
+
+	return &KeyValueStore{filename: filepath, data: kvdata}, nil
+
+}
+
+// Set updates the key/value store in memory
+// Changes are not persisted until you call Save()
+func (k *KeyValueStore) Set(key string, value interface{}) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	k.data.db[key] = value
+	k.log = append(k.log, keyValueChange{keyValueSetOperation, key, value})
+}
+
+// Remove removes the key and its value from the store in memory
+// Changes are not persisted until you call Save()
+func (k *KeyValueStore) Remove(key string) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	delete(k.data.db, key)
+	k.log = append(k.log, keyValueChange{keyValueRemoveOperation, key, nil})
+}
+
+// Get retrieves a value from the store, or nil if it is not present
+func (k *KeyValueStore) Get(key string) interface{} {
+	// Read-only lock
+	k.mu.RLock()
+	defer k.mu.RUnlock()
+
+	// zero value of interface{} is nil so this does what we want
+	return k.data.db[key]
+}
+
+// Save persists the changes made to disk
+func (k *KeyValueStore) Save() error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	// Short-circuit if we have no changes
+	if len(k.log) == 0 {
+		return nil
+	}
+
+	// firstly peek at version; open read/write to keep lock between check & write
+	f, err := os.OpenFile(k.filename, os.O_RDWR|os.O_CREATE, 0664)
+	if err != nil {
+		return err
+	}
+
+	defer f.Close()
+	// Decode *only* the version field to check whether anyone else has
+	// modified the db; gob serializes structs in order so it will always be 1st
+	dec := gob.NewDecoder(f)
+	var versionOnDisk int64
+	err = dec.Decode(&versionOnDisk)
+	if err != nil {
+		return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
+	}
+	if versionOnDisk != k.data.version {
+		// Reload data & merge
+		var dbOnDisk map[string]interface{}
+		err = dec.Decode(&dbOnDisk)
+		if err != nil {
+			return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
+		}
+		k.reapplyChanges(dbOnDisk)
+	}
+	// Now we overwrite
+	f.Seek(0, os.SEEK_SET)
+	k.data.version++
+
+	enc := gob.NewEncoder(f)
+	err = enc.Encode(k.data)
+	if err != nil {
+		return fmt.Errorf("Error while writing new key/value data to %v: %v", k.filename, err)
+	}
+	// Clear log now that it's saved
+	k.log = nil
+
+	return nil
+}
+
+// reapplyChanges replays the changes made since the last load onto baseDb
+// and stores the result as our own DB
+func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
+	for _, change := range k.log {
+		switch change.operation {
+		case keyValueSetOperation:
+			baseDb[change.key] = change.value
+		case keyValueRemoveOperation:
+			delete(baseDb, change.key)
+		}
+	}
+	k.data.db = baseDb
+
+}
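
A minimal usage sketch of the store introduced in this first patch, for orientation; the path and keys are illustrative only and error handling is elided:

    package main

    import (
    	"fmt"

    	"github.com/git-lfs/git-lfs/tools"
    )

    func main() {
    	kvs, _ := tools.NewKeyValueStore("/tmp/example.db") // any writable path
    	kvs.Set("answer", 42)                               // in memory only until Save()
    	fmt.Println(kvs.Get("answer"))                      // 42
    	kvs.Save() // persists; a save from another process is reconciled via the version field
    }
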
From 49444eeeefab19db82d19f0a422966c70be552dc Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 12:59:05 +0000
Subject: [PATCH 02/68] Don't try to decode a 0-byte key value store file,
 just ignore
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 tools/keyvaluestore.go | 44 +++++++++++++++++++++++++-----------------
 1 file changed, 26 insertions(+), 18 deletions(-)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index e21833db..4b0c9a8e 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -50,15 +50,17 @@
 func NewKeyValueStore(filepath string) (*KeyValueStore, error) {
 
 	var kvdata *keyValueStoreData
+	stat, _ := os.Stat(filepath)
 	f, err := os.OpenFile(filepath, os.O_RDONLY, 0664)
-	if err == nil {
+	// Only try to load if file has some bytes in it
+	if err == nil && stat.Size() > 0 {
 		defer f.Close()
 		dec := gob.NewDecoder(f)
 		err := dec.Decode(&kvdata)
 		if err != nil {
 			return nil, fmt.Errorf("Problem loading key/value data from %v: %v", filepath, err)
 		}
-	} else if !os.IsNotExist(err) {
+	} else if err != nil && !os.IsNotExist(err) {
 		return nil, err
 	}
 	if kvdata == nil {
@@ -114,27 +116,33 @@ func (k *KeyValueStore) Save() error {
 	if err != nil {
 		return err
 	}
+	stat, _ := os.Stat(k.filename)
 
 	defer f.Close()
-	// Decode *only* the version field to check whether anyone else has
-	// modified the db; gob serializes structs in order so it will always be 1st
-	dec := gob.NewDecoder(f)
-	var versionOnDisk int64
-	err = dec.Decode(&versionOnDisk)
-	if err != nil {
-		return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
-	}
-	if versionOnDisk != k.data.version {
-		// Reload data & merge
-		var dbOnDisk map[string]interface{}
-		err = dec.Decode(&dbOnDisk)
+
+	// Only try to peek if > 0 bytes, ignore empty files (decoder will fail)
+	if stat.Size() > 0 {
+		var versionOnDisk int64
+		// Decode *only* the version field to check whether anyone else has
+		// modified the db; gob serializes structs in order so it will always be 1st
+		dec := gob.NewDecoder(f)
+		err = dec.Decode(&versionOnDisk)
 		if err != nil {
-			return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
+			return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
 		}
-		k.reapplyChanges(dbOnDisk)
+		if versionOnDisk != k.data.version {
+			// Reload data & merge
+			var dbOnDisk map[string]interface{}
+			err = dec.Decode(&dbOnDisk)
+			if err != nil {
+				return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
+			}
+			k.reapplyChanges(dbOnDisk)
+		}
+		// Now we overwrite
+		f.Seek(0, os.SEEK_SET)
 	}
-	// Now we overwrite
-	f.Seek(0, os.SEEK_SET)
+
 	k.data.version++
 
 	enc := gob.NewEncoder(f)

From 28163b8ad314ccac0ac2ae18b0618b61eac6fbe4 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 14:42:15 +0000
Subject: [PATCH 03/68] Need to export KV data members

---
 tools/keyvaluestore.go | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index 4b0c9a8e..f85ff1ee 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -41,8 +41,8 @@ type keyValueChange struct {
 type keyValueStoreData struct {
 	// version for optimistic locking, this field is incremented with every Save()
 	// MUST BE FIRST in the struct, gob serializes in order and we need to peek
-	version int64
-	db      map[string]interface{}
+	Version int64
+	Db      map[string]interface{}
 }
 
 // NewKeyValueStore creates a new store and initialises it with contents from
@@ -77,7 +77,7 @@ func (k *KeyValueStore) Set(key string, value interface{}) {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
-	k.data.db[key] = value
+	k.data.Db[key] = value
 	k.log = append(k.log, keyValueChange{keyValueSetOperation, key, value})
 }
 
@@ -87,7 +87,7 @@ func (k *KeyValueStore) Remove(key string) {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
-	delete(k.data.db, key)
+	delete(k.data.Db, key)
 	k.log = append(k.log, keyValueChange{keyValueRemoveOperation, key, nil})
 }
 
@@ -98,7 +98,7 @@ func (k *KeyValueStore) Get(key string) interface{} {
 	defer k.mu.RUnlock()
 
 	// zero value of interface{} is nil so this does what we want
-	return k.data.db[key]
+	return k.data.Db[key]
 }
 
 // Save persists the changes made to disk
@@ -130,7 +130,7 @@ func (k *KeyValueStore) Save() error {
 	if err != nil {
 		return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
 	}
-	if versionOnDisk != k.data.version {
+	if versionOnDisk != k.data.Version {
 		// Reload data & merge
 		var dbOnDisk map[string]interface{}
 		err = dec.Decode(&dbOnDisk)
@@ -143,7 +143,7 @@ func (k *KeyValueStore) Save() error {
 		f.Seek(0, os.SEEK_SET)
 	}
 
-	k.data.version++
+	k.data.Version++
 
 	enc := gob.NewEncoder(f)
 	err = enc.Encode(k.data)
@@ -167,6 +167,6 @@ func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
 			delete(baseDb, change.key)
 		}
 	}
-	k.data.db = baseDb
+	k.data.Db = baseDb
 
 }

From 5a4f853931a8d0def364dfc30ade8525043acb64 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 14:57:28 +0000
Subject: [PATCH 04/68] Need to allow registration of custom structs for
 storage

Could do this automatically using reflection? This is faster for now.
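
For context: gob will not encode a concrete type held in an interface{} slot unless
that type has been registered first, failing with a "type not registered for
interface" error. A sketch of the intended call pattern (customData is an
illustrative type, mirroring the test added in the next patch; kvs is an
existing *KeyValueStore):

    type customData struct {
    	Val1 string
    	Val2 int
    }

    // Register once, before any Set()/Save() of this type
    RegisterTypeForKeyValueStorage(&customData{})
    kvs.Set("structVal", &customData{"example", 20})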
---
 tools/keyvaluestore.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index f85ff1ee..25d7af7e 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -170,3 +170,10 @@ func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
 	k.data.Db = baseDb
 
 }
+
+// RegisterTypeForKeyValueStorage registers a custom type (e.g. a struct) for
+// use in the key value store. This is necessary if you intend to pass custom
+// structs to KeyValueStore.Set() rather than primitive types.
+func RegisterTypeForKeyValueStorage(val interface{}) {
+	gob.Register(val)
+}

From 1dce30d7b96aeb6bb9b598c3a6b2fbdbda43a488 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 14:57:42 +0000
Subject: [PATCH 05/68] Initial key value store test

---
 tools/keyvaluestore_test.go | 69 +++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
 create mode 100644 tools/keyvaluestore_test.go

diff --git a/tools/keyvaluestore_test.go b/tools/keyvaluestore_test.go
new file mode 100644
index 00000000..5b8986cd
--- /dev/null
+++ b/tools/keyvaluestore_test.go
@@ -0,0 +1,69 @@
+package tools
+
+import (
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestKeyValueStoreSimple(t *testing.T) {
+	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest1")
+	assert.Nil(t, err)
+	filename := tmpf.Name()
+	defer os.Remove(filename)
+	tmpf.Close()
+
+	kvs, err := NewKeyValueStore(filename)
+	assert.Nil(t, err)
+
+	// We'll include storing custom structs
+	type customData struct {
+		Val1 string
+		Val2 int
+	}
+	// Needed to store custom struct
+	RegisterTypeForKeyValueStorage(&customData{})
+
+	kvs.Set("stringVal", "This is a string value")
+	kvs.Set("intVal", 3)
+	kvs.Set("floatVal", 3.142)
+	kvs.Set("structVal", &customData{"structTest", 20})
+
+	s := kvs.Get("stringVal")
+	assert.Equal(t, "This is a string value", s)
+	i := kvs.Get("intVal")
+	assert.Equal(t, 3, i)
+	f := kvs.Get("floatVal")
+	assert.Equal(t, 3.142, f)
+	c := kvs.Get("structVal")
+	assert.Equal(t, c, &customData{"structTest", 20})
+	n := kvs.Get("noValue")
+	assert.Nil(t, n)
+
+	kvs.Remove("stringVal")
+	s = kvs.Get("stringVal")
+	assert.Nil(t, s)
+	// Set the string value again before saving
+	kvs.Set("stringVal", "This is a string value")
+
+	err = kvs.Save()
+	assert.Nil(t, err)
+	kvs = nil
+
+	// Now confirm that we can read it all back
+	kvs2, err := NewKeyValueStore(filename)
+	assert.Nil(t, err)
+	s = kvs2.Get("stringVal")
+	assert.Equal(t, "This is a string value", s)
+	i = kvs2.Get("intVal")
+	assert.Equal(t, 3, i)
+	f = kvs2.Get("floatVal")
+	assert.Equal(t, 3.142, f)
+	c = kvs2.Get("structVal")
+	assert.Equal(t, c, &customData{"structTest", 20})
+	n = kvs2.Get("noValue")
+	assert.Nil(t, n)
+
+}

From 5a6caffa4d167b96e98822303e427702c5bc5abc Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 15:55:05 +0000
Subject: [PATCH 06/68] Manually serialize version & db in order so we can
 decode separately

Some refactoring to make it cleaner when doing this
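
The property relied on here: a single gob stream can carry several values back
to back, and a decoder consumes exactly one value per Decode call, so the
version can be peeked without decoding the whole db. A standalone sketch of
that round trip (names illustrative, error handling elided):

    var buf bytes.Buffer
    enc := gob.NewEncoder(&buf)
    enc.Encode(int64(3))                         // version first...
    enc.Encode(map[string]interface{}{"k": "v"}) // ...then the db

    var version int64
    dec := gob.NewDecoder(&buf)
    dec.Decode(&version) // reads only the first value; the db is still unread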
---
 tools/keyvaluestore.go | 130 ++++++++++++++++++++++++-----------------
 1 file changed, 75 insertions(+), 55 deletions(-)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index 25d7af7e..6e292b6b 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -3,6 +3,7 @@ package tools
 import (
 	"encoding/gob"
 	"fmt"
+	"io"
 	"os"
 	"sync"
 )
@@ -18,8 +19,12 @@ import (
 type KeyValueStore struct {
 	mu       sync.RWMutex
 	filename string
-	data     *keyValueStoreData
 	log      []keyValueChange
+
+	// This is the persistent data
+	// version for optimistic locking, this field is incremented with every Save()
+	version int64
+	db      map[string]interface{}
 }
 
 type keyValueOperation int
@@ -37,38 +42,12 @@ type keyValueChange struct {
 	value     interface{}
 }
 
-// Actual data that we persist
-type keyValueStoreData struct {
-	// version for optimistic locking, this field is incremented with every Save()
-	// MUST BE FIRST in the struct, gob serializes in order and we need to peek
-	Version int64
-	Db      map[string]interface{}
-}
-
 // NewKeyValueStore creates a new store and initialises it with contents from
 // the named file, if it exists
 func NewKeyValueStore(filepath string) (*KeyValueStore, error) {
-
-	var kvdata *keyValueStoreData
-	stat, _ := os.Stat(filepath)
-	f, err := os.OpenFile(filepath, os.O_RDONLY, 0664)
-	// Only try to load if file has some bytes in it
-	if err == nil && stat.Size() > 0 {
-		defer f.Close()
-		dec := gob.NewDecoder(f)
-		err := dec.Decode(&kvdata)
-		if err != nil {
-			return nil, fmt.Errorf("Problem loading key/value data from %v: %v", filepath, err)
-		}
-	} else if err != nil && !os.IsNotExist(err) {
-		return nil, err
-	}
-	if kvdata == nil {
-		kvdata = &keyValueStoreData{1, make(map[string]interface{}, 100)}
-	}
-
-	return &KeyValueStore{filename: filepath, data: kvdata}, nil
-
+	kv := &KeyValueStore{filename: filepath, db: make(map[string]interface{})}
+	err := kv.loadAndMergeIfNeeded()
+	return kv, err
 }
 
 // Set updates the key/value store in memory
@@ -77,7 +56,7 @@ func (k *KeyValueStore) Set(key string, value interface{}) {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
-	k.data.Db[key] = value
+	k.db[key] = value
 	k.log = append(k.log, keyValueChange{keyValueSetOperation, key, value})
 }
 
@@ -87,7 +66,7 @@ func (k *KeyValueStore) Remove(key string) {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
-	delete(k.data.Db, key)
+	delete(k.db, key)
 	k.log = append(k.log, keyValueChange{keyValueRemoveOperation, key, nil})
 }
 
@@ -98,10 +77,11 @@ func (k *KeyValueStore) Get(key string) interface{} {
 	defer k.mu.RUnlock()
 
 	// zero value of interface{} is nil so this does what we want
-	return k.data.Db[key]
+	return k.db[key]
 }
 
 // Save persists the changes made to disk
+// If any changes have been written by other code they will be merged
 func (k *KeyValueStore) Save() error {
 	k.mu.Lock()
 	defer k.mu.Unlock()
@@ -120,33 +100,21 @@ func (k *KeyValueStore) Save() error {
 
 	defer f.Close()
 
-	// Only try to peek if > 0 bytes, ignore empty files (decoder will fail)
+	// Only try to merge if > 0 bytes, ignore empty files (decoder will fail)
 	if stat.Size() > 0 {
-		var versionOnDisk int64
-		// Decode *only* the version field to check whether anyone else has
-		// modified the db; gob serializes structs in order so it will always be 1st
-		dec := gob.NewDecoder(f)
-		err = dec.Decode(&versionOnDisk)
-		if err != nil {
-			return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
-		}
-		if versionOnDisk != k.data.Version {
-			// Reload data & merge
-			var dbOnDisk map[string]interface{}
-			err = dec.Decode(&dbOnDisk)
-			if err != nil {
-				return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
-			}
-			k.reapplyChanges(dbOnDisk)
-		}
-		// Now we overwrite
+		k.loadAndMergeReaderIfNeeded(f)
+		// Now we overwrite the file
 		f.Seek(0, os.SEEK_SET)
 	}
 
-	k.data.Version++
+	k.version++
 
 	enc := gob.NewEncoder(f)
-	err = enc.Encode(k.data)
+	err = enc.Encode(k.version)
+	if err != nil {
+		return fmt.Errorf("Error while writing version data to %v: %v", k.filename, err)
+	}
+	err = enc.Encode(k.db)
 	if err != nil {
 		return fmt.Errorf("Error while writing new key/value data to %v: %v", k.filename, err)
 	}
@@ -156,6 +124,56 @@ func (k *KeyValueStore) Save() error {
 	return nil
 }
 
+// Reads as little as possible from the passed in file to determine if the
+// contents are different from the version already held. If so, reads the
+// contents and merges with any outstanding changes. If not, stops early without
+// reading the rest of the file
+func (k *KeyValueStore) loadAndMergeIfNeeded() error {
+	stat, err := os.Stat(k.filename)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil // missing is OK
+		}
+		return err
+	}
+	// Do nothing if empty file
+	if stat.Size() == 0 {
+		return nil
+	}
+
+	f, err := os.OpenFile(k.filename, os.O_RDONLY, 0664)
+	if err == nil {
+		defer f.Close()
+		return k.loadAndMergeReaderIfNeeded(f)
+	} else if !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// As loadAndMergeIfNeeded but lets caller decide how to manage file handles
+func (k *KeyValueStore) loadAndMergeReaderIfNeeded(f io.Reader) error {
+	var versionOnDisk int64
+	// Decode *only* the version field to check whether anyone else has
+	// modified the db; gob serializes structs in order so it will always be 1st
+	dec := gob.NewDecoder(f)
+	err := dec.Decode(&versionOnDisk)
+	if err != nil {
+		return fmt.Errorf("Problem checking version of key/value data from %v: %v", k.filename, err)
+	}
+	// Totally uninitialised Version == 0, saved versions are always >=1
+	if versionOnDisk != k.version {
+		// Reload data & merge
+		var dbOnDisk map[string]interface{}
+		err = dec.Decode(&dbOnDisk)
+		if err != nil {
+			return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
+		}
+		k.reapplyChanges(dbOnDisk)
+	}
+	return nil
+}
+
 // reapplyChanges replays the changes made since the last load onto baseDb
 // and stores the result as our own DB
 func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
@@ -167,7 +185,9 @@ func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
 			delete(baseDb, change.key)
 		}
 	}
-	k.data.Db = baseDb
+	// Note, log is not cleared here, that only happens on Save since it's a
+	// list of unsaved changes
+	k.db = baseDb
 
 }

From 931caaa5ac4dd788704e153f916a94560dd65571 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 16:54:38 +0000
Subject: [PATCH 07/68] Must update version loaded from disk

---
 tools/keyvaluestore.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index 6e292b6b..069908d4 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -170,6 +170,7 @@ func (k *KeyValueStore) loadAndMergeReaderIfNeeded(f io.Reader) error {
 			return fmt.Errorf("Problem reading updated key/value data from %v: %v", k.filename, err)
 		}
 		k.reapplyChanges(dbOnDisk)
+		k.version = versionOnDisk
 	}
 	return nil
 }

From ff247d5c591c152541552174fff33c16b466aaba Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 16:54:54 +0000
Subject: [PATCH 08/68] Test for merge behaviour on optimistic lock conflict

---
 tools/keyvaluestore_test.go | 49 +++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)

diff --git a/tools/keyvaluestore_test.go b/tools/keyvaluestore_test.go
index 5b8986cd..5985dbac 100644
--- a/tools/keyvaluestore_test.go
+++ b/tools/keyvaluestore_test.go
@@ -67,3 +67,52 @@ func TestKeyValueStoreSimple(t *testing.T) {
 	assert.Nil(t, n)
 
 }
+
+func TestKeyValueStoreOptimisticConflict(t *testing.T) {
+	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest1")
+	assert.Nil(t, err)
+	filename := tmpf.Name()
+	defer os.Remove(filename)
+	tmpf.Close()
+
+	kvs1, err := NewKeyValueStore(filename)
+	assert.Nil(t, err)
+
+	kvs1.Set("key1", "value1")
+	kvs1.Set("key2", "value2")
+	kvs1.Set("key3", "value3")
+	err = kvs1.Save()
+	assert.Nil(t, err)
+
+	// Load second copy & modify
+	kvs2, err := NewKeyValueStore(filename)
+	assert.Nil(t, err)
+	// New keys
+	kvs2.Set("key4", "value4_fromkvs2")
+	kvs2.Set("key5", "value5_fromkvs2")
+	// Modify a key too
+	kvs2.Set("key1", "value1_fromkvs2")
+	err = kvs2.Save()
+	assert.Nil(t, err)
+
+	// Now modify first copy & save; it should detect optimistic lock issue
+	// New item
+	kvs1.Set("key10", "value10")
+	// Overlapping item; since we save second this will overwrite one from kvs2
+	kvs1.Set("key4", "value4")
+	err = kvs1.Save()
+	assert.Nil(t, err)
+
+	// This should have merged changes from kvs2 in the process
+	v := kvs1.Get("key1")
+	assert.Equal(t, "value1_fromkvs2", v) // this one was modified by kvs2
+	v = kvs1.Get("key2")
+	assert.Equal(t, "value2", v)
+	v = kvs1.Get("key3")
+	assert.Equal(t, "value3", v)
+	v = kvs1.Get("key4")
+	assert.Equal(t, "value4", v) // we overwrote this so would not be merged
+	v = kvs1.Get("key5")
+	assert.Equal(t, "value5_fromkvs2", v)
+
+}

From eb908d7becc26b574e161f204029e16e6759e6bb Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 17:14:49 +0000
Subject: [PATCH 09/68] Truncate file when we re-write in case it gets smaller
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This actually didn’t cause any decoding issues to leave old data at the
end, but we should be efficient with on-disk data.
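
The failure mode in isolation: rewriting a file in place from offset 0 leaves
the old tail behind whenever the new payload is shorter. A sketch (scratch
file name illustrative, error handling elided):

    f, _ := os.Create("demo.db")
    f.WriteString("a fairly long first version")
    f.Seek(0, os.SEEK_SET)
    f.WriteString("short") // file now reads "shortrly long first version"

Calling f.Truncate(0) before the rewrite, as this patch does, discards the
stale tail.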
---
 tools/keyvaluestore.go      |  1 +
 tools/keyvaluestore_test.go | 48 ++++++++++++++++++++++++++++++-
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index 069908d4..73903dfa 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -105,6 +105,7 @@ func (k *KeyValueStore) Save() error {
 		k.loadAndMergeReaderIfNeeded(f)
 		// Now we overwrite the file
 		f.Seek(0, os.SEEK_SET)
+		f.Truncate(0)
 	}
 
 	k.version++
diff --git a/tools/keyvaluestore_test.go b/tools/keyvaluestore_test.go
index 5985dbac..31d2ab5c 100644
--- a/tools/keyvaluestore_test.go
+++ b/tools/keyvaluestore_test.go
@@ -69,7 +69,7 @@ func TestKeyValueStoreSimple(t *testing.T) {
 }
 
 func TestKeyValueStoreOptimisticConflict(t *testing.T) {
-	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest1")
+	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest2")
 	assert.Nil(t, err)
 	filename := tmpf.Name()
 	defer os.Remove(filename)
@@ -116,3 +116,49 @@ func TestKeyValueStoreOptimisticConflict(t *testing.T) {
 	assert.Equal(t, "value5_fromkvs2", v)
 
 }
+
+func TestKeyValueStoreReduceSize(t *testing.T) {
+	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest3")
+	assert.Nil(t, err)
+	filename := tmpf.Name()
+	defer os.Remove(filename)
+	tmpf.Close()
+
+	kvs, err := NewKeyValueStore(filename)
+	assert.Nil(t, err)
+
+	kvs.Set("key1", "I woke up in a Soho doorway")
+	kvs.Set("key2", "A policeman knew my name")
+	kvs.Set("key3", "He said 'You can go sleep at home tonight")
+	kvs.Set("key4", "If you can get up and walk away'")
+
+	assert.NotNil(t, kvs.Get("key1"))
+	assert.NotNil(t, kvs.Get("key2"))
+	assert.NotNil(t, kvs.Get("key3"))
+	assert.NotNil(t, kvs.Get("key4"))
+
+	assert.Nil(t, kvs.Save())
+
+	stat1, _ := os.Stat(filename)
+
+	// Remove all but 1 key & save smaller version
+	kvs.Remove("key2")
+	kvs.Remove("key3")
+	kvs.Remove("key4")
+	assert.Nil(t, kvs.Save())
+
+	// Now reload fresh & prove works
+	kvs = nil
+
+	kvs, err = NewKeyValueStore(filename)
+	assert.Nil(t, err)
+	assert.NotNil(t, kvs.Get("key1"))
+	assert.Nil(t, kvs.Get("key2"))
+	assert.Nil(t, kvs.Get("key3"))
+	assert.Nil(t, kvs.Get("key4"))
+
+	stat2, _ := os.Stat(filename)
+
+	assert.True(t, stat2.Size() < stat1.Size(), "Size should have reduced, was %d now %d", stat1.Size(), stat2.Size())
+
+}

From c24b64772cfe4610b61b95a0f62c69dd9d0e7647 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 21 Nov 2016 18:17:59 +0000
Subject: [PATCH 10/68] PR feedback

---
 tools/keyvaluestore.go | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/tools/keyvaluestore.go b/tools/keyvaluestore.go
index 73903dfa..c1dfa3a8 100644
--- a/tools/keyvaluestore.go
+++ b/tools/keyvaluestore.go
@@ -17,6 +17,7 @@ import (
 // the Lost Update db concurrency issue is possible; so don't use this if you
 // need more DB integrity than Read Committed isolation levels.
 type KeyValueStore struct {
+	// Locks the entire store
 	mu       sync.RWMutex
 	filename string
 	log      []keyValueChange
@@ -46,8 +47,7 @@ type keyValueChange struct {
 // the named file, if it exists
 func NewKeyValueStore(filepath string) (*KeyValueStore, error) {
 	kv := &KeyValueStore{filename: filepath, db: make(map[string]interface{})}
-	err := kv.loadAndMergeIfNeeded()
-	return kv, err
+	return kv, kv.loadAndMergeIfNeeded()
 }
 
 // Set updates the key/value store in memory
@@ -57,7 +57,7 @@ func (k *KeyValueStore) Set(key string, value interface{}) {
 	defer k.mu.Unlock()
 
 	k.db[key] = value
-	k.log = append(k.log, keyValueChange{keyValueSetOperation, key, value})
+	k.logChange(keyValueSetOperation, key, value)
 }
 
 // Remove removes the key and its value from the store in memory
@@ -67,7 +67,12 @@ func (k *KeyValueStore) Remove(key string) {
 	defer k.mu.Unlock()
 
 	delete(k.db, key)
-	k.log = append(k.log, keyValueChange{keyValueRemoveOperation, key, nil})
+	k.logChange(keyValueRemoveOperation, key, nil)
+}
+
+// Append a change to the log; mutex must already be locked
+func (k *KeyValueStore) logChange(op keyValueOperation, key string, value interface{}) {
+	k.log = append(k.log, keyValueChange{op, key, value})
 }
 
 // Get retrieves a value from the store, or nil if it is not present
@@ -116,12 +121,10 @@ func (k *KeyValueStore) Save() error {
 	k.version++
 
 	enc := gob.NewEncoder(f)
-	err = enc.Encode(k.version)
-	if err != nil {
+	if err := enc.Encode(k.version); err != nil {
 		return fmt.Errorf("Error while writing version data to %v: %v", k.filename, err)
 	}
-	err = enc.Encode(k.db)
-	if err != nil {
+	if err := enc.Encode(k.db); err != nil {
 		return fmt.Errorf("Error while writing new key/value data to %v: %v", k.filename, err)
 	}
 	// Clear log now that it's saved
@@ -146,10 +149,9 @@ func (k *KeyValueStore) loadAndMergeIfNeeded() error {
 	f, err := os.OpenFile(k.filename, os.O_RDONLY, 0664)
 	if err == nil {
 		defer f.Close()
 		return k.loadAndMergeReaderIfNeeded(f)
-	} else if !os.IsNotExist(err) {
+	} else {
 		return err
 	}
-	return nil
 }

From a372626a319772a024f8f59aaf7468e57c6fcb1b Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Tue, 22 Nov 2016 10:49:15 +0000
Subject: [PATCH 11/68] Move KeyValueStore into tools/kv and shorten naming

---
 tools/{ => kv}/keyvaluestore.go      | 63 ++++++++++++++--------------
 tools/{ => kv}/keyvaluestore_test.go | 28 ++++++-------
 2 files changed, 46 insertions(+), 45 deletions(-)
 rename tools/{ => kv}/keyvaluestore.go (74%)
 rename tools/{ => kv}/keyvaluestore_test.go (85%)

diff --git a/tools/keyvaluestore.go b/tools/kv/keyvaluestore.go
similarity index 74%
rename from tools/keyvaluestore.go
rename to tools/kv/keyvaluestore.go
index c1dfa3a8..d543b9b7 100644
--- a/tools/keyvaluestore.go
+++ b/tools/kv/keyvaluestore.go
@@ -1,4 +1,4 @@
-package tools
+package kv
 
 import (
 	"encoding/gob"
@@ -8,7 +8,7 @@ import (
 	"sync"
 )
 
-// KeyValueStore provides an in-memory key/value store which is persisted to
+// Store provides an in-memory key/value store which is persisted to
 // a file. The file handle itself is not kept locked for the duration; it is
 // only locked during load and save, to make it concurrency friendly. When
 // saving, the store uses optimistic locking to determine whether the db on disk
@@ -16,11 +16,11 @@ import (
 // version and re-applies modifications made during this session. This means
 // the Lost Update db concurrency issue is possible; so don't use this if you
 // need more DB integrity than Read Committed isolation levels.
-type KeyValueStore struct {
+type Store struct {
 	// Locks the entire store
 	mu       sync.RWMutex
 	filename string
-	log      []keyValueChange
+	log      []change
 
 	// This is the persistent data
 	// version for optimistic locking, this field is incremented with every Save()
@@ -28,55 +28,56 @@ type KeyValueStore struct {
 	db map[string]interface{}
 }
 
-type keyValueOperation int
+// Type of operation; set or remove
+type operation int
 
 const (
 	// Set a value for a key
-	keyValueSetOperation = keyValueOperation(iota)
+	setOperation = operation(iota)
 	// Removed a value for a key
-	keyValueRemoveOperation = keyValueOperation(iota)
+	removeOperation = operation(iota)
 )
 
-type keyValueChange struct {
-	operation keyValueOperation
-	key       string
-	value     interface{}
+type change struct {
+	op    operation
+	key   string
+	value interface{}
 }
 
-// NewKeyValueStore creates a new store and initialises it with contents from
+// NewStore creates a new key/value store and initialises it with contents from
 // the named file, if it exists
-func NewKeyValueStore(filepath string) (*KeyValueStore, error) {
-	kv := &KeyValueStore{filename: filepath, db: make(map[string]interface{})}
+func NewStore(filepath string) (*Store, error) {
+	kv := &Store{filename: filepath, db: make(map[string]interface{})}
 	return kv, kv.loadAndMergeIfNeeded()
 }
 
 // Set updates the key/value store in memory
 // Changes are not persisted until you call Save()
-func (k *KeyValueStore) Set(key string, value interface{}) {
+func (k *Store) Set(key string, value interface{}) {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
 	k.db[key] = value
-	k.logChange(keyValueSetOperation, key, value)
+	k.logChange(setOperation, key, value)
 }
 
 // Remove removes the key and its value from the store in memory
 // Changes are not persisted until you call Save()
-func (k *KeyValueStore) Remove(key string) {
+func (k *Store) Remove(key string) {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
 	delete(k.db, key)
-	k.logChange(keyValueRemoveOperation, key, nil)
+	k.logChange(removeOperation, key, nil)
}
 
 // Append a change to the log; mutex must already be locked
-func (k *KeyValueStore) logChange(op keyValueOperation, key string, value interface{}) {
-	k.log = append(k.log, keyValueChange{op, key, value})
+func (k *Store) logChange(op operation, key string, value interface{}) {
+	k.log = append(k.log, change{op, key, value})
 }
 
 // Get retrieves a value from the store, or nil if it is not present
-func (k *KeyValueStore) Get(key string) interface{} {
+func (k *Store) Get(key string) interface{} {
 	// Read-only lock
 	k.mu.RLock()
 	defer k.mu.RUnlock()
@@ -87,7 +88,7 @@ func (k *KeyValueStore) Get(key string) interface{} {
 
 // Save persists the changes made to disk
 // If any changes have been written by other code they will be merged
-func (k *KeyValueStore) Save() error {
+func (k *Store) Save() error {
 	k.mu.Lock()
 	defer k.mu.Unlock()
 
@@ -132,7 +133,7 @@ func (k *KeyValueStore) Save() error {
 // contents are different from the version already held. If so, reads the
 // contents and merges with any outstanding changes. If not, stops early without
 // reading the rest of the file
-func (k *KeyValueStore) loadAndMergeIfNeeded() error {
+func (k *Store) loadAndMergeIfNeeded() error {
 	stat, err := os.Stat(k.filename)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -155,7 +156,7 @@ func (k *KeyValueStore) loadAndMergeIfNeeded() error {
 }
 
 // As loadAndMergeIfNeeded but lets caller decide how to manage file handles
-func (k *KeyValueStore) loadAndMergeReaderIfNeeded(f io.Reader) error {
+func (k *Store) loadAndMergeReaderIfNeeded(f io.Reader) error {
 	var versionOnDisk int64
 	// Decode *only* the version field to check whether anyone else has
 	// modified the db; gob serializes structs in order so it will always be 1st
@@ -180,12 +181,12 @@ func (k *KeyValueStore) loadAndMergeReaderIfNeeded(f io.Reader) error {
 
 // reapplyChanges replays the changes made since the last load onto baseDb
 // and stores the result as our own DB
-func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
+func (k *Store) reapplyChanges(baseDb map[string]interface{}) {
 	for _, change := range k.log {
-		switch change.operation {
-		case keyValueSetOperation:
+		switch change.op {
+		case setOperation:
 			baseDb[change.key] = change.value
-		case keyValueRemoveOperation:
+		case removeOperation:
 			delete(baseDb, change.key)
 		}
 	}
@@ -195,9 +196,9 @@ func (k *KeyValueStore) reapplyChanges(baseDb map[string]interface{}) {
 
 }
 
-// RegisterTypeForKeyValueStorage registers a custom type (e.g. a struct) for
+// RegisterTypeForStorage registers a custom type (e.g. a struct) for
 // use in the key value store. This is necessary if you intend to pass custom
-// structs to KeyValueStore.Set() rather than primitive types.
-func RegisterTypeForKeyValueStorage(val interface{}) {
+// structs to Store.Set() rather than primitive types.
+func RegisterTypeForStorage(val interface{}) {
 	gob.Register(val)
 }
diff --git a/tools/keyvaluestore_test.go b/tools/kv/keyvaluestore_test.go
similarity index 85%
rename from tools/keyvaluestore_test.go
rename to tools/kv/keyvaluestore_test.go
index 31d2ab5c..797c9463 100644
--- a/tools/keyvaluestore_test.go
+++ b/tools/kv/keyvaluestore_test.go
@@ -1,4 +1,4 @@
-package tools
+package kv
 
 import (
 	"io/ioutil"
@@ -8,14 +8,14 @@ import (
 	"github.com/stretchr/testify/assert"
 )
 
-func TestKeyValueStoreSimple(t *testing.T) {
-	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest1")
+func TestStoreSimple(t *testing.T) {
+	tmpf, err := ioutil.TempFile("", "lfstest1")
 	assert.Nil(t, err)
 	filename := tmpf.Name()
 	defer os.Remove(filename)
 	tmpf.Close()
 
-	kvs, err := NewKeyValueStore(filename)
+	kvs, err := NewStore(filename)
 	assert.Nil(t, err)
 
 	// We'll include storing custom structs
@@ -24,7 +24,7 @@ func TestStoreSimple(t *testing.T) {
 		Val2 int
 	}
 	// Needed to store custom struct
-	RegisterTypeForKeyValueStorage(&customData{})
+	RegisterTypeForStorage(&customData{})
 
 	kvs.Set("stringVal", "This is a string value")
 	kvs.Set("intVal", 3)
@@ -53,7 +53,7 @@ func TestStoreSimple(t *testing.T) {
 	kvs = nil
 
 	// Now confirm that we can read it all back
-	kvs2, err := NewKeyValueStore(filename)
+	kvs2, err := NewStore(filename)
 	assert.Nil(t, err)
 	s = kvs2.Get("stringVal")
 	assert.Equal(t, "This is a string value", s)
@@ -68,14 +68,14 @@ func TestStoreSimple(t *testing.T) {
 
 }
 
-func TestKeyValueStoreOptimisticConflict(t *testing.T) {
-	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest2")
+func TestStoreOptimisticConflict(t *testing.T) {
+	tmpf, err := ioutil.TempFile("", "lfstest2")
 	assert.Nil(t, err)
 	filename := tmpf.Name()
 	defer os.Remove(filename)
 	tmpf.Close()
 
-	kvs1, err := NewKeyValueStore(filename)
+	kvs1, err := NewStore(filename)
 	assert.Nil(t, err)
 
 	kvs1.Set("key1", "value1")
@@ -85,7 +85,7 @@ func TestStoreOptimisticConflict(t *testing.T) {
 	assert.Nil(t, err)
 
 	// Load second copy & modify
-	kvs2, err := NewKeyValueStore(filename)
+	kvs2, err := NewStore(filename)
 	assert.Nil(t, err)
 	// New keys
 	kvs2.Set("key4", "value4_fromkvs2")
@@ -117,14 +117,14 @@ func TestStoreOptimisticConflict(t *testing.T) {
 
 }
 
-func TestKeyValueStoreReduceSize(t *testing.T) {
-	tmpf, err := ioutil.TempFile("", "lfskeyvaluetest3")
+func TestStoreReduceSize(t *testing.T) {
+	tmpf, err := ioutil.TempFile("", "lfstest3")
 	assert.Nil(t, err)
 	filename := tmpf.Name()
 	defer os.Remove(filename)
 	tmpf.Close()
 
-	kvs, err := NewKeyValueStore(filename)
+	kvs, err := NewStore(filename)
 	assert.Nil(t, err)
 
 	kvs.Set("key1", "I woke up in a Soho doorway")

From a047f0a49461580b65c99c1da9dc5b567a45a45e Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Wed, 23 Nov 2016 11:35:13 +0000
Subject: [PATCH 12/68] PR review; tidy stat

---
 tools/kv/keyvaluestore.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/tools/kv/keyvaluestore.go b/tools/kv/keyvaluestore.go
index d543b9b7..2c60c8e7 100644
--- a/tools/kv/keyvaluestore.go
+++ b/tools/kv/keyvaluestore.go
@@ -102,12 +102,11 @@ func (k *Store) Save() error {
 	if err != nil {
 		return err
 	}
-	stat, _ := os.Stat(k.filename)
 
 	defer f.Close()
 
 	// Only try to merge if > 0 bytes, ignore empty files (decoder will fail)
-	if stat.Size() > 0 {
+	if stat, _ := f.Stat(); stat.Size() > 0 {
 		k.loadAndMergeReaderIfNeeded(f)
 		// Now we overwrite the file
 		f.Seek(0, os.SEEK_SET)

From 092c879dbe42594f603145736521994593a89a9a Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 28 Nov 2016 15:12:47 +0000
Subject: [PATCH 13/68] Move lock implementation to separate package

---
 commands/command_lock.go   |  36 +------
 commands/command_locks.go  |  57 ++++-------
 commands/command_unlock.go |  69 ++-----------
 commands/commands.go       |   5 -
 locking/cache.go           |  36 +++++++
 locking/locks.go           | 198 +++++++++++++++++++++++++++++++++++++
 6 files changed, 267 insertions(+), 134 deletions(-)
 create mode 100644 locking/cache.go
 create mode 100644 locking/locks.go

diff --git a/commands/command_lock.go b/commands/command_lock.go
index c5c5c7c9..d77f567d 100644
--- a/commands/command_lock.go
+++ b/commands/command_lock.go
@@ -6,60 +6,34 @@ import (
 	"path/filepath"
 	"strings"
 
-	"github.com/git-lfs/git-lfs/api"
-	"github.com/git-lfs/git-lfs/config"
 	"github.com/git-lfs/git-lfs/git"
+	"github.com/git-lfs/git-lfs/locking"
 	"github.com/spf13/cobra"
 )
 
 var (
 	lockRemote     string
 	lockRemoteHelp = "specify which remote to use when interacting with locks"
-
-	// TODO(taylor): consider making this (and the above flag) a property of
-	// some parent-command, or another similarly less ugly way of handling
-	// this
-	setLockRemoteFor = func(c *config.Configuration) {
-		c.CurrentRemote = lockRemote
-	}
 )
 
 func lockCommand(cmd *cobra.Command, args []string) {
-	setLockRemoteFor(cfg)
 	if len(args) == 0 {
 		Print("Usage: git lfs lock <path>")
 		return
 	}
 
-	latest, err := git.CurrentRemoteRef()
-	if err != nil {
-		Error(err.Error())
-		Exit("Unable to determine lastest remote ref for branch.")
-	}
-
 	path, err := lockPath(args[0])
 	if err != nil {
 		Exit(err.Error())
 	}
 
-	s, resp := API.Locks.Lock(&api.LockRequest{
-		Path:               path,
-		Committer:          api.CurrentCommitter(),
-		LatestRemoteCommit: latest.Sha,
-	})
-
-	if _, err := API.Do(s); err != nil {
-		Error(err.Error())
-		Exit("Error communicating with LFS API.")
+	id, err := locking.Lock(path, lockRemote)
+	if err != nil {
+		Exit("Lock failed: %v", err)
 	}
 
-	if len(resp.Err) > 0 {
-		Error(resp.Err)
-		Exit("Server unable to create lock.")
-	}
-
-	Print("\n'%s' was locked (%s)", args[0], resp.Lock.Id)
+	Print("\n'%s' was locked (%s)", args[0], id)
 }
 
 // lockPaths relativizes the given filepath such that it is relative to the root
diff --git a/commands/command_locks.go b/commands/command_locks.go
index 10771ca0..3dde4ab5 100644
--- a/commands/command_locks.go
+++ b/commands/command_locks.go
@@ -1,7 +1,7 @@
 package commands
 
 import (
-	"github.com/git-lfs/git-lfs/api"
+	"github.com/git-lfs/git-lfs/locking"
 	"github.com/spf13/cobra"
 )
 
@@ -10,45 +10,26 @@ var (
 )
 
 func locksCommand(cmd *cobra.Command, args []string) {
-	setLockRemoteFor(cfg)
 	filters, err := locksCmdFlags.Filters()
 	if err != nil {
-		Error(err.Error())
+		Exit("Error building filters: %v", err)
 	}
 
-	var locks []api.Lock
+	var lockCount int
+	locks := locking.SearchLocks(lockRemote, filters, locksCmdFlags.Limit)
 
-	query := &api.LockSearchRequest{Filters: filters}
-	for {
-		s, resp := API.Locks.Search(query)
-		if _, err := API.Do(s); err != nil {
-			Error(err.Error())
-			Exit("Error communicating with LFS API.")
-		}
-
-		if resp.Err != "" {
-			Error(resp.Err)
-		}
-
-		locks = append(locks, resp.Locks...)
-
-		if locksCmdFlags.Limit > 0 && len(locks) > locksCmdFlags.Limit {
-			locks = locks[:locksCmdFlags.Limit]
-			break
-		}
-
-		if resp.NextCursor != "" {
-			query.Cursor = resp.NextCursor
-		} else {
-			break
-		}
-	}
-
-	Print("\n%d lock(s) matched query:", len(locks))
-	for _, lock := range locks {
+	for lock := range locks.Results {
 		Print("%s\t%s <%s>", lock.Path, lock.Committer.Name, lock.Committer.Email)
+		lockCount++
 	}
+	err = locks.Wait()
+
+	if err != nil {
+		Exit("Error while retrieving locks: %v", err)
+	}
+
+	Print("\n%d lock(s) matched query.", lockCount)
 }
 
 // locksFlags wraps up and holds all of the flags that can be given to the
@@ -65,11 +46,9 @@ type locksFlags struct {
 	Limit int
 }
 
-// Filters produces a slice of api.Filter instances based on the internal state
-// of this locksFlags instance. The return value of this method is capable (and
-// recommend to be used with) the api.LockSearchRequest type.
-func (l *locksFlags) Filters() ([]api.Filter, error) {
-	filters := make([]api.Filter, 0)
+// Filters produces a filter based on locksFlags instance.
+func (l *locksFlags) Filters() (map[string]string, error) {
+	filters := make(map[string]string)
 
 	if l.Path != "" {
 		path, err := lockPath(l.Path)
@@ -77,10 +56,10 @@ type locksFlags struct {
 			return nil, err
 		}
 
-		filters = append(filters, api.Filter{"path", path})
+		filters["path"] = path
 	}
 	if l.Id != "" {
-		filters = append(filters, api.Filter{"id", l.Id})
+		filters["id"] = l.Id
 	}
 
 	return filters, nil
diff --git a/commands/command_unlock.go b/commands/command_unlock.go
index a52aa731..9d954367 100644
--- a/commands/command_unlock.go
+++ b/commands/command_unlock.go
@@ -1,20 +1,12 @@
 package commands
 
 import (
-	"errors"
+	"github.com/git-lfs/git-lfs/locking"
 
-	"github.com/git-lfs/git-lfs/api"
 	"github.com/spf13/cobra"
 )
 
 var (
-	// errNoMatchingLocks is an error returned when no matching locks were
-	// able to be resolved
-	errNoMatchingLocks = errors.New("lfs: no matching locks found")
-	// errLockAmbiguous is an error returned when multiple matching locks
-	// were found
-	errLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous")
-
 	unlockCmdFlags unlockFlags
 )
 
@@ -29,67 +21,26 @@ type unlockFlags struct {
 }
 
 func unlockCommand(cmd *cobra.Command, args []string) {
-	setLockRemoteFor(cfg)
-
-	var id string
 	if len(args) != 0 {
 		path, err := lockPath(args[0])
 		if err != nil {
-			Error(err.Error())
+			Exit("Unable to determine path: %v", err.Error())
 		}
 
-		if id, err = lockIdFromPath(path); err != nil {
-			Error(err.Error())
+		err = locking.Unlock(path, lockRemote, unlockCmdFlags.Force)
+		if err != nil {
+			Exit("Unable to unlock: %v", err.Error())
 		}
 	} else if unlockCmdFlags.Id != "" {
-		id = unlockCmdFlags.Id
+		err := locking.UnlockById(unlockCmdFlags.Id, lockRemote, unlockCmdFlags.Force)
+		if err != nil {
+			Exit("Unable to unlock %v: %v", unlockCmdFlags.Id, err.Error())
+		}
 	} else {
 		Error("Usage: git lfs unlock (--id my-lock-id | <path>)")
 	}
 
-	s, resp := API.Locks.Unlock(id, unlockCmdFlags.Force)
-
-	if _, err := API.Do(s); err != nil {
-		Error(err.Error())
-		Exit("Error communicating with LFS API.")
-	}
-
-	if len(resp.Err) > 0 {
-		Error(resp.Err)
-		Exit("Server unable to unlock lock.")
-	}
-
-	Print("'%s' was unlocked (%s)", args[0], resp.Lock.Id)
-}
-
-// lockIdFromPath makes a call to the LFS API and resolves the ID for the locked
-// locked at the given path.
-//
-// If the API call failed, an error will be returned. If multiple locks matched
-// the given path (should not happen during real-world usage), an error will be
-// returnd. If no locks matched the given path, an error will be returned.
-//
-// If the API call is successful, and only one lock matches the given filepath,
-// then its ID will be returned, along with a value of "nil" for the error.
-func lockIdFromPath(path string) (string, error) {
-	s, resp := API.Locks.Search(&api.LockSearchRequest{
-		Filters: []api.Filter{
-			{"path", path},
-		},
-	})
-
-	if _, err := API.Do(s); err != nil {
-		return "", err
-	}
-
-	switch len(resp.Locks) {
-	case 0:
-		return "", errNoMatchingLocks
-	case 1:
-		return resp.Locks[0].Id, nil
-	default:
-		return "", errLockAmbiguous
-	}
+	Print("'%s' was unlocked", args[0])
 }
 
 func init() {
diff --git a/commands/commands.go b/commands/commands.go
index bfece5b0..e94e3686 100644
--- a/commands/commands.go
+++ b/commands/commands.go
@@ -11,7 +11,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/git-lfs/git-lfs/api"
 	"github.com/git-lfs/git-lfs/config"
 	"github.com/git-lfs/git-lfs/errors"
 	"github.com/git-lfs/git-lfs/filepathfilter"
@@ -25,10 +24,6 @@ import (
 //go:generate go run ../docs/man/mangen.go
 
 var (
-	// API is a package-local instance of the API client for use within
-	// various command implementations.
-	API = api.NewClient(nil)
-
 	Debugging   = false
 	ErrorBuffer = &bytes.Buffer{}
 	ErrorWriter = io.MultiWriter(os.Stderr, ErrorBuffer)
diff --git a/locking/cache.go b/locking/cache.go
new file mode 100644
index 00000000..126b3a5b
--- /dev/null
+++ b/locking/cache.go
@@ -0,0 +1,36 @@
+package locking
+
+// This file caches active locks locally so that we can more easily retrieve
+// a list of locally locked files without consulting the server
+// This only includes locks which the local committer has taken, not all locks
+
+// Cache a successful lock for faster local lookup later
+func cacheLock(filePath, id string) error {
+	// TODO
+	return nil
+}
+
+// Remove a cached lock by path because it's been relinquished
+func cacheUnlock(filePath string) error {
+	// TODO
+	return nil
+}
+
+// Remove a cached lock by id because it's been relinquished
+func cacheUnlockById(id string) error {
+	// TODO
+	return nil
+}
+
+// Get the list of cached locked files
+func cachedLocks() []string {
+	// TODO
+	return nil
+}
+
+// Fetch locked files for the current committer and cache them locally
+// This can be used to sync up locked files when moving machines
+func fetchLocksToCache(remoteName string) error {
+	// TODO
+	return nil
+}
diff --git a/locking/locks.go b/locking/locks.go
new file mode 100644
index 00000000..b011c42e
--- /dev/null
+++ b/locking/locks.go
@@ -0,0 +1,198 @@
+package locking
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/git-lfs/git-lfs/api"
+	"github.com/git-lfs/git-lfs/config"
+	"github.com/git-lfs/git-lfs/git"
+	"github.com/git-lfs/git-lfs/tools"
+)
+
+var (
+	// API is a package-local instance of the API client for use within
+	// various command implementations.
+	API = api.NewClient(nil)
+	// errNoMatchingLocks is an error returned when no matching locks were
+	// able to be resolved
+	errNoMatchingLocks = errors.New("lfs: no matching locks found")
+	// errLockAmbiguous is an error returned when multiple matching locks
+	// were found
+	errLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous")
+)
+
+// Lock attempts to lock a file on the given remote name
+// path must be relative to the root of the repository
+// Returns the lock id if successful, or an error
+func Lock(path, remote string) (id string, e error) {
+	// TODO: API currently relies on config.Config but should really pass to client in future
+	savedRemote := config.Config.CurrentRemote
+	config.Config.CurrentRemote = remote
+	defer func() { config.Config.CurrentRemote = savedRemote }()
+
+	// TODO: this is not really the constraint we need to avoid merges, improve as per proposal
+	latest, err := git.CurrentRemoteRef()
+	if err != nil {
+		return "", err
+	}
+
+	s, resp := API.Locks.Lock(&api.LockRequest{
+		Path:               path,
+		Committer:          api.CurrentCommitter(),
+		LatestRemoteCommit: latest.Sha,
+	})
+
+	if _, err := API.Do(s); err != nil {
+		return "", fmt.Errorf("Error communicating with LFS API: %v", err)
+	}
+
+	if len(resp.Err) > 0 {
+		return "", fmt.Errorf("Server unable to create lock: %v", resp.Err)
+	}
+
+	if err := cacheLock(resp.Lock.Path, resp.Lock.Id); err != nil {
+		return "", fmt.Errorf("Error caching lock information: %v", err)
+	}
+
+	return resp.Lock.Id, nil
+}
+
+// Unlock attempts to unlock a file on the given remote name
+// path must be relative to the root of the repository
+// Force causes the file to be unlocked from other users as well
+func Unlock(path, remote string, force bool) error {
+	// TODO: API currently relies on config.Config but should really pass to client in future
+	savedRemote := config.Config.CurrentRemote
+	config.Config.CurrentRemote = remote
+	defer func() { config.Config.CurrentRemote = savedRemote }()
+
+	id, err := lockIdFromPath(path)
+	if err != nil {
+		return fmt.Errorf("Unable to get lock id: %v", err)
+	}
+
+	return UnlockById(id, remote, force)
+
+}
+
+// Unlock attempts to unlock a lock with a given id on the remote
+// Force causes the file to be unlocked from other users as well
+func UnlockById(id, remote string, force bool) error {
+	s, resp := API.Locks.Unlock(id, force)
+
+	if _, err := API.Do(s); err != nil {
+		return fmt.Errorf("Error communicating with LFS API: %v", err)
+	}
+
+	if len(resp.Err) > 0 {
+		return fmt.Errorf("Server unable to unlock lock: %v", resp.Err)
+	}
+
+	if err := cacheUnlockById(id); err != nil {
+		return fmt.Errorf("Error caching unlock information: %v", err)
+	}
+
+	return nil
+}
+
+// ChannelWrapper for lock search to more easily return async error data via Wait()
+// See NewLockChannelWrapper for construction / use
+type LockChannelWrapper struct {
+	*tools.BaseChannelWrapper
+	Results <-chan api.Lock
+}
+
+// Construct a new channel wrapper for api.Lock
+// Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors
+func NewLockChannelWrapper(lockChan <-chan api.Lock, errChan <-chan error) *LockChannelWrapper {
+	return &LockChannelWrapper{tools.NewBaseChannelWrapper(errChan), lockChan}
+}
+
+// SearchLocks returns a channel of locks which match the given name/value filter
+// If limit > 0 then search stops at that number of locks
+func SearchLocks(remote string, filter map[string]string, limit int) *LockChannelWrapper {
+	// TODO: API currently relies on config.Config but should really pass to client in future
+	savedRemote := config.Config.CurrentRemote
+	config.Config.CurrentRemote = remote
+	errChan := make(chan error, 5) // can be multiple errors below
+	lockChan := make(chan api.Lock, 10)
+	c := NewLockChannelWrapper(lockChan, errChan)
+	go func() {
+		defer func() {
+			close(lockChan)
+			close(errChan)
+			// Only reinstate the remote after we're done
+			config.Config.CurrentRemote = savedRemote
+		}()
+
+		apifilters := make([]api.Filter, 0, len(filter))
+		for k, v := range filter {
+			apifilters = append(apifilters, api.Filter{k, v})
+		}
+		lockCount := 0
+		query := &api.LockSearchRequest{Filters: apifilters}
+	QueryLoop:
+		for {
+			s, resp := API.Locks.Search(query)
+			if _, err := API.Do(s); err != nil {
+				errChan <- fmt.Errorf("Error communicating with LFS API: %v", err)
+				break
+			}
+
+			if resp.Err != "" {
+				errChan <- fmt.Errorf("Error response from LFS API: %v", resp.Err)
+				break
+			}
+
+			for _, l := range resp.Locks {
+				lockChan <- l
+				lockCount++
+				if limit > 0 && lockCount >= limit {
+					// Exit outer loop too
+					break QueryLoop
+				}
+			}
+
+			if resp.NextCursor != "" {
+				query.Cursor = resp.NextCursor
+			} else {
+				break
+			}
+		}
+
+	}()
+
+	return c
+
+}
+
+// lockIdFromPath makes a call to the LFS API and resolves the ID for the lock
+// located at the given path.
+//
+// If the API call failed, an error will be returned. If multiple locks matched
+// the given path (should not happen during real-world usage), an error will be
+// returned. If no locks matched the given path, an error will be returned.
+//
+// If the API call is successful, and only one lock matches the given filepath,
+// then its ID will be returned, along with a value of "nil" for the error.
+func lockIdFromPath(path string) (string, error) {
+	s, resp := API.Locks.Search(&api.LockSearchRequest{
+		Filters: []api.Filter{
+			{"path", path},
+		},
+	})
+
+	if _, err := API.Do(s); err != nil {
+		return "", err
+	}
+
+	switch len(resp.Locks) {
+	case 0:
+		return "", errNoMatchingLocks
+	case 1:
+		return resp.Locks[0].Id, nil
+	default:
+		return "", errLockAmbiguous
+	}
+}
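
A usage sketch of the locking package as it stands after this patch (remote
name and path are illustrative; the search API is reworked to return a plain
slice two patches later):

    id, err := locking.Lock("docs/design.md", "origin")
    if err != nil {
    	// handle error
    }
    defer locking.UnlockById(id, "origin", false)

    locks := locking.SearchLocks("origin", map[string]string{"path": "docs/design.md"}, 0)
    for l := range locks.Results {
    	fmt.Printf("%s locked by %s\n", l.Path, l.Committer.Name)
    }
    if err := locks.Wait(); err != nil {
    	// search errors are reported after the channel closes
    }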
- API = api.NewClient(nil) + apiClient = api.NewClient(nil) // errNoMatchingLocks is an error returned when no matching locks were // able to be resolved errNoMatchingLocks = errors.New("lfs: no matching locks found") @@ -37,13 +37,13 @@ func Lock(path, remote string) (id string, e error) { return "", err } - s, resp := API.Locks.Lock(&api.LockRequest{ + s, resp := apiClient.Locks.Lock(&api.LockRequest{ Path: path, Committer: api.CurrentCommitter(), LatestRemoteCommit: latest.Sha, }) - if _, err := API.Do(s); err != nil { + if _, err := apiClient.Do(s); err != nil { return "", fmt.Errorf("Error communicating with LFS API: %v", err) } @@ -79,9 +79,9 @@ func Unlock(path, remote string, force bool) error { // Unlock attempts to unlock a lock with a given id on the remote // Force causes the file to be unlocked from other users as well func UnlockById(id, remote string, force bool) error { - s, resp := API.Locks.Unlock(id, force) + s, resp := apiClient.Locks.Unlock(id, force) - if _, err := API.Do(s); err != nil { + if _, err := apiClient.Do(s); err != nil { return fmt.Errorf("Error communicating with LFS API: %v", err) } @@ -134,8 +134,8 @@ func SearchLocks(remote string, filter map[string]string, limit int) *LockChanne query := &api.LockSearchRequest{Filters: apifilters} QueryLoop: for { - s, resp := API.Locks.Search(query) - if _, err := API.Do(s); err != nil { + s, resp := apiClient.Locks.Search(query) + if _, err := apiClient.Do(s); err != nil { errChan <- fmt.Errorf("Error communicating with LFS API: %v", err) break } @@ -177,13 +177,13 @@ func SearchLocks(remote string, filter map[string]string, limit int) *LockChanne // If the API call is successful, and only one lock matches the given filepath, // then its ID will be returned, along with a value of "nil" for the error. 
func lockIdFromPath(path string) (string, error) { - s, resp := API.Locks.Search(&api.LockSearchRequest{ + s, resp := apiClient.Locks.Search(&api.LockSearchRequest{ Filters: []api.Filter{ {"path", path}, }, }) - if _, err := API.Do(s); err != nil { + if _, err := apiClient.Do(s); err != nil { return "", err } From 5cd9dcee01a2228611b80f6a1436fbf24cde276d Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 28 Nov 2016 17:01:06 +0000 Subject: [PATCH 15/68] Drop channel wrappers in locking pkg, simplify & make self-contained --- commands/command_lock.go | 2 +- commands/command_locks.go | 9 +-- commands/command_unlock.go | 4 +- locking/locks.go | 148 +++++++++++++++++++++---------------- 4 files changed, 90 insertions(+), 73 deletions(-) diff --git a/commands/command_lock.go b/commands/command_lock.go index d77f567d..06c8f2ca 100644 --- a/commands/command_lock.go +++ b/commands/command_lock.go @@ -28,7 +28,7 @@ func lockCommand(cmd *cobra.Command, args []string) { Exit(err.Error()) } - id, err := locking.Lock(path, lockRemote) + id, err := locking.LockFile(path, lockRemote) if err != nil { Exit("Lock failed: %v", err) } diff --git a/commands/command_locks.go b/commands/command_locks.go index 3dde4ab5..fcf8e33d 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -17,13 +17,12 @@ func locksCommand(cmd *cobra.Command, args []string) { } var lockCount int - locks := locking.SearchLocks(lockRemote, filters, locksCmdFlags.Limit) - - for lock := range locks.Results { - Print("%s\t%s <%s>", lock.Path, lock.Committer.Name, lock.Committer.Email) + locks, err := locking.SearchLocks(lockRemote, filters, locksCmdFlags.Limit) + // Print any we got before exiting + for _, lock := range locks { + Print("%s\t%s <%s>", lock.Path, lock.Name, lock.Email) lockCount++ } - err = locks.Wait() if err != nil { Exit("Error while retrieving locks: %v", err) diff --git a/commands/command_unlock.go b/commands/command_unlock.go index 9d954367..d9bb80ff 100644 --- a/commands/command_unlock.go +++ b/commands/command_unlock.go @@ -27,12 +27,12 @@ func unlockCommand(cmd *cobra.Command, args []string) { Exit("Unable to determine path: %v", err.Error()) } - err = locking.Unlock(path, lockRemote, unlockCmdFlags.Force) + err = locking.UnlockFile(path, lockRemote, unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock: %v", err.Error()) } } else if unlockCmdFlags.Id != "" { - err := locking.UnlockById(unlockCmdFlags.Id, lockRemote, unlockCmdFlags.Force) + err := locking.UnlockFileById(unlockCmdFlags.Id, lockRemote, unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock %v: %v", unlockCmdFlags.Id, err.Error()) } diff --git a/locking/locks.go b/locking/locks.go index 9089d359..634d94f5 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -3,11 +3,11 @@ package locking import ( "errors" "fmt" + "time" "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" - "github.com/git-lfs/git-lfs/tools" ) var ( @@ -22,10 +22,10 @@ var ( errLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") ) -// Lock attempts to lock a file on the given remote name +// LockFile attempts to lock a file on the given remote name // path must be relative to the root of the repository // Returns the lock id if successful, or an error -func Lock(path, remote string) (id string, e error) { +func LockFile(path, remote string) (id string, e error) { // TODO: API currently relies on config.Config but should really pass to client in future savedRemote := 
config.Config.CurrentRemote
 	config.Config.CurrentRemote = remote
@@ -58,10 +58,10 @@ func Lock(path, remote string) (id string, e error) {
 	return resp.Lock.Id, nil
 }
 
-// Unlock attempts to unlock a file on the given remote name
+// UnlockFile attempts to unlock a file on the given remote name
 // path must be relative to the root of the repository
 // Force causes the file to be unlocked from other users as well
-func Unlock(path, remote string, force bool) error {
+func UnlockFile(path, remote string, force bool) error {
 	// TODO: API currently relies on config.Config but should really pass to client in future
 	savedRemote := config.Config.CurrentRemote
 	config.Config.CurrentRemote = remote
@@ -72,13 +72,13 @@ func Unlock(path, remote string, force bool) error {
 		return fmt.Errorf("Unable to get lock id: %v", err)
 	}
 
-	return UnlockById(id, remote, force)
+	return UnlockFileById(id, remote, force)
 }
 
-// Unlock attempts to unlock a lock with a given id on the remote
+// UnlockFileById attempts to unlock a lock with a given id on the remote
 // Force causes the file to be unlocked from other users as well
-func UnlockById(id, remote string, force bool) error {
+func UnlockFileById(id, remote string, force bool) error {
 	s, resp := apiClient.Locks.Unlock(id, force)
 
 	if _, err := apiClient.Do(s); err != nil {
@@ -96,74 +96,92 @@ func UnlockFileById(id, remote string, force bool) error {
 	return nil
 }
 
-// ChannelWrapper for lock search to more easily return async error data via Wait()
-// See NewPointerChannelWrapper for construction / use
-type LockChannelWrapper struct {
-	*tools.BaseChannelWrapper
-	Results <-chan api.Lock
+// Lock is a record of a locked file
+// TODO SJS: review this struct and api equivalent
+// deliberately duplicated to make this API self-contained, could be simpler
+type Lock struct {
+	// Id is the unique identifier corresponding to this particular Lock. It
+	// must be consistent with the local copy, and the server's copy.
+	Id string
+	// Path is an absolute path to the file that is locked as a part of this
+	// lock.
+	Path string
+	// Name is the name of the person holding this lock
+	Name string
+	// Email address of the person holding this lock
+	Email string
+	// CommitSHA is the commit that this Lock was created against. It is
+	// strictly equal to the SHA of the minimum commit negotiated in order
+	// to create this lock.
+	CommitSHA string
+	// LockedAt is a required parameter that represents the instant in time
+	// that this lock was created. For most server implementations, this
+	// should be set to the instant at which the lock was initially
+	// received.
+	LockedAt time.Time
+	// UnlockedAt is an optional parameter that represents the instant in
+	// time that the lock stopped being active. If the lock is still active,
+	// the server can either a) not send this field, or b) send the
+	// zero-value of time.Time.
+ UnlockedAt time.Time } -// Construct a new channel wrapper for api.Lock -// Caller can use s.Results directly for normal processing then call Wait() to finish & check for errors -func NewLockChannelWrapper(lockChan <-chan api.Lock, errChan <-chan error) *LockChannelWrapper { - return &LockChannelWrapper{tools.NewBaseChannelWrapper(errChan), lockChan} +func newLockFromApi(a api.Lock) Lock { + return Lock{ + Id: a.Id, + Path: a.Path, + Name: a.Committer.Name, + Email: a.Committer.Email, + CommitSHA: a.CommitSHA, + LockedAt: a.LockedAt, + UnlockedAt: a.UnlockedAt, + } } // SearchLocks returns a channel of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks -func SearchLocks(remote string, filter map[string]string, limit int) *LockChannelWrapper { +func SearchLocks(remote string, filter map[string]string, limit int) (locks []Lock, err error) { // TODO: API currently relies on config.Config but should really pass to client in future savedRemote := config.Config.CurrentRemote config.Config.CurrentRemote = remote - errChan := make(chan error, 5) // can be multiple errors below - lockChan := make(chan api.Lock, 10) - c := NewLockChannelWrapper(lockChan, errChan) - go func() { - defer func() { - close(lockChan) - close(errChan) - // Only reinstate the remote after we're done - config.Config.CurrentRemote = savedRemote - }() - - apifilters := make([]api.Filter, 0, len(filter)) - for k, v := range filter { - apifilters = append(apifilters, api.Filter{k, v}) - } - lockCount := 0 - query := &api.LockSearchRequest{Filters: apifilters} - QueryLoop: - for { - s, resp := apiClient.Locks.Search(query) - if _, err := apiClient.Do(s); err != nil { - errChan <- fmt.Errorf("Error communicating with LFS API: %v", err) - break - } - - if resp.Err != "" { - errChan <- fmt.Errorf("Error response from LFS API: %v", resp.Err) - break - } - - for _, l := range resp.Locks { - lockChan <- l - lockCount++ - if limit > 0 && lockCount >= limit { - // Exit outer loop too - break QueryLoop - } - } - - if resp.NextCursor != "" { - query.Cursor = resp.NextCursor - } else { - break - } - } - + defer func() { + // Only reinstate the remote after we're done + config.Config.CurrentRemote = savedRemote }() - return c + locks = make([]Lock, 0, limit) + + apifilters := make([]api.Filter, 0, len(filter)) + for k, v := range filter { + apifilters = append(apifilters, api.Filter{k, v}) + } + query := &api.LockSearchRequest{Filters: apifilters} + for { + s, resp := apiClient.Locks.Search(query) + if _, err := apiClient.Do(s); err != nil { + return locks, fmt.Errorf("Error communicating with LFS API: %v", err) + } + + if resp.Err != "" { + return locks, fmt.Errorf("Error response from LFS API: %v", resp.Err) + } + + for _, l := range resp.Locks { + locks = append(locks, newLockFromApi(l)) + if limit > 0 && len(locks) >= limit { + // Exit outer loop too + return locks, nil + } + } + + if resp.NextCursor != "" { + query.Cursor = resp.NextCursor + } else { + break + } + } + + return locks, nil } From 7f578a97cef74a2e7fef6723bfc6ce30e512e44b Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 28 Nov 2016 17:09:49 +0000 Subject: [PATCH 16/68] Make error types public --- locking/locks.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/locking/locks.go b/locking/locks.go index 634d94f5..02584f9c 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -14,12 +14,12 @@ var ( // API is a package-local instance of the API client for use within // various 
command implementations. apiClient = api.NewClient(nil) - // errNoMatchingLocks is an error returned when no matching locks were + // ErrNoMatchingLocks is an error returned when no matching locks were // able to be resolved - errNoMatchingLocks = errors.New("lfs: no matching locks found") - // errLockAmbiguous is an error returned when multiple matching locks + ErrNoMatchingLocks = errors.New("lfs: no matching locks found") + // ErrLockAmbiguous is an error returned when multiple matching locks // were found - errLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") + ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") ) // LockFile attempts to lock a file on the given remote name @@ -207,10 +207,10 @@ func lockIdFromPath(path string) (string, error) { switch len(resp.Locks) { case 0: - return "", errNoMatchingLocks + return "", ErrNoMatchingLocks case 1: return resp.Locks[0].Id, nil default: - return "", errLockAmbiguous + return "", ErrLockAmbiguous } } From 491070607c43a14709ffb1f4cdacbcf2c7c78854 Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 5 Dec 2016 09:47:29 +0000 Subject: [PATCH 17/68] Create a `Client` stateful wrapper for locking API --- commands/command_lock.go | 6 ++- commands/command_locks.go | 6 ++- commands/command_unlock.go | 8 +++- locking/cache.go | 36 -------------- locking/locks.go | 96 +++++++++++++++++++++++++++++--------- 5 files changed, 90 insertions(+), 62 deletions(-) delete mode 100644 locking/cache.go diff --git a/commands/command_lock.go b/commands/command_lock.go index 06c8f2ca..ea76a2aa 100644 --- a/commands/command_lock.go +++ b/commands/command_lock.go @@ -28,7 +28,11 @@ func lockCommand(cmd *cobra.Command, args []string) { Exit(err.Error()) } - id, err := locking.LockFile(path, lockRemote) + lockClient, err := locking.NewClient(cfg) + if err != nil { + Exit("Unable to create lock system: %v", err.Error()) + } + id, err := lockClient.LockFile(path, lockRemote) if err != nil { Exit("Lock failed: %v", err) } diff --git a/commands/command_locks.go b/commands/command_locks.go index fcf8e33d..ea4a2ea8 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -17,7 +17,11 @@ func locksCommand(cmd *cobra.Command, args []string) { } var lockCount int - locks, err := locking.SearchLocks(lockRemote, filters, locksCmdFlags.Limit) + lockClient, err := locking.NewClient(cfg) + if err != nil { + Exit("Unable to create lock system: %v", err.Error()) + } + locks, err := lockClient.SearchLocks(lockRemote, filters, locksCmdFlags.Limit) // Print any we got before exiting for _, lock := range locks { Print("%s\t%s <%s>", lock.Path, lock.Name, lock.Email) diff --git a/commands/command_unlock.go b/commands/command_unlock.go index d9bb80ff..56ab599a 100644 --- a/commands/command_unlock.go +++ b/commands/command_unlock.go @@ -21,18 +21,22 @@ type unlockFlags struct { } func unlockCommand(cmd *cobra.Command, args []string) { + lockClient, err := locking.NewClient(cfg) + if err != nil { + Exit("Unable to create lock system: %v", err.Error()) + } if len(args) != 0 { path, err := lockPath(args[0]) if err != nil { Exit("Unable to determine path: %v", err.Error()) } - err = locking.UnlockFile(path, lockRemote, unlockCmdFlags.Force) + err = lockClient.UnlockFile(path, lockRemote, unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock: %v", err.Error()) } } else if unlockCmdFlags.Id != "" { - err := locking.UnlockFileById(unlockCmdFlags.Id, lockRemote, unlockCmdFlags.Force) + err := 
lockClient.UnlockFileById(unlockCmdFlags.Id, lockRemote, unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock %v: %v", unlockCmdFlags.Id, err.Error()) } diff --git a/locking/cache.go b/locking/cache.go deleted file mode 100644 index 126b3a5b..00000000 --- a/locking/cache.go +++ /dev/null @@ -1,36 +0,0 @@ -package locking - -// This file caches active locks locally so that we can more easily retrieve -// a list of locally locked files without consulting the server -// This only includes locks which the local committer has taken, not all locks - -// Cache a successful lock for faster local lookup later -func cacheLock(filePath, id string) error { - // TODO - return nil -} - -// Remove a cached lock by path becuase it's been relinquished -func cacheUnlock(filePath string) error { - // TODO - return nil -} - -// Remove a cached lock by id becuase it's been relinquished -func cacheUnlockById(id string) error { - // TODO - return nil -} - -// Get the list of cached locked files -func cachedLocks() []string { - // TODO - return nil -} - -// Fetch locked files for the current committer and cache them locally -// This can be used to sync up locked files when moving machines -func fetchLocksToCache(remoteName string) error { - // TODO - return nil -} diff --git a/locking/locks.go b/locking/locks.go index 02584f9c..02840830 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -3,17 +3,17 @@ package locking import ( "errors" "fmt" + "os" + "path/filepath" "time" "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/git" + "github.com/git-lfs/git-lfs/tools/kv" ) var ( - // API is a package-local instance of the API client for use within - // various command implementations. - apiClient = api.NewClient(nil) // ErrNoMatchingLocks is an error returned when no matching locks were // able to be resolved ErrNoMatchingLocks = errors.New("lfs: no matching locks found") @@ -22,10 +22,31 @@ var ( ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") ) +// Client is the main interface object for the locking package +type Client struct { + cfg *config.Configuration + apiClient *api.Client + cache *kv.Store +} + +func NewClient(cfg *config.Configuration) (*Client, error) { + + apiClient := api.NewClient(nil) + + lockDir := filepath.Join(config.LocalGitStorageDir, "lfs") + os.MkdirAll(lockDir, 0755) + lockFile := filepath.Join(lockDir, "lockcache.db") + store, err := kv.NewStore(lockFile) + if err != nil { + return nil, err + } + return &Client{cfg, apiClient, store}, nil +} + // LockFile attempts to lock a file on the given remote name // path must be relative to the root of the repository // Returns the lock id if successful, or an error -func LockFile(path, remote string) (id string, e error) { +func (c *Client) LockFile(path, remote string) (id string, e error) { // TODO: API currently relies on config.Config but should really pass to client in future savedRemote := config.Config.CurrentRemote config.Config.CurrentRemote = remote @@ -37,13 +58,13 @@ func LockFile(path, remote string) (id string, e error) { return "", err } - s, resp := apiClient.Locks.Lock(&api.LockRequest{ + s, resp := c.apiClient.Locks.Lock(&api.LockRequest{ Path: path, Committer: api.CurrentCommitter(), LatestRemoteCommit: latest.Sha, }) - if _, err := apiClient.Do(s); err != nil { + if _, err := c.apiClient.Do(s); err != nil { return "", fmt.Errorf("Error communicating with LFS API: %v", err) } @@ -51,7 +72,7 @@ func LockFile(path, remote string) (id string, e error) { 
return "", fmt.Errorf("Server unable to create lock: %v", resp.Err) } - if err := cacheLock(resp.Lock.Path, resp.Lock.Id); err != nil { + if err := c.cacheLock(resp.Lock.Path, resp.Lock.Id); err != nil { return "", fmt.Errorf("Error caching lock information: %v", err) } @@ -61,27 +82,27 @@ func LockFile(path, remote string) (id string, e error) { // UnlockFile attempts to unlock a file on the given remote name // path must be relative to the root of the repository // Force causes the file to be unlocked from other users as well -func UnlockFile(path, remote string, force bool) error { +func (c *Client) UnlockFile(path, remote string, force bool) error { // TODO: API currently relies on config.Config but should really pass to client in future savedRemote := config.Config.CurrentRemote config.Config.CurrentRemote = remote defer func() { config.Config.CurrentRemote = savedRemote }() - id, err := lockIdFromPath(path) + id, err := c.lockIdFromPath(path) if err != nil { return fmt.Errorf("Unable to get lock id: %v", err) } - return UnlockFileById(id, remote, force) + return c.UnlockFileById(id, remote, force) } // UnlockFileById attempts to unlock a lock with a given id on the remote // Force causes the file to be unlocked from other users as well -func UnlockFileById(id, remote string, force bool) error { - s, resp := apiClient.Locks.Unlock(id, force) +func (c *Client) UnlockFileById(id, remote string, force bool) error { + s, resp := c.apiClient.Locks.Unlock(id, force) - if _, err := apiClient.Do(s); err != nil { + if _, err := c.apiClient.Do(s); err != nil { return fmt.Errorf("Error communicating with LFS API: %v", err) } @@ -89,7 +110,7 @@ func UnlockFileById(id, remote string, force bool) error { return fmt.Errorf("Server unable to unlock lock: %v", resp.Err) } - if err := cacheUnlockById(id); err != nil { + if err := c.cacheUnlockById(id); err != nil { return fmt.Errorf("Error caching unlock information: %v", err) } @@ -126,7 +147,7 @@ type Lock struct { UnlockedAt time.Time } -func newLockFromApi(a api.Lock) Lock { +func (c *Client) newLockFromApi(a api.Lock) Lock { return Lock{ Id: a.Id, Path: a.Path, @@ -140,7 +161,7 @@ func newLockFromApi(a api.Lock) Lock { // SearchLocks returns a channel of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks -func SearchLocks(remote string, filter map[string]string, limit int) (locks []Lock, err error) { +func (c *Client) SearchLocks(remote string, filter map[string]string, limit int) (locks []Lock, err error) { // TODO: API currently relies on config.Config but should really pass to client in future savedRemote := config.Config.CurrentRemote config.Config.CurrentRemote = remote @@ -157,8 +178,8 @@ func SearchLocks(remote string, filter map[string]string, limit int) (locks []Lo } query := &api.LockSearchRequest{Filters: apifilters} for { - s, resp := apiClient.Locks.Search(query) - if _, err := apiClient.Do(s); err != nil { + s, resp := c.apiClient.Locks.Search(query) + if _, err := c.apiClient.Do(s); err != nil { return locks, fmt.Errorf("Error communicating with LFS API: %v", err) } @@ -167,7 +188,7 @@ func SearchLocks(remote string, filter map[string]string, limit int) (locks []Lo } for _, l := range resp.Locks { - locks = append(locks, newLockFromApi(l)) + locks = append(locks, c.newLockFromApi(l)) if limit > 0 && len(locks) >= limit { // Exit outer loop too return locks, nil @@ -194,14 +215,14 @@ func SearchLocks(remote string, filter map[string]string, limit int) (locks []Lo // // If the API 
call is successful, and only one lock matches the given filepath,
// then its ID will be returned, along with a value of "nil" for the error.
-func lockIdFromPath(path string) (string, error) {
-	s, resp := apiClient.Locks.Search(&api.LockSearchRequest{
+func (c *Client) lockIdFromPath(path string) (string, error) {
+	s, resp := c.apiClient.Locks.Search(&api.LockSearchRequest{
 		Filters: []api.Filter{
 			{"path", path},
 		},
 	})
 
-	if _, err := apiClient.Do(s); err != nil {
+	if _, err := c.apiClient.Do(s); err != nil {
 		return "", err
 	}
 
@@ -214,3 +235,34 @@ func lockIdFromPath(path string) (string, error) {
 		return "", ErrLockAmbiguous
 	}
 }
+
+// Cache a successful lock for faster local lookup later
+func (c *Client) cacheLock(filePath, id string) error {
+	// TODO
+	return nil
+}
+
+// Remove a cached lock by path because it's been relinquished
+func (c *Client) cacheUnlock(filePath string) error {
+	// TODO
+	return nil
+}
+
+// Remove a cached lock by id because it's been relinquished
+func (c *Client) cacheUnlockById(id string) error {
+	// TODO
+	return nil
+}
+
+// Get the list of cached locked files
+func (c *Client) cachedLocks() []string {
+	// TODO
+	return nil
+}
+
+// Fetch locked files for the current committer and cache them locally
+// This can be used to sync up locked files when moving machines
+func (c *Client) fetchLocksToCache(remoteName string) error {
+	// TODO
+	return nil
+}

From 7d835d585139db8cdf0765b6e6715e6ddd9baa05 Mon Sep 17 00:00:00 2001
From: Steve Streeting
Date: Mon, 5 Dec 2016 11:15:36 +0000
Subject: [PATCH 18/68] HttpLifecycle really depends on Configuration not just
 EndpointSource

---
 api/http_lifecycle.go      | 23 +++++++++--------------
 api/http_lifecycle_test.go | 29 +++++++++++++----------------
 locking/locks.go           |  2 +-
 3 files changed, 23 insertions(+), 31 deletions(-)

diff --git a/api/http_lifecycle.go b/api/http_lifecycle.go
index 45b1b80c..1025714f 100644
--- a/api/http_lifecycle.go
+++ b/api/http_lifecycle.go
@@ -21,28 +21,23 @@ var (
 	ErrNoOperationGiven = errors.New("lfs/api: no operation provided in schema")
 )
 
-// EndpointSource is an interface which encapsulates the behavior of returning
-// `config.Endpoint`s based on a particular operation.
-type EndpointSource interface {
-	// Endpoint returns the `config.Endpoint` assosciated with a given
-	// operation.
-	Endpoint(operation string) config.Endpoint
-}
-
 // HttpLifecycle serves as the default implementation of the Lifecycle interface
 // for HTTP requests. Internally, it leverages the *http.Client type to execute
 // HTTP requests against a root *url.URL, as given in `NewHttpLifecycle`.
 type HttpLifecycle struct {
-	endpoints EndpointSource
+	cfg *config.Configuration
 }
 
 var _ Lifecycle = new(HttpLifecycle)
 
 // NewHttpLifecycle initializes a new instance of the *HttpLifecycle type with a
 // new *http.Client, and the given root (see above).
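Stepping back to patch 17 as a whole: the locking package now hangs off a stateful Client rather than package-level functions. A minimal sketch of how a caller might drive it at this point in the series (the path and remote name are illustrative; error handling mirrors the command implementations above):

package main

import (
	"log"

	"github.com/git-lfs/git-lfs/config"
	"github.com/git-lfs/git-lfs/locking"
)

func main() {
	// NewClient wires the API client to the kv-backed lock cache kept
	// under the local git storage directory.
	client, err := locking.NewClient(config.Config)
	if err != nil {
		log.Fatalf("Unable to create lock system: %v", err)
	}

	// At this point in the series the remote is still passed per call.
	id, err := client.LockFile("folder/file.dat", "origin")
	if err != nil {
		log.Fatalf("Lock failed: %v", err)
	}
	log.Printf("'%s' locked (%s)", "folder/file.dat", id)

	if err := client.UnlockFileById(id, "origin", false); err != nil {
		log.Fatalf("Unlock failed: %v", err)
	}
}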
-func NewHttpLifecycle(endpoints EndpointSource) *HttpLifecycle { +func NewHttpLifecycle(cfg *config.Configuration) *HttpLifecycle { + if cfg == nil { + cfg = config.Config + } return &HttpLifecycle{ - endpoints: endpoints, + cfg: cfg, } } @@ -78,7 +73,7 @@ func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) { return nil, err } - if _, err = auth.GetCreds(config.Config, req); err != nil { + if _, err = auth.GetCreds(l.cfg, req); err != nil { return nil, err } @@ -102,7 +97,7 @@ func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) { // Otherwise, the api.Response is returned, along with no error, signaling that // the request completed successfully. func (l *HttpLifecycle) Execute(req *http.Request, into interface{}) (Response, error) { - resp, err := httputil.DoHttpRequestWithRedirects(config.Config, req, []*http.Request{}, true) + resp, err := httputil.DoHttpRequestWithRedirects(l.cfg, req, []*http.Request{}, true) if err != nil { return nil, err } @@ -136,7 +131,7 @@ func (l *HttpLifecycle) absolutePath(operation Operation, path string) (*url.URL return nil, ErrNoOperationGiven } - root, err := url.Parse(l.endpoints.Endpoint(string(operation)).Url) + root, err := url.Parse(l.cfg.Endpoint(string(operation)).Url) if err != nil { return nil, err } diff --git a/api/http_lifecycle_test.go b/api/http_lifecycle_test.go index c068a596..d512f071 100644 --- a/api/http_lifecycle_test.go +++ b/api/http_lifecycle_test.go @@ -11,23 +11,20 @@ import ( "github.com/stretchr/testify/assert" ) -type NopEndpointSource struct { - Root string -} - -func (e *NopEndpointSource) Endpoint(op string) config.Endpoint { - return config.Endpoint{Url: e.Root} -} - var ( - source = &NopEndpointSource{"https://example.com"} + testConfig = NewTestConfig() ) +func NewTestConfig() *config.Configuration { + c := config.NewFrom(config.Values{}) + c.SetManualEndpoint(config.Endpoint{Url: "https://example.com"}) + return c +} func TestHttpLifecycleMakesRequestsAgainstAbsolutePath(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) req, err := l.Build(&api.RequestSchema{ Path: "/foo", Operation: api.DownloadOperation, @@ -41,7 +38,7 @@ func TestHttpLifecycleAttachesQueryParameters(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) req, err := l.Build(&api.RequestSchema{ Path: "/foo", Operation: api.DownloadOperation, @@ -58,7 +55,7 @@ func TestHttpLifecycleAttachesBodyWhenPresent(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) req, err := l.Build(&api.RequestSchema{ Operation: api.DownloadOperation, Body: struct { @@ -77,7 +74,7 @@ func TestHttpLifecycleDoesNotAttachBodyWhenEmpty(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) req, err := l.Build(&api.RequestSchema{ Operation: api.DownloadOperation, }) @@ -90,7 +87,7 @@ func TestHttpLifecycleErrsWithoutOperation(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) req, err := l.Build(&api.RequestSchema{ Path: "/foo", }) @@ -113,7 +110,7 @@ func TestHttpLifecycleExecutesRequestWithoutBody(t *testing.T) { req, _ := http.NewRequest("GET", 
server.URL+"/path", nil) - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) _, err := l.Execute(req, nil) assert.True(t, called) @@ -138,7 +135,7 @@ func TestHttpLifecycleExecutesRequestWithBody(t *testing.T) { req, _ := http.NewRequest("GET", server.URL+"/path", nil) - l := api.NewHttpLifecycle(source) + l := api.NewHttpLifecycle(testConfig) resp := new(Response) _, err := l.Execute(req, resp) diff --git a/locking/locks.go b/locking/locks.go index 02840830..09de57de 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -31,7 +31,7 @@ type Client struct { func NewClient(cfg *config.Configuration) (*Client, error) { - apiClient := api.NewClient(nil) + apiClient := api.NewClient(api.NewHttpLifecycle(cfg)) lockDir := filepath.Join(config.LocalGitStorageDir, "lfs") os.MkdirAll(lockDir, 0755) From de63cd1d46fd731fd7cdf6a771b098f0bc0dbb6b Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 5 Dec 2016 11:25:02 +0000 Subject: [PATCH 19/68] Remove remote arg from lock methods, configuration handles this now --- commands/command_lock.go | 6 +++++- commands/command_locks.go | 7 +++++-- commands/command_unlock.go | 9 +++++++-- locking/locks.go | 34 ++++++++++------------------------ 4 files changed, 27 insertions(+), 29 deletions(-) diff --git a/commands/command_lock.go b/commands/command_lock.go index ea76a2aa..c6fc4d7c 100644 --- a/commands/command_lock.go +++ b/commands/command_lock.go @@ -28,11 +28,15 @@ func lockCommand(cmd *cobra.Command, args []string) { Exit(err.Error()) } + if len(lockRemote) > 0 { + cfg.CurrentRemote = lockRemote + } + lockClient, err := locking.NewClient(cfg) if err != nil { Exit("Unable to create lock system: %v", err.Error()) } - id, err := lockClient.LockFile(path, lockRemote) + id, err := lockClient.LockFile(path) if err != nil { Exit("Lock failed: %v", err) } diff --git a/commands/command_locks.go b/commands/command_locks.go index ea4a2ea8..d01c26c0 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -16,12 +16,15 @@ func locksCommand(cmd *cobra.Command, args []string) { Exit("Error building filters: %v", err) } - var lockCount int + if len(lockRemote) > 0 { + cfg.CurrentRemote = lockRemote + } lockClient, err := locking.NewClient(cfg) if err != nil { Exit("Unable to create lock system: %v", err.Error()) } - locks, err := lockClient.SearchLocks(lockRemote, filters, locksCmdFlags.Limit) + var lockCount int + locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit) // Print any we got before exiting for _, lock := range locks { Print("%s\t%s <%s>", lock.Path, lock.Name, lock.Email) diff --git a/commands/command_unlock.go b/commands/command_unlock.go index 56ab599a..1868fe50 100644 --- a/commands/command_unlock.go +++ b/commands/command_unlock.go @@ -21,6 +21,11 @@ type unlockFlags struct { } func unlockCommand(cmd *cobra.Command, args []string) { + + if len(lockRemote) > 0 { + cfg.CurrentRemote = lockRemote + } + lockClient, err := locking.NewClient(cfg) if err != nil { Exit("Unable to create lock system: %v", err.Error()) @@ -31,12 +36,12 @@ func unlockCommand(cmd *cobra.Command, args []string) { Exit("Unable to determine path: %v", err.Error()) } - err = lockClient.UnlockFile(path, lockRemote, unlockCmdFlags.Force) + err = lockClient.UnlockFile(path, unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock: %v", err.Error()) } } else if unlockCmdFlags.Id != "" { - err := lockClient.UnlockFileById(unlockCmdFlags.Id, lockRemote, unlockCmdFlags.Force) + err := lockClient.UnlockFileById(unlockCmdFlags.Id, 
unlockCmdFlags.Force) if err != nil { Exit("Unable to unlock %v: %v", unlockCmdFlags.Id, err.Error()) } diff --git a/locking/locks.go b/locking/locks.go index 09de57de..122c281f 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -29,6 +29,7 @@ type Client struct { cache *kv.Store } +// NewClient creates a new locking client with the given configuration func NewClient(cfg *config.Configuration) (*Client, error) { apiClient := api.NewClient(api.NewHttpLifecycle(cfg)) @@ -43,14 +44,10 @@ func NewClient(cfg *config.Configuration) (*Client, error) { return &Client{cfg, apiClient, store}, nil } -// LockFile attempts to lock a file on the given remote name +// LockFile attempts to lock a file on the current remote // path must be relative to the root of the repository // Returns the lock id if successful, or an error -func (c *Client) LockFile(path, remote string) (id string, e error) { - // TODO: API currently relies on config.Config but should really pass to client in future - savedRemote := config.Config.CurrentRemote - config.Config.CurrentRemote = remote - defer func() { config.Config.CurrentRemote = savedRemote }() +func (c *Client) LockFile(path string) (id string, e error) { // TODO: this is not really the constraint we need to avoid merges, improve as per proposal latest, err := git.CurrentRemoteRef() @@ -79,27 +76,23 @@ func (c *Client) LockFile(path, remote string) (id string, e error) { return resp.Lock.Id, nil } -// UnlockFile attempts to unlock a file on the given remote name +// UnlockFile attempts to unlock a file on the current remote // path must be relative to the root of the repository // Force causes the file to be unlocked from other users as well -func (c *Client) UnlockFile(path, remote string, force bool) error { - // TODO: API currently relies on config.Config but should really pass to client in future - savedRemote := config.Config.CurrentRemote - config.Config.CurrentRemote = remote - defer func() { config.Config.CurrentRemote = savedRemote }() +func (c *Client) UnlockFile(path string, force bool) error { id, err := c.lockIdFromPath(path) if err != nil { return fmt.Errorf("Unable to get lock id: %v", err) } - return c.UnlockFileById(id, remote, force) + return c.UnlockFileById(id, force) } -// UnlockFileById attempts to unlock a lock with a given id on the remote +// UnlockFileById attempts to unlock a lock with a given id on the current remote // Force causes the file to be unlocked from other users as well -func (c *Client) UnlockFileById(id, remote string, force bool) error { +func (c *Client) UnlockFileById(id string, force bool) error { s, resp := c.apiClient.Locks.Unlock(id, force) if _, err := c.apiClient.Do(s); err != nil { @@ -161,14 +154,7 @@ func (c *Client) newLockFromApi(a api.Lock) Lock { // SearchLocks returns a channel of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks -func (c *Client) SearchLocks(remote string, filter map[string]string, limit int) (locks []Lock, err error) { - // TODO: API currently relies on config.Config but should really pass to client in future - savedRemote := config.Config.CurrentRemote - config.Config.CurrentRemote = remote - defer func() { - // Only reinstate the remote after we're done - config.Config.CurrentRemote = savedRemote - }() +func (c *Client) SearchLocks(filter map[string]string, limit int) (locks []Lock, err error) { locks = make([]Lock, 0, limit) @@ -262,7 +248,7 @@ func (c *Client) cachedLocks() []string { // Fetch locked files for the current committer 
and cache them locally // This can be used to sync up locked files when moving machines -func (c *Client) fetchLocksToCache(remoteName string) error { +func (c *Client) fetchLocksToCache() error { // TODO return nil } From cf1b0e1ed8c44108f886f19bef8c3a7a6e57082f Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 5 Dec 2016 11:45:24 +0000 Subject: [PATCH 20/68] Move CurrentCommitter to Configuration instead of API This removes another implicit dependency on global config from API pkg --- api/lock_api.go | 13 ++----------- config/config.go | 9 +++++++++ locking/locks.go | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/api/lock_api.go b/api/lock_api.go index 7dd2df55..345f63f2 100644 --- a/api/lock_api.go +++ b/api/lock_api.go @@ -4,8 +4,6 @@ import ( "fmt" "strconv" "time" - - "github.com/git-lfs/git-lfs/config" ) // LockService is an API service which encapsulates the Git LFS Locking API. @@ -144,15 +142,8 @@ type Committer struct { Email string `json:"email"` } -// CurrentCommitter returns a Committer instance populated with the same -// credentials as would be used to author a commit. In particular, the -// "user.name" and "user.email" configuration values are used from the -// config.Config singleton. -func CurrentCommitter() Committer { - name, _ := config.Config.Git.Get("user.name") - email, _ := config.Config.Git.Get("user.email") - - return Committer{name, email} +func NewCommitter(name, email string) Committer { + return Committer{Name: name, Email: email} } // LockRequest encapsulates the payload sent across the API when a client would diff --git a/config/config.go b/config/config.go index b4e23f17..756d2fab 100644 --- a/config/config.go +++ b/config/config.go @@ -492,3 +492,12 @@ func (c *Configuration) loadGitConfig() bool { return false } + +// CurrentCommitter returns the name/email that would be used to author a commit +// with this configuration. In particular, the "user.name" and "user.email" +// configuration values are used +func (c *Configuration) CurrentCommitter() (name, email string) { + name, _ = c.Git.Get("user.name") + email, _ = c.Git.Get("user.email") + return +} diff --git a/locking/locks.go b/locking/locks.go index 122c281f..ddbb52d2 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -57,7 +57,7 @@ func (c *Client) LockFile(path string) (id string, e error) { s, resp := c.apiClient.Locks.Lock(&api.LockRequest{ Path: path, - Committer: api.CurrentCommitter(), + Committer: api.NewCommitter(c.cfg.CurrentCommitter()), LatestRemoteCommit: latest.Sha, }) From e5c37c8093fc8ba03131ca8aa57a63eabfd4e9bc Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Tue, 6 Dec 2016 10:06:14 +0000 Subject: [PATCH 21/68] Document nil behaviour of NewHttpLifecycle --- api/http_lifecycle.go | 1 + 1 file changed, 1 insertion(+) diff --git a/api/http_lifecycle.go b/api/http_lifecycle.go index 1025714f..a8b6770a 100644 --- a/api/http_lifecycle.go +++ b/api/http_lifecycle.go @@ -32,6 +32,7 @@ var _ Lifecycle = new(HttpLifecycle) // NewHttpLifecycle initializes a new instance of the *HttpLifecycle type with a // new *http.Client, and the given root (see above). 
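The nil-handling documented by the next hunk is a small constructor pattern worth spelling out; a self-contained sketch with local stand-ins (Configuration and the Config global here are illustrative, not the real config package):

package main

import "fmt"

type Configuration struct {
	Endpoint string
}

// Config stands in for the package-level global configuration.
var Config = &Configuration{Endpoint: "https://example.com"}

type HttpLifecycle struct {
	cfg *Configuration
}

// NewHttpLifecycle falls back to the global configuration when cfg is nil,
// the same guard patch 18 introduced above.
func NewHttpLifecycle(cfg *Configuration) *HttpLifecycle {
	if cfg == nil {
		cfg = Config
	}
	return &HttpLifecycle{cfg: cfg}
}

func main() {
	l := NewHttpLifecycle(nil)
	fmt.Println(l.cfg.Endpoint) // https://example.com
}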
+// Passing a nil Configuration will use the global config func NewHttpLifecycle(cfg *config.Configuration) *HttpLifecycle { if cfg == nil { cfg = config.Config From 9ecf675cc227dc78950955db0626ab8d7c1f1dbe Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Tue, 6 Dec 2016 10:06:23 +0000 Subject: [PATCH 22/68] Keep test config local to tests --- api/http_lifecycle_test.go | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/api/http_lifecycle_test.go b/api/http_lifecycle_test.go index d512f071..72013a99 100644 --- a/api/http_lifecycle_test.go +++ b/api/http_lifecycle_test.go @@ -11,10 +11,6 @@ import ( "github.com/stretchr/testify/assert" ) -var ( - testConfig = NewTestConfig() -) - func NewTestConfig() *config.Configuration { c := config.NewFrom(config.Values{}) c.SetManualEndpoint(config.Endpoint{Url: "https://example.com"}) @@ -24,7 +20,7 @@ func TestHttpLifecycleMakesRequestsAgainstAbsolutePath(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) req, err := l.Build(&api.RequestSchema{ Path: "/foo", Operation: api.DownloadOperation, @@ -38,7 +34,7 @@ func TestHttpLifecycleAttachesQueryParameters(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) req, err := l.Build(&api.RequestSchema{ Path: "/foo", Operation: api.DownloadOperation, @@ -55,7 +51,7 @@ func TestHttpLifecycleAttachesBodyWhenPresent(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) req, err := l.Build(&api.RequestSchema{ Operation: api.DownloadOperation, Body: struct { @@ -74,7 +70,7 @@ func TestHttpLifecycleDoesNotAttachBodyWhenEmpty(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) req, err := l.Build(&api.RequestSchema{ Operation: api.DownloadOperation, }) @@ -87,7 +83,7 @@ func TestHttpLifecycleErrsWithoutOperation(t *testing.T) { SetupTestCredentialsFunc() defer RestoreCredentialsFunc() - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) req, err := l.Build(&api.RequestSchema{ Path: "/foo", }) @@ -110,7 +106,7 @@ func TestHttpLifecycleExecutesRequestWithoutBody(t *testing.T) { req, _ := http.NewRequest("GET", server.URL+"/path", nil) - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) _, err := l.Execute(req, nil) assert.True(t, called) @@ -135,7 +131,7 @@ func TestHttpLifecycleExecutesRequestWithBody(t *testing.T) { req, _ := http.NewRequest("GET", server.URL+"/path", nil) - l := api.NewHttpLifecycle(testConfig) + l := api.NewHttpLifecycle(NewTestConfig()) resp := new(Response) _, err := l.Execute(req, resp) From 761656d01b3f64a83bdd1b835654203629592cbb Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Tue, 6 Dec 2016 10:06:46 +0000 Subject: [PATCH 23/68] Report errors when creating dir structure for lock cache --- locking/locks.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/locking/locks.go b/locking/locks.go index ddbb52d2..7d80b269 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -35,7 +35,10 @@ func NewClient(cfg *config.Configuration) (*Client, error) { apiClient := api.NewClient(api.NewHttpLifecycle(cfg)) lockDir := 
filepath.Join(config.LocalGitStorageDir, "lfs") - os.MkdirAll(lockDir, 0755) + err := os.MkdirAll(lockDir, 0755) + if err != nil { + return nil, err + } lockFile := filepath.Join(lockDir, "lockcache.db") store, err := kv.NewStore(lockFile) if err != nil { From 7ac855b684b2784abc4a0d0a189dc32fc0278204 Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 5 Dec 2016 15:01:25 +0000 Subject: [PATCH 24/68] Add Close func to lock Client to save cache --- commands/command_lock.go | 1 + commands/command_locks.go | 1 + commands/command_unlock.go | 1 + locking/locks.go | 7 +++++++ 4 files changed, 10 insertions(+) diff --git a/commands/command_lock.go b/commands/command_lock.go index c6fc4d7c..b21387d1 100644 --- a/commands/command_lock.go +++ b/commands/command_lock.go @@ -36,6 +36,7 @@ func lockCommand(cmd *cobra.Command, args []string) { if err != nil { Exit("Unable to create lock system: %v", err.Error()) } + defer lockClient.Close() id, err := lockClient.LockFile(path) if err != nil { Exit("Lock failed: %v", err) diff --git a/commands/command_locks.go b/commands/command_locks.go index d01c26c0..0d859e77 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -23,6 +23,7 @@ func locksCommand(cmd *cobra.Command, args []string) { if err != nil { Exit("Unable to create lock system: %v", err.Error()) } + defer lockClient.Close() var lockCount int locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit) // Print any we got before exiting diff --git a/commands/command_unlock.go b/commands/command_unlock.go index 1868fe50..505d57fa 100644 --- a/commands/command_unlock.go +++ b/commands/command_unlock.go @@ -30,6 +30,7 @@ func unlockCommand(cmd *cobra.Command, args []string) { if err != nil { Exit("Unable to create lock system: %v", err.Error()) } + defer lockClient.Close() if len(args) != 0 { path, err := lockPath(args[0]) if err != nil { diff --git a/locking/locks.go b/locking/locks.go index 7d80b269..8d928da3 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -30,6 +30,8 @@ type Client struct { } // NewClient creates a new locking client with the given configuration +// You must call the returned object's `Close` method when you are finished with +// it func NewClient(cfg *config.Configuration) (*Client, error) { apiClient := api.NewClient(api.NewHttpLifecycle(cfg)) @@ -47,6 +49,11 @@ func NewClient(cfg *config.Configuration) (*Client, error) { return &Client{cfg, apiClient, store}, nil } +// Close this client instance; must be called to dispose of resources +func (c *Client) Close() error { + return c.cache.Save() +} + // LockFile attempts to lock a file on the current remote // path must be relative to the root of the repository // Returns the lock id if successful, or an error From eccfa781aec9430945cc3bcfd54d0235fe9c28b7 Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 5 Dec 2016 16:44:02 +0000 Subject: [PATCH 25/68] Add RemoveAll and Visit functions to kv store --- tools/kv/keyvaluestore.go | 27 +++++++++++++++++++++++++++ tools/kv/keyvaluestore_test.go | 23 +++++++++++++++++++++++ 2 files changed, 50 insertions(+) diff --git a/tools/kv/keyvaluestore.go b/tools/kv/keyvaluestore.go index 2c60c8e7..0699b98f 100644 --- a/tools/kv/keyvaluestore.go +++ b/tools/kv/keyvaluestore.go @@ -71,6 +71,33 @@ func (k *Store) Remove(key string) { k.logChange(removeOperation, key, nil) } +// RemoveAll removes all entries from the store +// These changes are not persisted until you call Save() +func (k *Store) RemoveAll() { + k.mu.Lock() + defer k.mu.Unlock() + + // 
Log all changes + for key, _ := range k.db { + k.logChange(removeOperation, key, nil) + } + k.db = make(map[string]interface{}) +} + +// Visit walks through the entire store via a function; return false from +// your visitor function to halt the walk +func (k *Store) Visit(cb func(string, interface{}) bool) { + // Read-only lock + k.mu.RLock() + defer k.mu.RUnlock() + + for k, v := range k.db { + if !cb(k, v) { + break + } + } +} + // Append a change to the log; mutex must already be locked func (k *Store) logChange(op operation, key string, value interface{}) { k.log = append(k.log, change{op, key, value}) diff --git a/tools/kv/keyvaluestore_test.go b/tools/kv/keyvaluestore_test.go index 797c9463..2f531f35 100644 --- a/tools/kv/keyvaluestore_test.go +++ b/tools/kv/keyvaluestore_test.go @@ -66,6 +66,29 @@ func TestStoreSimple(t *testing.T) { n = kvs2.Get("noValue") assert.Nil(t, n) + // Test remove all + kvs2.RemoveAll() + s = kvs2.Get("stringVal") + assert.Nil(t, s) + i = kvs2.Get("intVal") + assert.Nil(t, i) + f = kvs2.Get("floatVal") + assert.Nil(t, f) + c = kvs2.Get("structVal") + assert.Nil(t, c) + + err = kvs2.Save() + assert.Nil(t, err) + kvs2 = nil + + // Now confirm that we can read blank & get nothing + kvs, err = NewStore(filename) + kvs.Visit(func(k string, v interface{}) bool { + // Should not be called + assert.Fail(t, "Should be no entries") + return true + }) + } func TestStoreOptimisticConflict(t *testing.T) { From 3ea24f164780e5f9c42ede0a89bc386d5b26f76c Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 5 Dec 2016 18:16:30 +0000 Subject: [PATCH 26/68] Add lock tests and tweak cache to store entire locks --- commands/command_lock.go | 4 +- commands/command_locks.go | 6 +- locking/locks.go | 139 ++++++++++++++++++++++++++++++++------ locking/locks_test.go | 139 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 266 insertions(+), 22 deletions(-) create mode 100644 locking/locks_test.go diff --git a/commands/command_lock.go b/commands/command_lock.go index b21387d1..8ccf63e5 100644 --- a/commands/command_lock.go +++ b/commands/command_lock.go @@ -37,12 +37,12 @@ func lockCommand(cmd *cobra.Command, args []string) { Exit("Unable to create lock system: %v", err.Error()) } defer lockClient.Close() - id, err := lockClient.LockFile(path) + lock, err := lockClient.LockFile(path) if err != nil { Exit("Lock failed: %v", err) } - Print("\n'%s' was locked (%s)", args[0], id) + Print("\n'%s' was locked (%s)", args[0], lock.Id) } // lockPaths relativizes the given filepath such that it is relative to the root diff --git a/commands/command_locks.go b/commands/command_locks.go index 0d859e77..13fc6965 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -25,7 +25,7 @@ func locksCommand(cmd *cobra.Command, args []string) { } defer lockClient.Close() var lockCount int - locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit) + locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local) // Print any we got before exiting for _, lock := range locks { Print("%s\t%s <%s>", lock.Path, lock.Name, lock.Email) @@ -51,6 +51,9 @@ type locksFlags struct { // limit is an optional request parameter sent to the server used to // limit the Limit int + // local limits the scope of lock reporting to the locally cached record + // of locks for the current user & doesn't query the server + Local bool } // Filters produces a filter based on locksFlags instance. 
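For reference, the Visit and RemoveAll additions from patch 25 can be exercised as in the sketch below, assuming the tools/kv package as defined in this series (the keys and values are illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"path/filepath"

	"github.com/git-lfs/git-lfs/tools/kv"
)

func main() {
	dir, err := ioutil.TempDir("", "kvdemo")
	if err != nil {
		log.Fatal(err)
	}
	store, err := kv.NewStore(filepath.Join(dir, "demo.db"))
	if err != nil {
		log.Fatal(err)
	}

	store.Set("a", 1)
	store.Set("b", 2)
	store.Set("c", 3)

	// Returning false from the callback halts the walk early; iteration
	// order over the underlying map is not defined.
	seen := 0
	store.Visit(func(key string, val interface{}) bool {
		fmt.Println(key, val)
		seen++
		return seen < 2
	})

	// RemoveAll logs a remove for every key, so the next Save persists
	// an empty store.
	store.RemoveAll()
}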
@@ -82,5 +85,6 @@ func init() { cmd.Flags().StringVarP(&locksCmdFlags.Path, "path", "p", "", "filter locks results matching a particular path") cmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID") cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return") + cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks") }) } diff --git a/locking/locks.go b/locking/locks.go index 8d928da3..6808e707 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -5,6 +5,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/git-lfs/git-lfs/api" @@ -57,12 +58,12 @@ func (c *Client) Close() error { // LockFile attempts to lock a file on the current remote // path must be relative to the root of the repository // Returns the lock id if successful, or an error -func (c *Client) LockFile(path string) (id string, e error) { +func (c *Client) LockFile(path string) (Lock, error) { // TODO: this is not really the constraint we need to avoid merges, improve as per proposal latest, err := git.CurrentRemoteRef() if err != nil { - return "", err + return Lock{}, err } s, resp := c.apiClient.Locks.Lock(&api.LockRequest{ @@ -72,18 +73,20 @@ func (c *Client) LockFile(path string) (id string, e error) { }) if _, err := c.apiClient.Do(s); err != nil { - return "", fmt.Errorf("Error communicating with LFS API: %v", err) + return Lock{}, fmt.Errorf("Error communicating with LFS API: %v", err) } if len(resp.Err) > 0 { - return "", fmt.Errorf("Server unable to create lock: %v", resp.Err) + return Lock{}, fmt.Errorf("Server unable to create lock: %v", resp.Err) } - if err := c.cacheLock(resp.Lock.Path, resp.Lock.Id); err != nil { - return "", fmt.Errorf("Error caching lock information: %v", err) + lock := c.newLockFromApi(*resp.Lock) + + if err := c.cacheLock(lock); err != nil { + return Lock{}, fmt.Errorf("Error caching lock information: %v", err) } - return resp.Lock.Id, nil + return lock, nil } // UnlockFile attempts to unlock a file on the current remote @@ -164,9 +167,39 @@ func (c *Client) newLockFromApi(a api.Lock) Lock { // SearchLocks returns a channel of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks -func (c *Client) SearchLocks(filter map[string]string, limit int) (locks []Lock, err error) { +// If localOnly = true, don't query the server & report only own local locks +func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool) (locks []Lock, err error) { - locks = make([]Lock, 0, limit) + if localOnly { + return c.searchCachedLocks(filter, limit) + } else { + return c.searchRemoteLocks(filter, limit) + } +} + +func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) { + locks := c.cachedLocks() + path, filterByPath := filter["path"] + id, filterById := filter["id"] + lockCount := 0 + + for _, l := range locks { + // Manually filter by Path/Id + if (filterByPath && path != l.Path) || + (filterById && id != l.Id) { + continue + } + locks = append(locks, l) + lockCount++ + if limit > 0 && lockCount >= limit { + break + } + } + return locks, nil +} + +func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) { + locks := make([]Lock, 0, limit) apifilters := make([]api.Filter, 0, len(filter)) for k, v := range filter { @@ -232,33 +265,101 @@ func (c *Client) lockIdFromPath(path string) (string, error) { } } +// We 
want to use a single cache file for integrity, but to make it easy to
+// list all locks, prefix the id->path map in a way we can identify (something
+// that won't be in a path)
+const idKeyPrefix = "*id*://"
+
+func (c *Client) encodeIdKey(id string) string {
+	// Safety against accidents
+	if !c.isIdKey(id) {
+		return idKeyPrefix + id
+	}
+	return id
+}
+func (c *Client) decodeIdKey(key string) string {
+	// Safety against accidents
+	if c.isIdKey(key) {
+		return key[len(idKeyPrefix):]
+	}
+	return key
+}
+func (c *Client) isIdKey(key string) bool {
+	return strings.HasPrefix(key, idKeyPrefix)
+}
+
 // Cache a successful lock for faster local lookup later
-func (c *Client) cacheLock(filePath, id string) error {
-	// TODO
+func (c *Client) cacheLock(l Lock) error {
+	// Store reference in both directions
+	// Path -> Lock
+	c.cache.Set(l.Path, &l)
+	// EncodedId -> Lock (encoded so we can easily identify)
+	c.cache.Set(c.encodeIdKey(l.Id), &l)
 	return nil
 }
 
 // Remove a cached lock by path because it's been relinquished
-func (c *Client) cacheUnlock(filePath string) error {
-	// TODO
+func (c *Client) cacheUnlockByPath(filePath string) error {
+	ilock := c.cache.Get(filePath)
+	if ilock != nil {
+		lock := ilock.(*Lock)
+		c.cache.Remove(lock.Path)
+		// Id as key is encoded
+		c.cache.Remove(c.encodeIdKey(lock.Id))
+	}
 	return nil
 }
 
 // Remove a cached lock by id because it's been relinquished
 func (c *Client) cacheUnlockById(id string) error {
-	// TODO
+	// Id as key is encoded
+	idkey := c.encodeIdKey(id)
+	ilock := c.cache.Get(idkey)
+	if ilock != nil {
+		lock := ilock.(*Lock)
+		c.cache.Remove(idkey)
+		c.cache.Remove(lock.Path)
+	}
 	return nil
 }
 
 // Get the list of cached locked files
-func (c *Client) cachedLocks() []string {
-	// TODO
-	return nil
+func (c *Client) cachedLocks() []Lock {
+	var locks []Lock
+	c.cache.Visit(func(key string, val interface{}) bool {
+		// Only report path->lock entries, not the reverse id->lock ones
+		if !c.isIdKey(key) {
+			lock := val.(*Lock)
+			locks = append(locks, *lock)
+		}
+		return true // continue
+	})
+	return locks
 }
 
 // Fetch locked files for the current committer and cache them locally
 // This can be used to sync up locked files when moving machines
 func (c *Client) fetchLocksToCache() error {
-	// TODO
+	// TODO: filters don't seem to currently define how to search for a
+	// committer's email. Is it "committer.email"?
For now, just iterate
+	locks, err := c.SearchLocks(nil, 0, false)
+	if err != nil {
+		return err
+	}
+
+	// We're going to overwrite the entire local cache
+	c.cache.RemoveAll()
+
+	_, email := c.cfg.CurrentCommitter()
+	for _, l := range locks {
+		if l.Email == email {
+			c.cacheLock(l)
+		}
+	}
+
 	return nil
 }
+
+func init() {
+	kv.RegisterTypeForStorage(&Lock{})
+}
diff --git a/locking/locks_test.go b/locking/locks_test.go
new file mode 100644
index 00000000..3a4d545a
--- /dev/null
+++ b/locking/locks_test.go
@@ -0,0 +1,139 @@
+package locking
+
+import (
+	"bytes"
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"testing"
+	"time"
+
+	"github.com/git-lfs/git-lfs/api"
+	"github.com/git-lfs/git-lfs/config"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestLockCache(t *testing.T) {
+	var err error
+
+	oldStore := config.LocalGitStorageDir
+	config.LocalGitStorageDir, err = ioutil.TempDir("", "testCacheLock")
+	assert.Nil(t, err)
+	defer func() {
+		os.RemoveAll(config.LocalGitStorageDir)
+		config.LocalGitStorageDir = oldStore
+	}()
+
+	client, err := NewClient(config.NewFrom(config.Values{}))
+	assert.Nil(t, err)
+
+	testLocks := []Lock{
+		Lock{Path: "folder/test1.dat", Id: "101"},
+		Lock{Path: "folder/test2.dat", Id: "102"},
+		Lock{Path: "root.dat", Id: "103"},
+	}
+
+	for _, l := range testLocks {
+		err = client.cacheLock(l)
+		assert.Nil(t, err)
+	}
+
+	locks := client.cachedLocks()
+	for _, l := range testLocks {
+		assert.Contains(t, locks, l)
+	}
+	assert.Equal(t, len(testLocks), len(locks))
+
+	err = client.cacheUnlockByPath("folder/test2.dat")
+	assert.Nil(t, err)
+
+	locks = client.cachedLocks()
+	// delete item 1 from test locks
+	testLocks = append(testLocks[:1], testLocks[2:]...)
+	for _, l := range testLocks {
+		assert.Contains(t, locks, l)
+	}
+	assert.Equal(t, len(testLocks), len(locks))
+
+	err = client.cacheUnlockById("101")
+	assert.Nil(t, err)
+
+	locks = client.cachedLocks()
+	testLocks = testLocks[1:]
+	for _, l := range testLocks {
+		assert.Contains(t, locks, l)
+	}
+	assert.Equal(t, len(testLocks), len(locks))
+}
+
+type TestLifecycle struct {
+}
+
+func (l *TestLifecycle) Build(schema *api.RequestSchema) (*http.Request, error) {
+	return http.NewRequest("GET", "http://dummy", nil)
+}
+
+func (l *TestLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) {
+	// Return test data including other users
+	locks := api.LockList{Locks: []api.Lock{
+		api.Lock{Id: "99", Path: "folder/test3.dat", Committer: api.Committer{Name: "Alice", Email: "alice@wonderland.com"}},
+		api.Lock{Id: "101", Path: "folder/test1.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}},
+		api.Lock{Id: "102", Path: "folder/test2.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}},
+		api.Lock{Id: "103", Path: "root.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}},
+		api.Lock{Id: "199", Path: "other/test1.dat", Committer: api.Committer{Name: "Charles", Email: "charles@incharge.com"}},
+	}}
+	locksJson, _ := json.Marshal(locks)
+	r := &http.Response{
+		Status:     "200 OK",
+		StatusCode: 200,
+		Proto:      "HTTP/1.0",
+		Body:       ioutil.NopCloser(bytes.NewReader(locksJson)),
+	}
+	if into != nil {
+		decoder := json.NewDecoder(r.Body)
+		if err := decoder.Decode(into); err != nil {
+			return nil, err
+		}
+	}
+	return api.WrapHttpResponse(r), nil
+}
+func (l *TestLifecycle) Cleanup(resp api.Response) error {
+	return resp.Body().Close()
+}
+
+func TestRefreshCache(t *testing.T) {
+	var err error
+	oldStore :=
config.LocalGitStorageDir + config.LocalGitStorageDir, err = ioutil.TempDir("", "testCacheLock") + assert.Nil(t, err) + defer func() { + os.RemoveAll(config.LocalGitStorageDir) + config.LocalGitStorageDir = oldStore + }() + + cfg := config.NewFrom(config.Values{ + Git: map[string]string{"user.name": "Fred", "user.email": "fred@bloggs.com"}}) + client, err := NewClient(cfg) + assert.Nil(t, err) + // Override api client for testing + client.apiClient = api.NewClient(&TestLifecycle{}) + + // Should start with no cached items + locks := client.cachedLocks() + assert.Empty(t, locks) + + // Should load from test data, just Fred's + err = client.fetchLocksToCache() + assert.Nil(t, err) + + locks = client.cachedLocks() + // Need to include zero time in structure for equal to work + zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + assert.Equal(t, []Lock{ + Lock{Path: "folder/test1.dat", Id: "101", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime, UnlockedAt: zeroTime}, + Lock{Path: "folder/test2.dat", Id: "102", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime, UnlockedAt: zeroTime}, + Lock{Path: "root.dat", Id: "103", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime, UnlockedAt: zeroTime}, + }, locks) + +} From f71598bc207192312697f92435c742a605cb483e Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 12 Dec 2016 14:56:40 +0000 Subject: [PATCH 27/68] Add test for cached locks, fix cached locks --- locking/locks.go | 6 +++--- test/test-locks.sh | 45 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/locking/locks.go b/locking/locks.go index 6808e707..2702e589 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -178,12 +178,12 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool } func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) { - locks := c.cachedLocks() + cachedlocks := c.cachedLocks() path, filterByPath := filter["path"] id, filterById := filter["id"] lockCount := 0 - - for _, l := range locks { + locks := make([]Lock, 0, len(cachedlocks)) + for _, l := range cachedlocks { // Manually filter by Path/Id if (filterByPath && path != l.Path) || (filterById && id != l.Id) { diff --git a/test/test-locks.sh b/test/test-locks.sh index 9d0f32b1..43f1a94a 100755 --- a/test/test-locks.sh +++ b/test/test-locks.sh @@ -88,3 +88,48 @@ begin_test "list locks with pagination" grep "4 lock(s) matched query" locks.log ) end_test + +begin_test "cached locks" +( + set -e + + reponame="cached_locks" + setup_remote_repo "remote_$reponame" + clone_repo "remote_$reponame" "clone_$reponame" + + git lfs track "*.dat" + echo "foo" > "cached1.dat" + echo "bar" > "cached2.dat" + + git add "cached1.dat" "cached2.dat" ".gitattributes" + git commit -m "add files" | tee commit.log + grep "3 files changed" commit.log + grep "create mode 100644 cached1.dat" commit.log + grep "create mode 100644 cached2.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + + GITLFSLOCKSENABLED=1 git lfs lock "cached1.dat" | tee lock.log + assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + + GITLFSLOCKSENABLED=1 git lfs lock "cached2.dat" | tee lock.log + assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + + GITLFSLOCKSENABLED=1 git lfs locks --local | tee locks.log + grep "2 lock(s) matched query" locks.log + + # delete the remote to prove we're 
using the local records + git remote remove origin + + GITLFSLOCKSENABLED=1 git lfs locks --local --path "cached1.dat" | tee locks.log + grep "1 lock(s) matched query" locks.log + grep "cached1.dat" locks.log + + GITLFSLOCKSENABLED=1 git lfs locks --local --limit 1 | tee locks.log + grep "1 lock(s) matched query" locks.log +) +end_test + + From fdb85ac0d4227c062e868a666f23bb00cdffa9ff Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 12 Dec 2016 15:04:36 +0000 Subject: [PATCH 28/68] Expose the minimum guaranteed information from locking.Lock API version currently has some fields which may go away, or which may be solely internal in future. For now only expose the fields which are always useful. --- locking/locks.go | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) diff --git a/locking/locks.go b/locking/locks.go index 2702e589..d4c7cc9f 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -124,8 +124,6 @@ func (c *Client) UnlockFileById(id string, force bool) error { } // Lock is a record of a locked file -// TODO SJS: review this struct and api equivalent -// deliberately duplicated to make this API self-contained, could be simpler type Lock struct { // Id is the unique identifier corresponding to this particular Lock. It // must be consistent with the local copy, and the server's copy. @@ -137,31 +135,17 @@ type Lock struct { Name string // Email address of the person holding this lock Email string - // CommitSHA is the commit that this Lock was created against. It is - // strictly equal to the SHA of the minimum commit negotiated in order - // to create this lock. - CommitSHA string - // LockedAt is a required parameter that represents the instant in time - // that this lock was created. For most server implementations, this - // should be set to the instant at which the lock was initially - // received. + // LockedAt tells you when this lock was acquired. LockedAt time.Time - // ExpiresAt is an optional parameter that represents the instant in - // time that the lock stopped being active. If the lock is still active, - // the server can either a) not send this field, or b) send the - // zero-value of time.Time. 
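	// (CommitSHA, ExpiresAt and UnlockedAt remain available on api.Lock for
	// callers that need them; the exported locking.Lock now carries only what
	// every server implementation is guaranteed to provide.)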
- UnlockedAt time.Time } func (c *Client) newLockFromApi(a api.Lock) Lock { return Lock{ - Id: a.Id, - Path: a.Path, - Name: a.Committer.Name, - Email: a.Committer.Email, - CommitSHA: a.CommitSHA, - LockedAt: a.LockedAt, - UnlockedAt: a.UnlockedAt, + Id: a.Id, + Path: a.Path, + Name: a.Committer.Name, + Email: a.Committer.Email, + LockedAt: a.LockedAt, } } From ea8ff40d38d54910f1659cd780755f474149dc25 Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 12 Dec 2016 15:09:27 +0000 Subject: [PATCH 29/68] Fix tests referencing old fields --- locking/locks_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/locking/locks_test.go b/locking/locks_test.go index 3a4d545a..82e4c77b 100644 --- a/locking/locks_test.go +++ b/locking/locks_test.go @@ -131,9 +131,9 @@ func TestRefreshCache(t *testing.T) { // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) assert.Equal(t, []Lock{ - Lock{Path: "folder/test1.dat", Id: "101", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime, UnlockedAt: zeroTime}, - Lock{Path: "folder/test2.dat", Id: "102", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime, UnlockedAt: zeroTime}, - Lock{Path: "root.dat", Id: "103", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime, UnlockedAt: zeroTime}, + Lock{Path: "folder/test1.dat", Id: "101", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime}, + Lock{Path: "folder/test2.dat", Id: "102", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime}, + Lock{Path: "root.dat", Id: "103", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime}, }, locks) } From 6c80e586539ab30b24abecd20afafe028fb783e0 Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Mon, 12 Dec 2016 15:15:20 +0000 Subject: [PATCH 30/68] Guarantee sort order in test --- locking/locks_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/locking/locks_test.go b/locking/locks_test.go index 82e4c77b..a8132a64 100644 --- a/locking/locks_test.go +++ b/locking/locks_test.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "net/http" "os" + "sort" "testing" "time" @@ -102,6 +103,12 @@ func (l *TestLifecycle) Cleanup(resp api.Response) error { return resp.Body().Close() } +type LocksById []Lock + +func (a LocksById) Len() int { return len(a) } +func (a LocksById) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a LocksById) Less(i, j int) bool { return a[i].Id < a[j].Id } + func TestRefreshCache(t *testing.T) { var err error oldStore := config.LocalGitStorageDir @@ -130,6 +137,9 @@ func TestRefreshCache(t *testing.T) { locks = client.cachedLocks() // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + + // Sort locks for stable comparison + sort.Sort(LocksById(locks)) assert.Equal(t, []Lock{ Lock{Path: "folder/test1.dat", Id: "101", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime}, Lock{Path: "folder/test2.dat", Id: "102", Name: "Fred", Email: "fred@bloggs.com", LockedAt: zeroTime}, From 21cb7a2b837c7969ab2a71c4aa5b8259115f898c Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Tue, 13 Dec 2016 14:48:17 +0000 Subject: [PATCH 31/68] Move detail of locks cache into dedicated LockCache type --- locking/cache.go | 103 ++++++++++++++++++++++++++++++++++++++++++ locking/cache_test.go | 61 +++++++++++++++++++++++++ locking/locks.go | 91 ++++--------------------------------- locking/locks_test.go | 59 ++---------------------- 4 files changed, 177 insertions(+), 137 deletions(-) 
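The LockCache introduced below keeps two entries per lock in a single kv store: one keyed by the lock's path, and one keyed by its id wrapped in an "*id*://" prefix so that id keys can never collide with real paths. A rough sketch of the resulting layout, as an illustration rather than part of the patch:

	kv.Set("folder/a.dat", &lock) // path -> Lock
	kv.Set("*id*://101", &lock)   // prefix-encoded id -> Lock

Listing then walks every key and keeps only the entries without the prefix, so each lock is reported once even though it is stored twice.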
create mode 100644 locking/cache.go create mode 100644 locking/cache_test.go diff --git a/locking/cache.go b/locking/cache.go new file mode 100644 index 00000000..7a706169 --- /dev/null +++ b/locking/cache.go @@ -0,0 +1,103 @@ +package locking + +import ( + "strings" + + "github.com/git-lfs/git-lfs/tools/kv" +) + +const ( + // We want to use a single cache file for integrity, but to make it easy to + // list all locks, prefix the id->path map in a way we can identify (something + // that won't be in a path) + idKeyPrefix string = "*id*://" +) + +type LockCache struct { + kv *kv.Store +} + +func NewLockCache(filepath string) (*LockCache, error) { + kv, err := kv.NewStore(filepath) + if err != nil { + return nil, err + } + return &LockCache{kv}, nil +} + +func (c *LockCache) encodeIdKey(id string) string { + // Safety against accidents + if !c.isIdKey(id) { + return idKeyPrefix + id + } + return id +} +func (c *LockCache) decodeIdKey(key string) string { + // Safety against accidents + if c.isIdKey(key) { + return key[len(idKeyPrefix):] + } + return key +} +func (c *LockCache) isIdKey(key string) bool { + return strings.HasPrefix(key, idKeyPrefix) +} + +// Cache a successful lock for faster local lookup later +func (c *LockCache) CacheLock(l Lock) error { + // Store reference in both directions + // Path -> Lock + c.kv.Set(l.Path, &l) + // EncodedId -> Lock (encoded so we can easily identify) + c.kv.Set(c.encodeIdKey(l.Id), &l) + return nil +} + +// Remove a cached lock by path becuase it's been relinquished +func (c *LockCache) CacheUnlockByPath(filePath string) error { + ilock := c.kv.Get(filePath) + if ilock != nil { + lock := ilock.(*Lock) + c.kv.Remove(lock.Path) + // Id as key is encoded + c.kv.Remove(c.encodeIdKey(lock.Id)) + } + return nil +} + +// Remove a cached lock by id because it's been relinquished +func (c *LockCache) CacheUnlockById(id string) error { + // Id as key is encoded + idkey := c.encodeIdKey(id) + ilock := c.kv.Get(idkey) + if ilock != nil { + lock := ilock.(*Lock) + c.kv.Remove(idkey) + c.kv.Remove(lock.Path) + } + return nil +} + +// Get the list of cached locked files +func (c *LockCache) CachedLocks() []Lock { + var locks []Lock + c.kv.Visit(func(key string, val interface{}) bool { + // Only report file->id entries not reverse + if !c.isIdKey(key) { + lock := val.(*Lock) + locks = append(locks, *lock) + } + return true // continue + }) + return locks +} + +// Clear the cache +func (c *LockCache) Clear() { + c.kv.RemoveAll() +} + +// Save the cache +func (c *LockCache) Save() error { + return c.kv.Save() +} diff --git a/locking/cache_test.go b/locking/cache_test.go new file mode 100644 index 00000000..60bf0458 --- /dev/null +++ b/locking/cache_test.go @@ -0,0 +1,61 @@ +package locking + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLockCache(t *testing.T) { + var err error + + tmpf, err := ioutil.TempFile("", "testCacheLock") + assert.Nil(t, err) + defer func() { + os.Remove(tmpf.Name()) + }() + tmpf.Close() + + cache, err := NewLockCache(tmpf.Name()) + assert.Nil(t, err) + + testLocks := []Lock{ + Lock{Path: "folder/test1.dat", Id: "101"}, + Lock{Path: "folder/test2.dat", Id: "102"}, + Lock{Path: "root.dat", Id: "103"}, + } + + for _, l := range testLocks { + err = cache.CacheLock(l) + assert.Nil(t, err) + } + + locks := cache.CachedLocks() + for _, l := range testLocks { + assert.Contains(t, locks, l) + } + assert.Equal(t, len(testLocks), len(locks)) + + err = cache.CacheUnlockByPath("folder/test2.dat") + 
assert.Nil(t, err) + + locks = cache.CachedLocks() + // delete item 1 from test locls + testLocks = append(testLocks[:1], testLocks[2:]...) + for _, l := range testLocks { + assert.Contains(t, locks, l) + } + assert.Equal(t, len(testLocks), len(locks)) + + err = cache.CacheUnlockById("101") + assert.Nil(t, err) + + locks = cache.CachedLocks() + testLocks = testLocks[1:] + for _, l := range testLocks { + assert.Contains(t, locks, l) + } + assert.Equal(t, len(testLocks), len(locks)) +} diff --git a/locking/locks.go b/locking/locks.go index d4c7cc9f..136fa363 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "path/filepath" - "strings" "time" "github.com/git-lfs/git-lfs/api" @@ -27,7 +26,7 @@ var ( type Client struct { cfg *config.Configuration apiClient *api.Client - cache *kv.Store + cache *LockCache } // NewClient creates a new locking client with the given configuration @@ -43,11 +42,11 @@ func NewClient(cfg *config.Configuration) (*Client, error) { return nil, err } lockFile := filepath.Join(lockDir, "lockcache.db") - store, err := kv.NewStore(lockFile) + cache, err := NewLockCache(lockFile) if err != nil { return nil, err } - return &Client{cfg, apiClient, store}, nil + return &Client{cfg, apiClient, cache}, nil } // Close this client instance; must be called to dispose of resources @@ -82,7 +81,7 @@ func (c *Client) LockFile(path string) (Lock, error) { lock := c.newLockFromApi(*resp.Lock) - if err := c.cacheLock(lock); err != nil { + if err := c.cache.CacheLock(lock); err != nil { return Lock{}, fmt.Errorf("Error caching lock information: %v", err) } @@ -116,7 +115,7 @@ func (c *Client) UnlockFileById(id string, force bool) error { return fmt.Errorf("Server unable to unlock lock: %v", resp.Err) } - if err := c.cacheUnlockById(id); err != nil { + if err := c.cache.CacheUnlockById(id); err != nil { return fmt.Errorf("Error caching unlock information: %v", err) } @@ -135,7 +134,7 @@ type Lock struct { Name string // Email address of the person holding this lock Email string - // LockedAt tells you when this lock was acquired. + // LockedAt is the time at which this lock was acquired. 
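+	// (A zero time.Time here simply means the server never populated the
+	// field; the tests later in this series compare against exactly that
+	// zero value.)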
LockedAt time.Time } @@ -162,7 +161,7 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool } func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) { - cachedlocks := c.cachedLocks() + cachedlocks := c.cache.CachedLocks() path, filterByPath := filter["path"] id, filterById := filter["id"] lockCount := 0 @@ -249,78 +248,6 @@ func (c *Client) lockIdFromPath(path string) (string, error) { } } -// We want to use a single cache file for integrity, but to make it easy to -// list all locks, prefix the id->path map in a way we can identify (something -// that won't be in a path) -const idKeyPrefix = "*id*://" - -func (c *Client) encodeIdKey(id string) string { - // Safety against accidents - if !c.isIdKey(id) { - return idKeyPrefix + id - } - return id -} -func (c *Client) decodeIdKey(key string) string { - // Safety against accidents - if c.isIdKey(key) { - return key[len(idKeyPrefix):] - } - return key -} -func (c *Client) isIdKey(key string) bool { - return strings.HasPrefix(key, idKeyPrefix) -} - -// Cache a successful lock for faster local lookup later -func (c *Client) cacheLock(l Lock) error { - // Store reference in both directions - // Path -> Lock - c.cache.Set(l.Path, &l) - // EncodedId -> Lock (encoded so we can easily identify) - c.cache.Set(c.encodeIdKey(l.Id), &l) - return nil -} - -// Remove a cached lock by path becuase it's been relinquished -func (c *Client) cacheUnlockByPath(filePath string) error { - ilock := c.cache.Get(filePath) - if ilock != nil { - lock := ilock.(*Lock) - c.cache.Remove(lock.Path) - // Id as key is encoded - c.cache.Remove(c.encodeIdKey(lock.Id)) - } - return nil -} - -// Remove a cached lock by id because it's been relinquished -func (c *Client) cacheUnlockById(id string) error { - // Id as key is encoded - idkey := c.encodeIdKey(id) - ilock := c.cache.Get(idkey) - if ilock != nil { - lock := ilock.(*Lock) - c.cache.Remove(idkey) - c.cache.Remove(lock.Path) - } - return nil -} - -// Get the list of cached locked files -func (c *Client) cachedLocks() []Lock { - var locks []Lock - c.cache.Visit(func(key string, val interface{}) bool { - // Only report file->id entries not reverse - if !c.isIdKey(key) { - lock := val.(*Lock) - locks = append(locks, *lock) - } - return true // continue - }) - return locks -} - // Fetch locked files for the current committer and cache them locally // This can be used to sync up locked files when moving machines func (c *Client) fetchLocksToCache() error { @@ -332,12 +259,12 @@ func (c *Client) fetchLocksToCache() error { } // We're going to overwrite the entire local cache - c.cache.RemoveAll() + c.cache.Clear() _, email := c.cfg.CurrentCommitter() for _, l := range locks { if l.Email == email { - c.cacheLock(l) + c.cache.CacheLock(l) } } diff --git a/locking/locks_test.go b/locking/locks_test.go index a8132a64..fe802a24 100644 --- a/locking/locks_test.go +++ b/locking/locks_test.go @@ -15,59 +15,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestLockCache(t *testing.T) { - var err error - - oldStore := config.LocalGitStorageDir - config.LocalGitStorageDir, err = ioutil.TempDir("", "testCacheLock") - assert.Nil(t, err) - defer func() { - os.RemoveAll(config.LocalGitStorageDir) - config.LocalGitStorageDir = oldStore - }() - - client, err := NewClient(config.NewFrom(config.Values{})) - assert.Nil(t, err) - - testLocks := []Lock{ - Lock{Path: "folder/test1.dat", Id: "101"}, - Lock{Path: "folder/test2.dat", Id: "102"}, - Lock{Path: "root.dat", Id: "103"}, 
- } - - for _, l := range testLocks { - err = client.cacheLock(l) - assert.Nil(t, err) - } - - locks := client.cachedLocks() - for _, l := range testLocks { - assert.Contains(t, locks, l) - } - assert.Equal(t, len(testLocks), len(locks)) - - err = client.cacheUnlockByPath("folder/test2.dat") - assert.Nil(t, err) - - locks = client.cachedLocks() - // delete item 1 from test locls - testLocks = append(testLocks[:1], testLocks[2:]...) - for _, l := range testLocks { - assert.Contains(t, locks, l) - } - assert.Equal(t, len(testLocks), len(locks)) - - err = client.cacheUnlockById("101") - assert.Nil(t, err) - - locks = client.cachedLocks() - testLocks = testLocks[1:] - for _, l := range testLocks { - assert.Contains(t, locks, l) - } - assert.Equal(t, len(testLocks), len(locks)) -} - type TestLifecycle struct { } @@ -127,14 +74,16 @@ func TestRefreshCache(t *testing.T) { client.apiClient = api.NewClient(&TestLifecycle{}) // Should start with no cached items - locks := client.cachedLocks() + locks, err := client.SearchLocks(nil, 0, true) + assert.Nil(t, err) assert.Empty(t, locks) // Should load from test data, just Fred's err = client.fetchLocksToCache() assert.Nil(t, err) - locks = client.cachedLocks() + locks, err = client.SearchLocks(nil, 0, true) + assert.Nil(t, err) // Need to include zero time in structure for equal to work zeroTime := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) From c8d4ae03a312ac3f5bf65463f3944d0aef3233ca Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Tue, 13 Dec 2016 14:54:24 +0000 Subject: [PATCH 32/68] Protect against failed casts --- locking/cache.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/locking/cache.go b/locking/cache.go index 7a706169..560e2213 100644 --- a/locking/cache.go +++ b/locking/cache.go @@ -56,8 +56,7 @@ func (c *LockCache) CacheLock(l Lock) error { // Remove a cached lock by path becuase it's been relinquished func (c *LockCache) CacheUnlockByPath(filePath string) error { ilock := c.kv.Get(filePath) - if ilock != nil { - lock := ilock.(*Lock) + if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(lock.Path) // Id as key is encoded c.kv.Remove(c.encodeIdKey(lock.Id)) @@ -70,8 +69,7 @@ func (c *LockCache) CacheUnlockById(id string) error { // Id as key is encoded idkey := c.encodeIdKey(id) ilock := c.kv.Get(idkey) - if ilock != nil { - lock := ilock.(*Lock) + if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(idkey) c.kv.Remove(lock.Path) } From b73eea59d71503fc181aa8a0e1879013495aecfb Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Wed, 14 Dec 2016 09:34:03 +0000 Subject: [PATCH 33/68] Add test to prove set operator [] works in filter matching This is supported by .gitignore --- filepathfilter/filepathfilter_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/filepathfilter/filepathfilter_test.go b/filepathfilter/filepathfilter_test.go index 10819c36..ab678cfc 100644 --- a/filepathfilter/filepathfilter_test.go +++ b/filepathfilter/filepathfilter_test.go @@ -78,6 +78,7 @@ func TestFilterAllows(t *testing.T) { filterTest{true, []string{"test/fil*"}, nil}, filterTest{false, []string{"test/g*"}, nil}, filterTest{true, []string{"tes*/*"}, nil}, + filterTest{true, []string{"[Tt]est/[Ff]ilename.dat"}, nil}, // Exclusion filterTest{false, nil, []string{"*.dat"}}, filterTest{false, nil, []string{"file*.dat"}}, @@ -96,6 +97,7 @@ func TestFilterAllows(t *testing.T) { filterTest{false, nil, []string{"test/fil*"}}, filterTest{true, nil, []string{"test/g*"}}, filterTest{false, nil, 
[]string{"tes*/*"}}, + filterTest{false, nil, []string{"[Tt]est/[Ff]ilename.dat"}}, // Both filterTest{true, []string{"test/filename.dat"}, []string{"test/notfilename.dat"}}, From 2d42802239b807a01bb80bba9a450fa21b188421 Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Wed, 14 Dec 2016 09:44:58 +0000 Subject: [PATCH 34/68] Refine naming for LockCache methods --- locking/cache.go | 8 ++++---- locking/cache_test.go | 12 ++++++------ locking/locks.go | 8 ++++---- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/locking/cache.go b/locking/cache.go index 560e2213..765e7298 100644 --- a/locking/cache.go +++ b/locking/cache.go @@ -44,7 +44,7 @@ func (c *LockCache) isIdKey(key string) bool { } // Cache a successful lock for faster local lookup later -func (c *LockCache) CacheLock(l Lock) error { +func (c *LockCache) Add(l Lock) error { // Store reference in both directions // Path -> Lock c.kv.Set(l.Path, &l) @@ -54,7 +54,7 @@ func (c *LockCache) CacheLock(l Lock) error { } // Remove a cached lock by path becuase it's been relinquished -func (c *LockCache) CacheUnlockByPath(filePath string) error { +func (c *LockCache) RemoveByPath(filePath string) error { ilock := c.kv.Get(filePath) if lock, ok := ilock.(*Lock); ok && lock != nil { c.kv.Remove(lock.Path) @@ -65,7 +65,7 @@ func (c *LockCache) CacheUnlockByPath(filePath string) error { } // Remove a cached lock by id because it's been relinquished -func (c *LockCache) CacheUnlockById(id string) error { +func (c *LockCache) RemoveById(id string) error { // Id as key is encoded idkey := c.encodeIdKey(id) ilock := c.kv.Get(idkey) @@ -77,7 +77,7 @@ func (c *LockCache) CacheUnlockById(id string) error { } // Get the list of cached locked files -func (c *LockCache) CachedLocks() []Lock { +func (c *LockCache) Locks() []Lock { var locks []Lock c.kv.Visit(func(key string, val interface{}) bool { // Only report file->id entries not reverse diff --git a/locking/cache_test.go b/locking/cache_test.go index 60bf0458..c8f28fc8 100644 --- a/locking/cache_test.go +++ b/locking/cache_test.go @@ -28,20 +28,20 @@ func TestLockCache(t *testing.T) { } for _, l := range testLocks { - err = cache.CacheLock(l) + err = cache.Add(l) assert.Nil(t, err) } - locks := cache.CachedLocks() + locks := cache.Locks() for _, l := range testLocks { assert.Contains(t, locks, l) } assert.Equal(t, len(testLocks), len(locks)) - err = cache.CacheUnlockByPath("folder/test2.dat") + err = cache.RemoveByPath("folder/test2.dat") assert.Nil(t, err) - locks = cache.CachedLocks() + locks = cache.Locks() // delete item 1 from test locls testLocks = append(testLocks[:1], testLocks[2:]...) 
for _, l := range testLocks { @@ -49,10 +49,10 @@ func TestLockCache(t *testing.T) { } assert.Equal(t, len(testLocks), len(locks)) - err = cache.CacheUnlockById("101") + err = cache.RemoveById("101") assert.Nil(t, err) - locks = cache.CachedLocks() + locks = cache.Locks() testLocks = testLocks[1:] for _, l := range testLocks { assert.Contains(t, locks, l) diff --git a/locking/locks.go b/locking/locks.go index 136fa363..12f947c9 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -81,7 +81,7 @@ func (c *Client) LockFile(path string) (Lock, error) { lock := c.newLockFromApi(*resp.Lock) - if err := c.cache.CacheLock(lock); err != nil { + if err := c.cache.Add(lock); err != nil { return Lock{}, fmt.Errorf("Error caching lock information: %v", err) } @@ -115,7 +115,7 @@ func (c *Client) UnlockFileById(id string, force bool) error { return fmt.Errorf("Server unable to unlock lock: %v", resp.Err) } - if err := c.cache.CacheUnlockById(id); err != nil { + if err := c.cache.RemoveById(id); err != nil { return fmt.Errorf("Error caching unlock information: %v", err) } @@ -161,7 +161,7 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool } func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, error) { - cachedlocks := c.cache.CachedLocks() + cachedlocks := c.cache.Locks() path, filterByPath := filter["path"] id, filterById := filter["id"] lockCount := 0 @@ -264,7 +264,7 @@ func (c *Client) fetchLocksToCache() error { _, email := c.cfg.CurrentCommitter() for _, l := range locks { if l.Email == email { - c.cache.CacheLock(l) + c.cache.Add(l) } } From 9e815e53b48450c8f7d1439c82dda7608ece5dfd Mon Sep 17 00:00:00 2001 From: Steve Streeting Date: Wed, 14 Dec 2016 09:46:36 +0000 Subject: [PATCH 35/68] fetchLocksToCache -> refreshLockCache --- locking/locks.go | 2 +- locking/locks_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/locking/locks.go b/locking/locks.go index 12f947c9..e1dd8ac2 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -250,7 +250,7 @@ func (c *Client) lockIdFromPath(path string) (string, error) { // Fetch locked files for the current committer and cache them locally // This can be used to sync up locked files when moving machines -func (c *Client) fetchLocksToCache() error { +func (c *Client) refreshLockCache() error { // TODO: filters don't seem to currently define how to search for a // committer's email. Is it "committer.email"? 
For now, just iterate locks, err := c.SearchLocks(nil, 0, false) diff --git a/locking/locks_test.go b/locking/locks_test.go index fe802a24..a5f8d7f2 100644 --- a/locking/locks_test.go +++ b/locking/locks_test.go @@ -79,7 +79,7 @@ func TestRefreshCache(t *testing.T) { assert.Empty(t, locks) // Should load from test data, just Fred's - err = client.fetchLocksToCache() + err = client.refreshLockCache() assert.Nil(t, err) locks, err = client.SearchLocks(nil, 0, true) From 62d0b51ada64411e0045d49c0865ef3c17dcad2e Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 10:53:34 -0700 Subject: [PATCH 36/68] Remove unused callback arg on ScanTree() --- commands/command_checkout.go | 4 ++-- commands/command_fetch.go | 2 +- commands/command_ls_files.go | 2 +- lfs/gitscanner.go | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 6d21cae8..65f1844d 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -56,7 +56,7 @@ func checkoutFromFetchChan(in chan *lfs.WrappedPointer, filter *filepathfilter.F }) chgitscanner.Filter = filter - if err := chgitscanner.ScanTree(ref.Sha, nil); err != nil { + if err := chgitscanner.ScanTree(ref.Sha); err != nil { ExitWithError(err) } @@ -108,7 +108,7 @@ func checkoutWithIncludeExclude(filter *filepathfilter.Filter) { chgitscanner.Filter = filter - if err := chgitscanner.ScanTree(ref.Sha, nil); err != nil { + if err := chgitscanner.ScanTree(ref.Sha); err != nil { ExitWithError(err) } chgitscanner.Close() diff --git a/commands/command_fetch.go b/commands/command_fetch.go index d3d5be56..ce280461 100644 --- a/commands/command_fetch.go +++ b/commands/command_fetch.go @@ -124,7 +124,7 @@ func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.Wr tempgitscanner.Filter = filter - if err := tempgitscanner.ScanTree(ref, nil); err != nil { + if err := tempgitscanner.ScanTree(ref); err != nil { return nil, err } diff --git a/commands/command_ls_files.go b/commands/command_ls_files.go index ec0d12d0..af2b4f51 100644 --- a/commands/command_ls_files.go +++ b/commands/command_ls_files.go @@ -42,7 +42,7 @@ func lsFilesCommand(cmd *cobra.Command, args []string) { }) defer gitscanner.Close() - if err := gitscanner.ScanTree(ref, nil); err != nil { + if err := gitscanner.ScanTree(ref); err != nil { Exit("Could not scan for Git LFS tree: %s", err) } } diff --git a/lfs/gitscanner.go b/lfs/gitscanner.go index 123e6cfc..952842c4 100644 --- a/lfs/gitscanner.go +++ b/lfs/gitscanner.go @@ -132,8 +132,8 @@ func (s *GitScanner) ScanAll(cb GitScannerCallback) error { // ScanTree takes a ref and returns WrappedPointer objects in the tree at that // ref. Differs from ScanRefs in that multiple files in the tree with the same // content are all reported. 
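// A minimal caller under the new single-argument form, as a sketch that
// mirrors the call sites updated above (error handling elided):
//
//	scanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {
//		// every pointer in the tree arrives here, duplicates included
//	})
//	if err := scanner.ScanTree(ref.Sha); err != nil {
//		// handle the scan failure
//	}
//	scanner.Close()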
-func (s *GitScanner) ScanTree(ref string, cb GitScannerCallback) error { - callback, err := firstGitScannerCallback(cb, s.callback) +func (s *GitScanner) ScanTree(ref string) error { + callback, err := firstGitScannerCallback(s.callback) if err != nil { return err } From de8c915678ccec29010de51eff8095e2c7c3b263 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 11:36:55 -0700 Subject: [PATCH 37/68] move single-use funcs to the pull cmd --- commands/command_checkout.go | 46 --------------------------- commands/command_fetch.go | 12 -------- commands/command_pull.go | 60 ++++++++++++++++++++++++++++++++++++ 3 files changed, 60 insertions(+), 58 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 65f1844d..6cdbb6e8 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -38,52 +38,6 @@ func checkoutCommand(cmd *cobra.Command, args []string) { checkoutWithIncludeExclude(filter) } -func checkoutFromFetchChan(in chan *lfs.WrappedPointer, filter *filepathfilter.Filter) { - ref, err := git.CurrentRef() - if err != nil { - Panic(err, "Could not checkout") - } - - // Need to ScanTree to identify multiple files with the same content (fetch will only report oids once) - // use new gitscanner so mapping has all the scanned pointers before continuing - mapping := make(map[string][]*lfs.WrappedPointer) - chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { - if err != nil { - Panic(err, "Could not scan for Git LFS files") - return - } - mapping[p.Oid] = append(mapping[p.Oid], p) - }) - chgitscanner.Filter = filter - - if err := chgitscanner.ScanTree(ref.Sha); err != nil { - ExitWithError(err) - } - - chgitscanner.Close() - - // Launch git update-index - c := make(chan *lfs.WrappedPointer) - - var wait sync.WaitGroup - wait.Add(1) - - go func() { - checkoutWithChan(c) - wait.Done() - }() - - // Feed it from in, which comes from fetch - for p := range in { - // Add all of the files for this oid - for _, fp := range mapping[p.Oid] { - c <- fp - } - } - close(c) - wait.Wait() -} - func checkoutWithIncludeExclude(filter *filepathfilter.Filter) { ref, err := git.CurrentRef() if err != nil { diff --git a/commands/command_fetch.go b/commands/command_fetch.go index ce280461..cf6e103a 100644 --- a/commands/command_fetch.go +++ b/commands/command_fetch.go @@ -132,18 +132,6 @@ func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.Wr return pointers, multiErr } -func fetchRefToChan(ref string, filter *filepathfilter.Filter) chan *lfs.WrappedPointer { - c := make(chan *lfs.WrappedPointer) - pointers, err := pointersToFetchForRef(ref, filter) - if err != nil { - Panic(err, "Could not scan for Git LFS files") - } - - go fetchAndReportToChan(pointers, filter, c) - - return c -} - // Fetch all binaries for a given ref (that we don't have already) func fetchRef(ref string, filter *filepathfilter.Filter) bool { pointers, err := pointersToFetchForRef(ref, filter) diff --git a/commands/command_pull.go b/commands/command_pull.go index 4fced5dc..ce389082 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -2,9 +2,11 @@ package commands import ( "fmt" + "sync" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" + "github.com/git-lfs/git-lfs/lfs" "github.com/spf13/cobra" ) @@ -42,6 +44,64 @@ func pull(filter *filepathfilter.Filter) { checkoutFromFetchChan(c, filter) } +func fetchRefToChan(ref string, filter *filepathfilter.Filter) chan *lfs.WrappedPointer 
{ + c := make(chan *lfs.WrappedPointer) + pointers, err := pointersToFetchForRef(ref, filter) + if err != nil { + Panic(err, "Could not scan for Git LFS files") + } + + go fetchAndReportToChan(pointers, filter, c) + + return c +} + +func checkoutFromFetchChan(in chan *lfs.WrappedPointer, filter *filepathfilter.Filter) { + ref, err := git.CurrentRef() + if err != nil { + Panic(err, "Could not checkout") + } + + // Need to ScanTree to identify multiple files with the same content (fetch will only report oids once) + // use new gitscanner so mapping has all the scanned pointers before continuing + mapping := make(map[string][]*lfs.WrappedPointer) + chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { + if err != nil { + Panic(err, "Could not scan for Git LFS files") + return + } + mapping[p.Oid] = append(mapping[p.Oid], p) + }) + chgitscanner.Filter = filter + + if err := chgitscanner.ScanTree(ref.Sha); err != nil { + ExitWithError(err) + } + + chgitscanner.Close() + + // Launch git update-index + c := make(chan *lfs.WrappedPointer) + + var wait sync.WaitGroup + wait.Add(1) + + go func() { + checkoutWithChan(c) + wait.Done() + }() + + // Feed it from in, which comes from fetch + for p := range in { + // Add all of the files for this oid + for _, fp := range mapping[p.Oid] { + c <- fp + } + } + close(c) + wait.Wait() +} + func init() { RegisterCommand("pull", pullCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") From 1a4b2cf9b81e5e41b1d0cfaff1e0672160d80cb2 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 10:56:44 -0700 Subject: [PATCH 38/68] extract little rootedPaths helper --- commands/command_checkout.go | 37 +++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 6cdbb6e8..97b498c2 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -18,23 +18,7 @@ import ( func checkoutCommand(cmd *cobra.Command, args []string) { requireInRepo() - - // Parameters are filters - // firstly convert any pathspecs to the root of the repo, in case this is being executed in a sub-folder - var rootedpaths []string - - inchan := make(chan string, 1) - outchan, err := lfs.ConvertCwdFilesRelativeToRepo(inchan) - if err != nil { - Panic(err, "Could not checkout") - } - for _, arg := range args { - inchan <- arg - rootedpaths = append(rootedpaths, <-outchan) - } - close(inchan) - - filter := filepathfilter.New(rootedpaths, nil) + filter := filepathfilter.New(rootedPaths(args), nil) checkoutWithIncludeExclude(filter) } @@ -193,6 +177,25 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { } } +// Parameters are filters +// firstly convert any pathspecs to the root of the repo, in case this is being +// executed in a sub-folder +func rootedPaths(args []string) []string { + inchan := make(chan string, 1) + outchan, err := lfs.ConvertCwdFilesRelativeToRepo(inchan) + if err != nil { + Panic(err, "Could not checkout") + } + + rootedpaths := make([]string, 0, len(args)) + for _, arg := range args { + inchan <- arg + rootedpaths = append(rootedpaths, <-outchan) + } + close(inchan) + return rootedpaths +} + func init() { RegisterCommand("checkout", checkoutCommand, nil) } From 68efd0536a2eb3e0514ee4d2da7ab9c7a084fcb8 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 11:34:29 -0700 Subject: [PATCH 39/68] Rewrite path converters to a struct. 
Channels are unnecessary --- commands/command_checkout.go | 14 ++-- lfs/util.go | 136 ++++++++++++++++++----------------- 2 files changed, 76 insertions(+), 74 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 97b498c2..3d695c00 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -93,8 +93,7 @@ func checkoutWithIncludeExclude(filter *filepathfilter.Filter) { func checkoutWithChan(in <-chan *lfs.WrappedPointer) { // Get a converter from repo-relative to cwd-relative // Since writing data & calling git update-index must be relative to cwd - repopathchan := make(chan string, 1) - cwdpathchan, err := lfs.ConvertRepoFilesRelativeToCwd(repopathchan) + pathConverter, err := lfs.NewRepoToCurrentPathConverter() if err != nil { Panic(err, "Could not convert file paths") } @@ -135,8 +134,7 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { continue } - repopathchan <- pointer.Name - cwdfilepath := <-cwdpathchan + cwdfilepath := pathConverter.Convert(pointer.Name) err = lfs.PointerSmudgeToFile(cwdfilepath, pointer.Pointer, false, manifest, nil) if err != nil { @@ -167,7 +165,6 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { updateIdxStdin.Write([]byte(cwdfilepath + "\n")) } - close(repopathchan) if cmd != nil && updateIdxStdin != nil { updateIdxStdin.Close() @@ -181,18 +178,15 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { // firstly convert any pathspecs to the root of the repo, in case this is being // executed in a sub-folder func rootedPaths(args []string) []string { - inchan := make(chan string, 1) - outchan, err := lfs.ConvertCwdFilesRelativeToRepo(inchan) + pathConverter, err := lfs.NewCurrentToRepoPathConverter() if err != nil { Panic(err, "Could not checkout") } rootedpaths := make([]string, 0, len(args)) for _, arg := range args { - inchan <- arg - rootedpaths = append(rootedpaths, <-outchan) + rootedpaths = append(rootedpaths, pathConverter.Convert(arg)) } - close(inchan) return rootedpaths } diff --git a/lfs/util.go b/lfs/util.go index 16836978..06b67e7d 100644 --- a/lfs/util.go +++ b/lfs/util.go @@ -87,90 +87,98 @@ func GetPlatform() Platform { return currentPlatform } +type PathConverter interface { + Convert(string) string +} + // Convert filenames expressed relative to the root of the repo relative to the // current working dir. 
Useful when needing to calling git with results from a rooted command, // but the user is in a subdir of their repo // Pass in a channel which you will fill with relative files & receive a channel which will get results -func ConvertRepoFilesRelativeToCwd(repochan <-chan string) (<-chan string, error) { - wd, err := os.Getwd() +func NewRepoToCurrentPathConverter() (PathConverter, error) { + r, c, p, err := pathConverterArgs() if err != nil { - return nil, fmt.Errorf("Unable to get working dir: %v", err) - } - wd = tools.ResolveSymlinks(wd) - - // Early-out if working dir is root dir, same result - passthrough := false - if config.LocalWorkingDir == wd { - passthrough = true + return nil, err } - outchan := make(chan string, 1) + return &repoToCurrentPathConverter{ + repoDir: r, + currDir: c, + passthrough: p, + }, nil +} - go func() { - for f := range repochan { - if passthrough { - outchan <- f - continue - } - abs := filepath.Join(config.LocalWorkingDir, f) - rel, err := filepath.Rel(wd, abs) - if err != nil { - // Use absolute file instead - outchan <- abs - } else { - outchan <- rel - } - } - close(outchan) - }() +type repoToCurrentPathConverter struct { + repoDir string + currDir string + passthrough bool +} - return outchan, nil +func (p *repoToCurrentPathConverter) Convert(filename string) string { + if p.passthrough { + return filename + } + + abs := filepath.Join(p.repoDir, filename) + rel, err := filepath.Rel(p.currDir, abs) + if err != nil { + // Use absolute file instead + return abs + } else { + return rel + } } // Convert filenames expressed relative to the current directory to be // relative to the repo root. Useful when calling git with arguments that requires them // to be rooted but the user is in a subdir of their repo & expects to use relative args // Pass in a channel which you will fill with relative files & receive a channel which will get results -func ConvertCwdFilesRelativeToRepo(cwdchan <-chan string) (<-chan string, error) { - curdir, err := os.Getwd() +func NewCurrentToRepoPathConverter() (PathConverter, error) { + r, c, p, err := pathConverterArgs() if err != nil { - return nil, fmt.Errorf("Could not retrieve current directory: %v", err) - } - // Make sure to resolve symlinks - curdir = tools.ResolveSymlinks(curdir) - - // Early-out if working dir is root dir, same result - passthrough := false - if config.LocalWorkingDir == curdir { - passthrough = true + return nil, err } - outchan := make(chan string, 1) - go func() { - for p := range cwdchan { - if passthrough { - outchan <- p - continue - } - var abs string - if filepath.IsAbs(p) { - abs = tools.ResolveSymlinks(p) - } else { - abs = filepath.Join(curdir, p) - } - reltoroot, err := filepath.Rel(config.LocalWorkingDir, abs) - if err != nil { - // Can't do this, use absolute as best fallback - outchan <- abs - } else { - outchan <- reltoroot - } - } - close(outchan) - }() + return ¤tToRepoPathConverter{ + repoDir: r, + currDir: c, + passthrough: p, + }, nil +} - return outchan, nil +type currentToRepoPathConverter struct { + repoDir string + currDir string + passthrough bool +} +func (p *currentToRepoPathConverter) Convert(filename string) string { + if p.passthrough { + return filename + } + + var abs string + if filepath.IsAbs(filename) { + abs = tools.ResolveSymlinks(filename) + } else { + abs = filepath.Join(p.currDir, filename) + } + reltoroot, err := filepath.Rel(p.repoDir, abs) + if err != nil { + // Can't do this, use absolute as best fallback + return abs + } else { + return reltoroot + } +} + +func 
pathConverterArgs() (string, string, bool, error) { + currDir, err := os.Getwd() + if err != nil { + return "", "", false, fmt.Errorf("Unable to get working dir: %v", err) + } + currDir = tools.ResolveSymlinks(currDir) + return config.LocalWorkingDir, currDir, config.LocalWorkingDir == currDir, nil } // Are we running on Windows? Need to handle some extra path shenanigans From 5ef6b8a1bda07212b97133f2a7794cd6cbfa8283 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 11:54:12 -0700 Subject: [PATCH 40/68] extract gitIndexer --- commands/command_checkout.go | 94 ++++++++++++++++++++++++------------ 1 file changed, 62 insertions(+), 32 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 3d695c00..6e343174 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -98,13 +98,8 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { Panic(err, "Could not convert file paths") } - // Don't fire up the update-index command until we have at least one file to - // give it. Otherwise git interprets the lack of arguments to mean param-less update-index - // which can trigger entire working copy to be re-examined, which triggers clean filters - // and which has unexpected side effects (e.g. downloading filtered-out files) - var cmd *exec.Cmd - var updateIdxStdin io.WriteCloser - var updateIdxOut bytes.Buffer + manifest := TransferManifest() + gitIndexer := &gitIndexer{} // From this point on, git update-index is running. Code in this loop MUST // NOT Panic() or otherwise cause the process to exit. If the process exits @@ -112,11 +107,7 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { // locked state. // As files come in, write them to the wd and update the index - - manifest := TransferManifest() - for pointer := range in { - // Check the content - either missing or still this pointer (not exist is ok) filepointer, err := lfs.DecodePointerFromFile(pointer.Name) if err != nil && !os.IsNotExist(err) { @@ -147,30 +138,14 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { } } - if cmd == nil { - // Fire up the update-index command - cmd = exec.Command("git", "update-index", "-q", "--refresh", "--stdin") - cmd.Stdout = &updateIdxOut - cmd.Stderr = &updateIdxOut - updateIdxStdin, err = cmd.StdinPipe() - if err != nil { - Panic(err, "Could not update the index") - } - - if err := cmd.Start(); err != nil { - Panic(err, "Could not update the index") - } - + // errors are only returned when the gitIndexer is starting a new cmd + if err := gitIndexer.Add(cwdfilepath); err != nil { + Panic(err, "Could not update the index") } - - updateIdxStdin.Write([]byte(cwdfilepath + "\n")) } - if cmd != nil && updateIdxStdin != nil { - updateIdxStdin.Close() - if err := cmd.Wait(); err != nil { - LoggedError(err, "Error updating the git index:\n%s", updateIdxOut.String()) - } + if err := gitIndexer.Close(); err != nil { + LoggedError(err, "Error updating the git index:\n%s", gitIndexer.Output()) } } @@ -193,3 +168,58 @@ func rootedPaths(args []string) []string { func init() { RegisterCommand("checkout", checkoutCommand, nil) } + +// Don't fire up the update-index command until we have at least one file to +// give it. Otherwise git interprets the lack of arguments to mean param-less update-index +// which can trigger entire working copy to be re-examined, which triggers clean filters +// and which has unexpected side effects (e.g. 
downloading filtered-out files) +type gitIndexer struct { + cmd *exec.Cmd + input io.WriteCloser + output bytes.Buffer + mu sync.Mutex +} + +func (i *gitIndexer) Add(path string) error { + i.mu.Lock() + defer i.mu.Unlock() + + if i.cmd == nil { + // Fire up the update-index command + i.cmd = exec.Command("git", "update-index", "-q", "--refresh", "--stdin") + i.cmd.Stdout = &i.output + i.cmd.Stderr = &i.output + stdin, err := i.cmd.StdinPipe() + if err == nil { + err = i.cmd.Start() + } + + if err != nil { + return err + } + + i.input = stdin + } + + i.input.Write([]byte(path + "\n")) + return nil +} + +func (i *gitIndexer) Output() string { + return i.output.String() +} + +func (i *gitIndexer) Close() error { + i.mu.Lock() + defer i.mu.Unlock() + + if i.input != nil { + i.input.Close() + } + + if i.cmd != nil { + return i.cmd.Wait() + } + + return nil +} From 0d5d391f92d5146ba90f8d7cbcd7852279275232 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 12:01:53 -0700 Subject: [PATCH 41/68] Introduced checkout() --- commands/command_checkout.go | 87 +++++++++++------------------------- commands/command_pull.go | 45 +++++++++++++++++++ 2 files changed, 72 insertions(+), 60 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 6e343174..c7f7d239 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -13,6 +13,7 @@ import ( "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" + "github.com/git-lfs/git-lfs/transfer" "github.com/spf13/cobra" ) @@ -83,70 +84,36 @@ func checkoutWithIncludeExclude(filter *filepathfilter.Filter) { meter.Finish() } -// Populate the working copy with the real content of objects where the file is -// either missing, or contains a matching pointer placeholder, from a list of pointers. -// If the file exists but has other content it is left alone -// Callers of this function MUST NOT Panic or otherwise exit the process -// without waiting for this function to shut down. If the process exits while -// update-index is in the middle of processing a file the git index can be left -// in a locked state. -func checkoutWithChan(in <-chan *lfs.WrappedPointer) { - // Get a converter from repo-relative to cwd-relative - // Since writing data & calling git update-index must be relative to cwd - pathConverter, err := lfs.NewRepoToCurrentPathConverter() +func checkout(pointer *lfs.WrappedPointer, pathConverter lfs.PathConverter, manifest *transfer.Manifest) (string, error) { + // Check the content - either missing or still this pointer (not exist is ok) + filepointer, err := lfs.DecodePointerFromFile(pointer.Name) + if err != nil && !os.IsNotExist(err) { + if errors.IsNotAPointerError(err) { + // File has non-pointer content, leave it alone + return "", nil + } + return "", err + } + + if filepointer != nil && filepointer.Oid != pointer.Oid { + // User has probably manually reset a file to another commit + // while leaving it a pointer; don't mess with this + return "", nil + } + + cwdfilepath := pathConverter.Convert(pointer.Name) + + err = lfs.PointerSmudgeToFile(cwdfilepath, pointer.Pointer, false, manifest, nil) if err != nil { - Panic(err, "Could not convert file paths") - } - - manifest := TransferManifest() - gitIndexer := &gitIndexer{} - - // From this point on, git update-index is running. Code in this loop MUST - // NOT Panic() or otherwise cause the process to exit. 
If the process exits - // while update-index is in the middle of updating, the index can remain in a - // locked state. - - // As files come in, write them to the wd and update the index - for pointer := range in { - // Check the content - either missing or still this pointer (not exist is ok) - filepointer, err := lfs.DecodePointerFromFile(pointer.Name) - if err != nil && !os.IsNotExist(err) { - if errors.IsNotAPointerError(err) { - // File has non-pointer content, leave it alone - continue - } - LoggedError(err, "Problem accessing %v", pointer.Name) - continue - } - - if filepointer != nil && filepointer.Oid != pointer.Oid { - // User has probably manually reset a file to another commit - // while leaving it a pointer; don't mess with this - continue - } - - cwdfilepath := pathConverter.Convert(pointer.Name) - - err = lfs.PointerSmudgeToFile(cwdfilepath, pointer.Pointer, false, manifest, nil) - if err != nil { - if errors.IsDownloadDeclinedError(err) { - // acceptable error, data not local (fetch not run or include/exclude) - LoggedError(err, "Skipped checkout for %v, content not local. Use fetch to download.", pointer.Name) - } else { - LoggedError(err, "Could not checkout file") - continue - } - } - - // errors are only returned when the gitIndexer is starting a new cmd - if err := gitIndexer.Add(cwdfilepath); err != nil { - Panic(err, "Could not update the index") + if errors.IsDownloadDeclinedError(err) { + // acceptable error, data not local (fetch not run or include/exclude) + return "", fmt.Errorf("Skipped checkout for %q, content not local. Use fetch to download.", pointer.Name) + } else { + return "", fmt.Errorf("Could not check out %q", pointer.Name) } } - if err := gitIndexer.Close(); err != nil { - LoggedError(err, "Error updating the git index:\n%s", gitIndexer.Output()) - } + return cwdfilepath, nil } // Parameters are filters diff --git a/commands/command_pull.go b/commands/command_pull.go index ce389082..933107ba 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -102,6 +102,51 @@ func checkoutFromFetchChan(in chan *lfs.WrappedPointer, filter *filepathfilter.F wait.Wait() } +// Populate the working copy with the real content of objects where the file is +// either missing, or contains a matching pointer placeholder, from a list of pointers. +// If the file exists but has other content it is left alone +// Callers of this function MUST NOT Panic or otherwise exit the process +// without waiting for this function to shut down. If the process exits while +// update-index is in the middle of processing a file the git index can be left +// in a locked state. +func checkoutWithChan(in <-chan *lfs.WrappedPointer) { + // Get a converter from repo-relative to cwd-relative + // Since writing data & calling git update-index must be relative to cwd + pathConverter, err := lfs.NewRepoToCurrentPathConverter() + if err != nil { + Panic(err, "Could not convert file paths") + } + + manifest := TransferManifest() + gitIndexer := &gitIndexer{} + + // From this point on, git update-index is running. Code in this loop MUST + // NOT Panic() or otherwise cause the process to exit. If the process exits + // while update-index is in the middle of updating, the index can remain in a + // locked state. 
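+	// (Overall pipeline for `git lfs pull`: fetchRefToChan feeds pointers in
+	// as their downloads complete, checkout writes each one into the working
+	// tree, and gitIndexer batches the touched paths into a single
+	// `git update-index --stdin` run.)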
+ + // As files come in, write them to the wd and update the index + for pointer := range in { + cwdfilepath, err := checkout(pointer, pathConverter, manifest) + if err != nil { + LoggedError(err, "Checkout error") + } + + if len(cwdfilepath) == 0 { + continue + } + + // errors are only returned when the gitIndexer is starting a new cmd + if err := gitIndexer.Add(cwdfilepath); err != nil { + Panic(err, "Could not update the index") + } + } + + if err := gitIndexer.Close(); err != nil { + LoggedError(err, "Error updating the git index:\n%s", gitIndexer.Output()) + } +} + func init() { RegisterCommand("pull", pullCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths") From 08dd0309b2aab31663cb6e62b75828ca622a086f Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 12:15:50 -0700 Subject: [PATCH 42/68] remove the channel from the checkout cmd --- commands/command_checkout.go | 89 +++++++++++++++++------------------- 1 file changed, 42 insertions(+), 47 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index c7f7d239..edebf3e5 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -19,30 +19,49 @@ import ( func checkoutCommand(cmd *cobra.Command, args []string) { requireInRepo() - filter := filepathfilter.New(rootedPaths(args), nil) - checkoutWithIncludeExclude(filter) -} - -func checkoutWithIncludeExclude(filter *filepathfilter.Filter) { ref, err := git.CurrentRef() if err != nil { Panic(err, "Could not checkout") } - // this func has to load all pointers into memory - var pointers []*lfs.WrappedPointer - var multiErr error + // Get a converter from repo-relative to cwd-relative + // Since writing data & calling git update-index must be relative to cwd + pathConverter, err := lfs.NewRepoToCurrentPathConverter() + if err != nil { + Panic(err, "Could not convert file paths") + } + + var totalBytes int64 + meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) + manifest := TransferManifest() + gitIndexer := &gitIndexer{} + filter := filepathfilter.New(rootedPaths(args), nil) chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { - if multiErr != nil { - multiErr = fmt.Errorf("%v\n%v", multiErr, err) - } else { - multiErr = err - } + LoggedError(err, "Scanner error") return } - pointers = append(pointers, p) + totalBytes += p.Size + meter.Add(p.Size) + meter.StartTransfer(p.Name) + + cwdfilepath, err := checkout(p, pathConverter, manifest) + if err != nil { + LoggedError(err, "Checkout error") + } + + if len(cwdfilepath) > 0 { + // errors are only returned when the gitIndexer is starting a new cmd + if err := gitIndexer.Add(cwdfilepath); err != nil { + Panic(err, "Could not update the index") + } + } + + // not strictly correct (parallel) but we don't have a callback & it's just local + // plus only 1 slot in channel so it'll block & be close + meter.TransferBytes("checkout", p.Name, p.Size, totalBytes, int(p.Size)) + meter.FinishTransfer(p.Name) }) chgitscanner.Filter = filter @@ -50,38 +69,14 @@ func checkoutWithIncludeExclude(filter *filepathfilter.Filter) { if err := chgitscanner.ScanTree(ref.Sha); err != nil { ExitWithError(err) } - chgitscanner.Close() - if multiErr != nil { - Panic(multiErr, "Could not scan for Git LFS files") - } - - var wait sync.WaitGroup - wait.Add(1) - - c := make(chan *lfs.WrappedPointer, 1) - - go func() { - checkoutWithChan(c) - wait.Done() - }() - - meter := 
progress.NewMeter(progress.WithOSEnv(cfg.Os)) meter.Start() - var totalBytes int64 - for _, pointer := range pointers { - totalBytes += pointer.Size - meter.Add(totalBytes) - meter.StartTransfer(pointer.Name) - c <- pointer - // not strictly correct (parallel) but we don't have a callback & it's just local - // plus only 1 slot in channel so it'll block & be close - meter.TransferBytes("checkout", pointer.Name, pointer.Size, totalBytes, int(pointer.Size)) - meter.FinishTransfer(pointer.Name) - } - close(c) - wait.Wait() + chgitscanner.Close() meter.Finish() + + if err := gitIndexer.Close(); err != nil { + LoggedError(err, "Error updating the git index:\n%s", gitIndexer.Output()) + } } func checkout(pointer *lfs.WrappedPointer, pathConverter lfs.PathConverter, manifest *transfer.Manifest) (string, error) { @@ -132,10 +127,6 @@ func rootedPaths(args []string) []string { return rootedpaths } -func init() { - RegisterCommand("checkout", checkoutCommand, nil) -} - // Don't fire up the update-index command until we have at least one file to // give it. Otherwise git interprets the lack of arguments to mean param-less update-index // which can trigger entire working copy to be re-examined, which triggers clean filters @@ -190,3 +181,7 @@ func (i *gitIndexer) Close() error { return nil } + +func init() { + RegisterCommand("checkout", checkoutCommand, nil) +} From e18044f38b065ff5a57caba59a3fa59d3a4551d0 Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Wed, 14 Dec 2016 10:57:34 -0700 Subject: [PATCH 43/68] tq: use *tq.Transfer instead of Transferable and transfer.Transfer --- lfs/download_queue.go | 40 +++--------- lfs/upload_queue.go | 43 ++---------- test/test-push.sh | 2 +- tq/adapterbase.go | 36 ++--------- tq/basic_download.go | 27 ++++---- tq/basic_upload.go | 18 +++--- tq/custom.go | 57 ++++++++-------- tq/object.go | 140 ++++++++++++++++++++++++++++++++++++++++ tq/transfer.go | 18 ------ tq/transfer_queue.go | 147 ++++++++++++++++++++---------------------- tq/tus_upload.go | 29 +++++---- 11 files changed, 298 insertions(+), 259 deletions(-) create mode 100644 tq/object.go diff --git a/lfs/download_queue.go b/lfs/download_queue.go index 62360259..2ea18add 100644 --- a/lfs/download_queue.go +++ b/lfs/download_queue.go @@ -1,43 +1,19 @@ package lfs import ( - "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/tq" ) -type Downloadable struct { - pointer *WrappedPointer - object *api.ObjectResource -} +func NewDownloadable(p *WrappedPointer) *tq.Transfer { + path, _ := LocalMediaPath(p.Oid) -func (d *Downloadable) Object() *api.ObjectResource { - return d.object -} - -func (d *Downloadable) Oid() string { - return d.pointer.Oid -} - -func (d *Downloadable) Size() int64 { - return d.pointer.Size -} - -func (d *Downloadable) Name() string { - return d.pointer.Name -} - -func (d *Downloadable) Path() string { - p, _ := LocalMediaPath(d.pointer.Oid) - return p -} - -func (d *Downloadable) SetObject(o *api.ObjectResource) { - d.object = o -} - -func NewDownloadable(p *WrappedPointer) *Downloadable { - return &Downloadable{pointer: p} + return &tq.Transfer{ + Oid: p.Oid, + Size: p.Size, + Name: p.Name, + Path: path, + } } // NewDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download diff --git a/lfs/upload_queue.go b/lfs/upload_queue.go index 6c13a39f..b489cd98 100644 --- a/lfs/upload_queue.go +++ b/lfs/upload_queue.go @@ -5,48 +5,14 @@ import ( "os" "path/filepath" - "github.com/git-lfs/git-lfs/api" 
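	// (The api import goes away because NewDownloadable now returns a plain
	// *tq.Transfer carrying Oid, Size, Name and Path directly, instead of
	// wrapping an api.ObjectResource.)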
"github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/tq" ) -// Uploadable describes a file that can be uploaded. -type Uploadable struct { - oid string - OidPath string - Filename string - size int64 - object *api.ObjectResource -} - -func (u *Uploadable) Object() *api.ObjectResource { - return u.object -} - -func (u *Uploadable) Oid() string { - return u.oid -} - -func (u *Uploadable) Size() int64 { - return u.size -} - -func (u *Uploadable) Name() string { - return u.Filename -} - -func (u *Uploadable) SetObject(o *api.ObjectResource) { - u.object = o -} - -func (u *Uploadable) Path() string { - return u.OidPath -} - // NewUploadable builds the Uploadable from the given information. // "filename" can be empty if a raw object is pushed (see "object-id" flag in push command)/ -func NewUploadable(oid, filename string) (*Uploadable, error) { +func NewUploadable(oid, filename string) (*tq.Transfer, error) { localMediaPath, err := LocalMediaPath(oid) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) @@ -63,7 +29,12 @@ func NewUploadable(oid, filename string) (*Uploadable, error) { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } - return &Uploadable{oid: oid, OidPath: localMediaPath, Filename: filename, size: fi.Size()}, nil + return &tq.Transfer{ + Name: filename, + Oid: oid, + Size: fi.Size(), + Path: localMediaPath, + }, nil } // ensureFile makes sure that the cleanPath exists before pushing it. If it diff --git a/test/test-push.sh b/test/test-push.sh index 860a3867..d161c915 100755 --- a/test/test-push.sh +++ b/test/test-push.sh @@ -507,7 +507,7 @@ begin_test "push (retry with expired actions)" GIT_TRACE=1 git push origin master 2>&1 | tee push.log - [ "1" -eq "$(grep -c "expired, retrying..." push.log)" ] + [ "1" -eq "$(grep -c "enqueue retry" push.log)" ] grep "(1 of 1 files)" push.log ) end_test diff --git a/tq/adapterbase.go b/tq/adapterbase.go index 6b842bc2..52fff93e 100644 --- a/tq/adapterbase.go +++ b/tq/adapterbase.go @@ -3,19 +3,10 @@ package tq import ( "fmt" "sync" - "time" - "github.com/git-lfs/git-lfs/errors" "github.com/rubyist/tracerx" ) -const ( - // objectExpirationToTransfer is the duration we expect to have passed - // from the time that the object's expires_at property is checked to - // when the transfer is executed. - objectExpirationToTransfer = 5 * time.Second -) - // adapterBase implements the common functionality for core adapters which // process transfers with N workers handling an oid each, and which wait for // authentication to succeed on one worker before proceeding @@ -156,27 +147,12 @@ func (a *adapterBase) worker(workerNum int, ctx interface{}) { signalAuthOnResponse = false } } - tracerx.Printf("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Object.Oid) - - // transferTime is the time that we are to compare the transfer's - // `expired_at` property against. 
- // - // We add the `objectExpirationToTransfer` since there will be - // some time lost from this comparison to the time we actually - // transfer the object - transferTime := time.Now().Add(objectExpirationToTransfer) + tracerx.Printf("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Oid) // Actual transfer happens here var err error - if expAt, expired := t.Object.IsExpired(transferTime); expired { - tracerx.Printf("xfer: adapter %q worker %d found job for %q expired, retrying...", a.Name(), workerNum, t.Object.Oid) - err = errors.NewRetriableError(errors.Errorf( - "lfs/transfer: object %q expires at %s", - t.Object.Oid, expAt.In(time.Local).Format(time.RFC822), - )) - } else if t.Object.Size < 0 { - tracerx.Printf("xfer: adapter %q worker %d found invalid size for %q (got: %d), retrying...", a.Name(), workerNum, t.Object.Oid, t.Object.Size) - err = fmt.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Object.Oid, t.Object.Size) + if t.Size < 0 { + err = fmt.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Oid, t.Size) } else { err = a.transferImpl.DoTransfer(ctx, t, a.cb, authCallback) } @@ -184,7 +160,7 @@ func (a *adapterBase) worker(workerNum int, ctx interface{}) { // Mark the job as completed, and alter all listeners job.Done(err) - tracerx.Printf("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Object.Oid) + tracerx.Printf("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Oid) } // This will only happen if no jobs were submitted; just wake up all workers to finish if signalAuthOnResponse { @@ -203,10 +179,10 @@ func advanceCallbackProgress(cb ProgressCallback, t *Transfer, numBytes int64) { remainder := numBytes - read if remainder > int64(maxInt) { read += int64(maxInt) - cb(t.Name, t.Object.Size, read, maxInt) + cb(t.Name, t.Size, read, maxInt) } else { read += remainder - cb(t.Name, t.Object.Size, read, int(remainder)) + cb(t.Name, t.Size, read, int(remainder)) } } diff --git a/tq/basic_download.go b/tq/basic_download.go index 98335876..766bca93 100644 --- a/tq/basic_download.go +++ b/tq/basic_download.go @@ -72,7 +72,7 @@ func (a *basicDownloadAdapter) checkResumeDownload(t *Transfer) (outFile *os.Fil f.Close() return nil, 0, nil, err } - tracerx.Printf("xfer: Attempting to resume download of %q from byte %d", t.Object.Oid, n) + tracerx.Printf("xfer: Attempting to resume download of %q from byte %d", t.Oid, n) return f, n, hash, nil } @@ -80,7 +80,7 @@ func (a *basicDownloadAdapter) checkResumeDownload(t *Transfer) (outFile *os.Fil // Create or open a download file for resuming func (a *basicDownloadAdapter) downloadFilename(t *Transfer) string { // Not a temp file since we will be resuming it - return filepath.Join(a.tempDir(), t.Object.Oid+".tmp") + return filepath.Join(a.tempDir(), t.Oid+".tmp") } // download starts or resumes and download. 
Always closes dlFile if non-nil @@ -91,9 +91,10 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk defer dlFile.Close() } - rel, ok := t.Object.Rel("download") - if !ok { - return errors.New("Object not found on the server.") + rel, err := t.Actions.Get("download") + if err != nil { + return err + // return errors.New("Object not found on the server.") } req, err := httputil.NewHttpRequest("GET", rel.Href, rel.Header) @@ -103,17 +104,17 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk if fromByte > 0 { if dlFile == nil || hash == nil { - return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Object.Oid, fromByte) + return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Oid, fromByte) } // We could just use a start byte, but since we know the length be specific - req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Object.Size-1)) + req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Size-1)) } - res, err := httputil.DoHttpRequest(config.Config, req, t.Object.NeedsAuth()) + res, err := httputil.DoHttpRequest(config.Config, req, !t.Authenticated) if err != nil { // Special-case status code 416 () - fall back if fromByte > 0 && dlFile != nil && res.StatusCode == 416 { - tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Object.Oid, fromByte) + tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Oid, fromByte) dlFile.Close() os.Remove(dlFile.Name()) return a.download(t, cb, authOkFunc, nil, 0, nil) @@ -150,11 +151,11 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode) } if rangeRequestOk { - tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Object.Oid, fromByte) + tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Oid, fromByte) advanceCallbackProgress(cb, t, fromByte) } else { // Abort resume, perform regular download - tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Object.Oid, fromByte, failReason) + tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. 
Re-downloading from start", t.Oid, fromByte, failReason) dlFile.Close() os.Remove(dlFile.Name()) if res.StatusCode == 200 { @@ -210,8 +211,8 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err) } - if actual := hasher.Hash(); actual != t.Object.Oid { - return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Object.Oid, actual, written) + if actual := hasher.Hash(); actual != t.Oid { + return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Oid, actual, written) } return tools.RenameFileCopyPermissions(dlfilename, t.Path) diff --git a/tq/basic_upload.go b/tq/basic_upload.go index 60f935d2..6175c50d 100644 --- a/tq/basic_upload.go +++ b/tq/basic_upload.go @@ -1,7 +1,6 @@ package tq import ( - "fmt" "io" "io/ioutil" "os" @@ -45,9 +44,10 @@ func (a *basicUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { - rel, ok := t.Object.Rel("upload") - if !ok { - return fmt.Errorf("No upload action for this object.") + rel, err := t.Actions.Get("upload") + if err != nil { + return err + // return fmt.Errorf("No upload action for this object.") } req, err := httputil.NewHttpRequest("PUT", rel.Href, rel.Header) @@ -62,10 +62,10 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres if req.Header.Get("Transfer-Encoding") == "chunked" { req.TransferEncoding = []string{"chunked"} } else { - req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size, 10)) + req.Header.Set("Content-Length", strconv.FormatInt(t.Size, 10)) } - req.ContentLength = t.Object.Size + req.ContentLength = t.Size f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) if err != nil { @@ -84,7 +84,7 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres var reader io.Reader reader = &progress.CallbackReader{ C: ccb, - TotalSize: t.Object.Size, + TotalSize: t.Size, Reader: f, } @@ -97,7 +97,7 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres req.Body = ioutil.NopCloser(reader) - res, err := httputil.DoHttpRequest(config.Config, req, t.Object.NeedsAuth()) + res, err := httputil.DoHttpRequest(config.Config, req, !t.Authenticated) if err != nil { return errors.NewRetriableError(err) } @@ -117,7 +117,7 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres io.Copy(ioutil.Discard, res.Body) res.Body.Close() - return api.VerifyUpload(config.Config, t.Object) + return api.VerifyUpload(config.Config, toApiObject(t)) } // startCallbackReader is a reader wrapper which calls a function as soon as the diff --git a/tq/custom.go b/tq/custom.go index 50b0bbe8..290fb91a 100644 --- a/tq/custom.go +++ b/tq/custom.go @@ -4,7 +4,6 @@ import ( "bufio" "bytes" "encoding/json" - "errors" "fmt" "io" "os/exec" @@ -74,17 +73,17 @@ func NewCustomAdapterInitRequest(op string, concurrent bool, concurrentTransfers } type customAdapterTransferRequest struct { // common between upload/download - Event string `json:"event"` - Oid string `json:"oid"` - Size int64 `json:"size"` - Path string `json:"path,omitempty"` - Action *api.LinkRelation `json:"action"` + Event string `json:"event"` + Oid string `json:"oid"` + Size int64 `json:"size"` + Path string `json:"path,omitempty"` + Action *Action `json:"action"` } -func NewCustomAdapterUploadRequest(oid string, size int64, path string, action 
*api.LinkRelation) *customAdapterTransferRequest { +func NewCustomAdapterUploadRequest(oid string, size int64, path string, action *Action) *customAdapterTransferRequest { return &customAdapterTransferRequest{"upload", oid, size, path, action} } -func NewCustomAdapterDownloadRequest(oid string, size int64, action *api.LinkRelation) *customAdapterTransferRequest { +func NewCustomAdapterDownloadRequest(oid string, size int64, action *Action) *customAdapterTransferRequest { return &customAdapterTransferRequest{"download", oid, size, "", action} } @@ -98,12 +97,12 @@ func NewCustomAdapterTerminateRequest() *customAdapterTerminateRequest { // A common struct that allows all types of response to be identified type customAdapterResponseMessage struct { - Event string `json:"event"` - Error *api.ObjectError `json:"error"` - Oid string `json:"oid"` - Path string `json:"path,omitempty"` // always blank for upload - BytesSoFar int64 `json:"bytesSoFar"` - BytesSinceLast int `json:"bytesSinceLast"` + Event string `json:"event"` + Error *ObjectError `json:"error"` + Oid string `json:"oid"` + Path string `json:"path,omitempty"` // always blank for upload + BytesSoFar int64 `json:"bytesSoFar"` + BytesSinceLast int `json:"bytesSinceLast"` } func (a *customAdapter) Begin(maxConcurrency int, cb ProgressCallback) error { @@ -268,18 +267,18 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall } var authCalled bool - rel, ok := t.Object.Rel(a.getOperationName()) - if !ok { - return errors.New("Object not found on the server.") + rel, err := t.Actions.Get(a.getOperationName()) + if err != nil { + return err + // return errors.New("Object not found on the server.") } var req *customAdapterTransferRequest if a.direction == Upload { - req = NewCustomAdapterUploadRequest(t.Object.Oid, t.Object.Size, t.Path, rel) + req = NewCustomAdapterUploadRequest(t.Oid, t.Size, t.Path, rel) } else { - req = NewCustomAdapterDownloadRequest(t.Object.Oid, t.Object.Size, rel) + req = NewCustomAdapterDownloadRequest(t.Oid, t.Size, rel) } - err := a.sendMessage(customCtx, req) - if err != nil { + if err = a.sendMessage(customCtx, req); err != nil { return err } @@ -294,24 +293,24 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall switch resp.Event { case "progress": // Progress - if resp.Oid != t.Object.Oid { - return fmt.Errorf("Unexpected oid %q in response, expecting %q", resp.Oid, t.Object.Oid) + if resp.Oid != t.Oid { + return fmt.Errorf("Unexpected oid %q in response, expecting %q", resp.Oid, t.Oid) } if cb != nil { - cb(t.Name, t.Object.Size, resp.BytesSoFar, resp.BytesSinceLast) + cb(t.Name, t.Size, resp.BytesSoFar, resp.BytesSinceLast) } wasAuthOk = resp.BytesSoFar > 0 case "complete": // Download/Upload complete - if resp.Oid != t.Object.Oid { - return fmt.Errorf("Unexpected oid %q in response, expecting %q", resp.Oid, t.Object.Oid) + if resp.Oid != t.Oid { + return fmt.Errorf("Unexpected oid %q in response, expecting %q", resp.Oid, t.Oid) } if resp.Error != nil { - return fmt.Errorf("Error transferring %q: %v", t.Object.Oid, resp.Error) + return fmt.Errorf("Error transferring %q: %v", t.Oid, resp.Error) } if a.direction == Download { // So we don't have to blindly trust external providers, check SHA - if err = tools.VerifyFileHash(t.Object.Oid, resp.Path); err != nil { + if err = tools.VerifyFileHash(t.Oid, resp.Path); err != nil { return fmt.Errorf("Downloaded file failed checks: %v", err) } // Move file to final location @@ -319,7 +318,7 @@ func (a 
*customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall return fmt.Errorf("Failed to copy downloaded file: %v", err) } } else if a.direction == Upload { - if err = api.VerifyUpload(config.Config, t.Object); err != nil { + if err = api.VerifyUpload(config.Config, toApiObject(t)); err != nil { return err } } diff --git a/tq/object.go b/tq/object.go new file mode 100644 index 00000000..c68e4678 --- /dev/null +++ b/tq/object.go @@ -0,0 +1,140 @@ +package tq + +import ( + "fmt" + "time" + + "github.com/git-lfs/git-lfs/api" + "github.com/git-lfs/git-lfs/errors" +) + +type Transfer struct { + Name string `json:"name"` + Oid string `json:"oid,omitempty"` + Size int64 `json:"size"` + Authenticated bool `json:"authenticated,omitempty"` + Actions ActionSet `json:"actions,omitempty"` + Error *ObjectError `json:"error,omitempty"` + Path string `json:"path"` +} + +type ObjectError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// NewTransfer creates a new Transfer instance +func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { + t := &Transfer{ + Name: name, + Oid: obj.Oid, + Size: obj.Size, + Authenticated: obj.Authenticated, + Actions: make(ActionSet), + Path: path, + } + + if obj.Error != nil { + t.Error = &ObjectError{ + Code: obj.Error.Code, + Message: obj.Error.Message, + } + } + + for rel, action := range obj.Actions { + t.Actions[rel] = &Action{ + Href: action.Href, + Header: action.Header, + ExpiresAt: action.ExpiresAt, + } + } + + return t + +} + +type Action struct { + Href string `json:"href"` + Header map[string]string `json:"header,omitempty"` + ExpiresAt time.Time `json:"expires_at,omitempty"` +} + +type ActionSet map[string]*Action + +const ( + // objectExpirationToTransfer is the duration we expect to have passed + // from the time that the object's expires_at property is checked to + // when the transfer is executed. 
+ objectExpirationToTransfer = 5 * time.Second +) + +func (as ActionSet) Get(rel string) (*Action, error) { + a, ok := as[rel] + if !ok { + return nil, &ActionMissingError{Rel: rel} + } + + if !a.ExpiresAt.IsZero() && a.ExpiresAt.Before(time.Now().Add(objectExpirationToTransfer)) { + return nil, errors.NewRetriableError(&ActionExpiredErr{Rel: rel, At: a.ExpiresAt}) + } + + return a, nil +} + +type ActionExpiredErr struct { + Rel string + At time.Time +} + +func (e ActionExpiredErr) Error() string { + return fmt.Sprintf("tq: action %q expires at %s", + e.Rel, e.At.In(time.Local).Format(time.RFC822)) +} + +type ActionMissingError struct { + Rel string +} + +func (e ActionMissingError) Error() string { + return fmt.Sprintf("tq: unable to find action %q", e.Rel) +} + +func IsActionExpiredError(err error) bool { + if _, ok := err.(*ActionExpiredErr); ok { + return true + } + return false +} + +func IsActionMissingError(err error) bool { + if _, ok := err.(*ActionMissingError); ok { + return true + } + return false +} + +func toApiObject(t *Transfer) *api.ObjectResource { + o := &api.ObjectResource{ + Oid: t.Oid, + Size: t.Size, + Authenticated: t.Authenticated, + Actions: make(map[string]*api.LinkRelation), + } + + for rel, a := range t.Actions { + o.Actions[rel] = &api.LinkRelation{ + Href: a.Href, + Header: a.Header, + ExpiresAt: a.ExpiresAt, + } + } + + if t.Error != nil { + o.Error = &api.ObjectError{ + Code: t.Error.Code, + Message: t.Error.Message, + } + } + + return o +} diff --git a/tq/transfer.go b/tq/transfer.go index 1af8af20..0da7455f 100644 --- a/tq/transfer.go +++ b/tq/transfer.go @@ -2,8 +2,6 @@ // NOTE: Subject to change, do not rely on this package from outside git-lfs source package tq -import "github.com/git-lfs/git-lfs/api" - type Direction int const ( @@ -56,22 +54,6 @@ type Adapter interface { ClearTempStorage() error } -// General struct for both uploads and downloads -type Transfer struct { - // Name of the file that triggered this transfer - Name string - // Object from API which provides the core data for this transfer - Object *api.ObjectResource - // Path for uploads is the source of data to send, for downloads is the - // location to place the final result - Path string -} - -// NewTransfer creates a new Transfer instance -func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { - return &Transfer{name, obj, path} -} - // Result of a transfer returned through CompletionChannel() type TransferResult struct { Transfer *Transfer diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index 15b8f771..c4930a4e 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -15,15 +15,6 @@ const ( defaultBatchSize = 100 ) -type Transferable interface { - Oid() string - Size() int64 - Name() string - Path() string - Object() *api.ObjectResource - SetObject(*api.ObjectResource) -} - type retryCounter struct { MaxRetries int `git:"lfs.transfer.maxretries"` @@ -72,29 +63,26 @@ func (r *retryCounter) CanRetry(oid string) (int, bool) { } // Batch implements the sort.Interface interface and enables sorting on a slice -// of `Transferable`s by object size. +// of `*Transfer`s by object size. // // This interface is implemented here so that the largest objects can be // processed first. Since adding a new batch is unable to occur until the // current batch has finished processing, this enables us to reduce the risk of // a single worker getting tied up on a large item at the end of a batch while // all other workers are sitting idle. 
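
Less() reports ascending size, while the comment above wants the largest objects handed to workers first; the straightforward way to get a descending order out of this interface is the standard library's sort.Reverse. A minimal sketch of that interplay, using the *Transfer fields introduced by this patch — the OIDs and sizes are invented for illustration, and sort.Reverse is an assumption about the call site rather than code quoted from this series:

	// assumes: import "sort"
	batch := Batch{
		&Transfer{Oid: "aaa", Size: 16},
		&Transfer{Oid: "bbb", Size: 4096},
		&Transfer{Oid: "ccc", Size: 512},
	}
	// Batch satisfies sort.Interface, so sort.Reverse flips the ascending
	// Less into a descending one: the 4096-byte object is dispatched first.
	sort.Sort(sort.Reverse(batch))
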
-type Batch []Transferable +type Batch []*Transfer func (b Batch) ApiObjects() []*api.ObjectResource { transfers := make([]*api.ObjectResource, 0, len(b)) for _, t := range b { - transfers = append(transfers, &api.ObjectResource{ - Oid: t.Oid(), - Size: t.Size(), - }) + transfers = append(transfers, toApiObject(t)) } return transfers } func (b Batch) Len() int { return len(b) } -func (b Batch) Less(i, j int) bool { return b[i].Size() < b[j].Size() } +func (b Batch) Less(i, j int) bool { return b[i].Size < b[j].Size } func (b Batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } // TransferQueue organises the wider process of uploading and downloading, @@ -108,11 +96,11 @@ type TransferQueue struct { dryRun bool meter progress.Meter errors []error - transferables map[string]Transferable + transfers map[string]*Transfer batchSize int bufferDepth int // Channel for processing (and buffering) incoming items - incoming chan Transferable + incoming chan *Transfer errorc chan error // Channel for processing errors watchers []chan string trMutex *sync.Mutex @@ -152,12 +140,12 @@ func WithBufferDepth(depth int) Option { // NewTransferQueue builds a TransferQueue, direction and underlying mechanism determined by adapter func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *TransferQueue { q := &TransferQueue{ - direction: dir, - errorc: make(chan error), - transferables: make(map[string]Transferable), - trMutex: &sync.Mutex{}, - manifest: manifest, - rc: newRetryCounter(), + direction: dir, + errorc: make(chan error), + transfers: make(map[string]*Transfer), + trMutex: &sync.Mutex{}, + manifest: manifest, + rc: newRetryCounter(), } for _, opt := range options { @@ -173,7 +161,7 @@ func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *Tra q.bufferDepth = q.batchSize } - q.incoming = make(chan Transferable, q.bufferDepth) + q.incoming = make(chan *Transfer, q.bufferDepth) if q.meter == nil { q.meter = progress.Noop() @@ -186,28 +174,28 @@ func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *Tra return q } -// Add adds a Transferable to the transfer queue. It only increments the amount -// of waiting the TransferQueue has to do if the Transferable "t" is new. -func (q *TransferQueue) Add(t Transferable) { +// Add adds a *Transfer to the transfer queue. It only increments the amount +// of waiting the TransferQueue has to do if the *Transfer "t" is new. +func (q *TransferQueue) Add(t *Transfer) { if isNew := q.remember(t); !isNew { - tracerx.Printf("already transferring %q, skipping duplicate", t.Oid()) + tracerx.Printf("already transferring %q, skipping duplicate", t.Oid) return } q.incoming <- t } -// remember remembers the Transferable "t" if the *TransferQueue doesn't already -// know about a Transferable with the same OID. +// remember remembers the *Transfer "t" if the *TransferQueue doesn't already +// know about a Transfer with the same OID. // // It returns if the value is new or not. -func (q *TransferQueue) remember(t Transferable) bool { +func (q *TransferQueue) remember(t *Transfer) bool { q.trMutex.Lock() defer q.trMutex.Unlock() - if _, ok := q.transferables[t.Oid()]; !ok { + if _, ok := q.transfers[t.Oid]; !ok { q.wait.Add(1) - q.transferables[t.Oid()] = t + q.transfers[t.Oid] = t return true } @@ -221,7 +209,7 @@ func (q *TransferQueue) remember(t Transferable) bool { // 2. While the batch contains less items than `q.batchSize` AND the channel // is open, read one item from the `q.incoming` channel. // a. 
If the read was a channel close, go to step 4.
-// b. If the read was a Transferable item, go to step 3.
+// b. If the read was a *Transfer item, go to step 3.
 // 3. Append the item to the batch.
 // 4. Sort the batch by descending object size, make a batch API call, send
 // the items to the `*adapterBase`.
@@ -293,8 +281,8 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error)
 	// that was encountered. If any of the objects couldn't be
 	// retried, they will be marked as failed.
 	for _, t := range batch {
-		if q.canRetryObject(t.Oid(), err) {
-			q.rc.Increment(t.Oid())
+		if q.canRetryObject(t.Oid, err) {
+			q.rc.Increment(t.Oid)
 			next = append(next, t)
 		} else {
@@ -319,47 +307,52 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error)
 		continue
 	}
 
-		if _, needsTransfer := o.Rel(q.transferKind()); needsTransfer {
-			// If the object has a link relation for the kind of
-			// transfer that we want to perform, grab a Transferable
-			// that matches the object's OID.
-			q.trMutex.Lock()
-			t, ok := q.transferables[o.Oid]
-			q.trMutex.Unlock()
+		q.trMutex.Lock()
+		t, ok := q.transfers[o.Oid]
+		q.trMutex.Unlock()
+		if !ok {
+			// If we couldn't find any associated
+			// Transfer object, then we give up on the
+			// transfer by telling the progress meter to
+			// skip the number of bytes in "o".
+			q.errorc <- errors.Errorf("[%v] The server returned an unknown OID.", o.Oid)
 
-			if ok {
-				// If we knew about an associated Transferable,
-				// begin the transfer.
-				t.SetObject(o)
-				q.meter.StartTransfer(t.Name())
-
-				toTransfer = append(toTransfer, NewTransfer(
-					t.Name(), t.Object(), t.Path(),
-				))
-			} else {
-				// If we couldn't find any associated
-				// Transferable object, then we give up on the
-				// transfer by telling the progress meter to
-				// skip the number of bytes in "o".
-				q.errorc <- errors.Errorf("[%v] The server returned an unknown OID.", o.Oid)
-
-				q.Skip(o.Size)
-				q.wait.Done()
-			}
-		} else {
-			// Otherwise, if the object didn't need a transfer at
-			// all, skip and decrement it.
 			q.Skip(o.Size)
 			q.wait.Done()
+		} else {
+			t = NewTransfer(t.Name, o, t.Path)
+
+			if _, err := t.Actions.Get(q.transferKind()); err != nil {
+				// XXX(taylor): duplication
+				if q.canRetryObject(t.Oid, err) {
+					q.rc.Increment(t.Oid)
+					count := q.rc.CountFor(t.Oid)
+
+					tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, t.Oid, t.Size)
+					next = append(next, t)
+				} else {
+					if !IsActionMissingError(err) {
+						q.errorc <- errors.Errorf("[%v] %v", t.Name, err)
+					}
+					// q.errorc <- errors.Errorf("[%v] %v", t.Oid[:7], err)
+
+					q.Skip(o.Size)
+					q.wait.Done()
+				}
+
+			} else {
+				q.meter.StartTransfer(t.Name)
+				toTransfer = append(toTransfer, t)
+			}
 		}
 	}
 
 	retries := q.addToAdapter(toTransfer)
 	for t := range retries {
-		q.rc.Increment(t.Oid())
-		count := q.rc.CountFor(t.Oid())
+		q.rc.Increment(t.Oid)
+		count := q.rc.CountFor(t.Oid)
 
-		tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, t.Oid(), t.Size())
+		tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, t.Oid, t.Size)
 		next = append(next, t)
 	}
@@ -372,20 +365,20 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error)
 func (q *TransferQueue) makeBatch() Batch { return make(Batch, 0, q.batchSize) }
 
 // addToAdapter adds the given "pending" transfers to the transfer adapters and
-// returns a channel of Transferables that are to be retried in the next batch.
+// returns a channel of Transfers that are to be retried in the next batch.
// After all of the items in the batch have been processed, the channel is // closed. // // addToAdapter returns immediately, and does not block. -func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan Transferable { - retries := make(chan Transferable, len(pending)) +func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan *Transfer { + retries := make(chan *Transfer, len(pending)) if err := q.ensureAdapterBegun(); err != nil { close(retries) q.errorc <- err for _, t := range pending { - q.Skip(t.Object.Size) + q.Skip(t.Size) q.wait.Done() } @@ -426,9 +419,9 @@ func (q *TransferQueue) makeDryRunResults(ts []*Transfer) <-chan TransferResult // handleTransferResult observes the transfer result, sending it on the retries // channel if it was able to be retried. func (q *TransferQueue) handleTransferResult( - res TransferResult, retries chan<- Transferable, + res TransferResult, retries chan<- *Transfer, ) { - oid := res.Transfer.Object.Oid + oid := res.Transfer.Oid if res.Error != nil { // If there was an error encountered when processing the @@ -441,7 +434,7 @@ func (q *TransferQueue) handleTransferResult( tracerx.Printf("tq: retrying object %s", oid) q.trMutex.Lock() - t, ok := q.transferables[oid] + t, ok := q.transfers[oid] q.trMutex.Unlock() if ok { @@ -532,7 +525,7 @@ func (q *TransferQueue) ensureAdapterBegun() error { } // Wait waits for the queue to finish processing all transfers. Once Wait is -// called, Add will no longer add transferables to the queue. Any failed +// called, Add will no longer add transfers to the queue. Any failed // transfers will be automatically retried once. func (q *TransferQueue) Wait() { close(q.incoming) diff --git a/tq/tus_upload.go b/tq/tus_upload.go index a4801fee..dced036e 100644 --- a/tq/tus_upload.go +++ b/tq/tus_upload.go @@ -37,9 +37,10 @@ func (a *tusUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { - rel, ok := t.Object.Rel("upload") - if !ok { - return fmt.Errorf("No upload action for this object.") + rel, err := t.Actions.Get("upload") + if err != nil { + return err + // return fmt.Errorf("No upload action for this object.") } // Note not supporting the Creation extension since the batch API generates URLs @@ -47,7 +48,7 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC // 1. 
Send HEAD request to determine upload start point // Request must include Tus-Resumable header (version) - tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Object.Oid) + tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Oid) req, err := httputil.NewHttpRequest("HEAD", rel.Href, rel.Header) if err != nil { return err @@ -69,9 +70,9 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC } // Upload-Offset=size means already completed (skip) // Batch API will probably already detect this, but handle just in case - if offset >= t.Object.Size { - tracerx.Printf("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Object.Oid) - advanceCallbackProgress(cb, t, t.Object.Size) + if offset >= t.Size { + tracerx.Printf("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Oid) + advanceCallbackProgress(cb, t, t.Size) return nil } @@ -84,9 +85,9 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC // Upload-Offset=0 means start from scratch, but still send PATCH if offset == 0 { - tracerx.Printf("xfer: tus.io uploading %q from start", t.Object.Oid) + tracerx.Printf("xfer: tus.io uploading %q from start", t.Oid) } else { - tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Object.Oid, offset) + tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Oid, offset) advanceCallbackProgress(cb, t, offset) _, err := f.Seek(offset, os.SEEK_CUR) if err != nil { @@ -99,7 +100,7 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC // Response Upload-Offset must be request Upload-Offset plus sent bytes // Response may include Upload-Expires header in which case check not passed - tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Object.Oid) + tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Oid) req, err = httputil.NewHttpRequest("PATCH", rel.Href, rel.Header) if err != nil { return err @@ -107,8 +108,8 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC req.Header.Set("Tus-Resumable", TusVersion) req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10)) req.Header.Set("Content-Type", "application/offset+octet-stream") - req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size-offset, 10)) - req.ContentLength = t.Object.Size - offset + req.Header.Set("Content-Length", strconv.FormatInt(t.Size-offset, 10)) + req.ContentLength = t.Size - offset // Ensure progress callbacks made while uploading // Wrap callback to give name context @@ -121,7 +122,7 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC var reader io.Reader reader = &progress.CallbackReader{ C: ccb, - TotalSize: t.Object.Size, + TotalSize: t.Size, Reader: f, } @@ -154,7 +155,7 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC io.Copy(ioutil.Discard, res.Body) res.Body.Close() - return api.VerifyUpload(config.Config, t.Object) + return api.VerifyUpload(config.Config, toApiObject(t)) } func configureTusAdapter(m *Manifest) { From 7f26137bae3d7575daaa5518375839034f2d0308 Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Wed, 14 Dec 2016 11:05:57 -0700 Subject: [PATCH 44/68] tq: only include necessary properties of Transferable in Add() --- commands/command_fetch.go | 4 +- commands/command_prune.go | 4 +- commands/uploader.go | 5 ++- test/git-lfs-test-server-api/main.go | 2 +- tq/transfer_queue.go | 57 
++++++++++++++++++---------- 5 files changed, 47 insertions(+), 25 deletions(-) diff --git a/commands/command_fetch.go b/commands/command_fetch.go index 7b346a3f..017730f4 100644 --- a/commands/command_fetch.go +++ b/commands/command_fetch.go @@ -326,7 +326,9 @@ func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfil for _, p := range pointers { tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) - q.Add(lfs.NewDownloadable(p)) + + d := lfs.NewDownloadable(p) + q.Add(d.Name, d.Path, d.Oid, d.Size) } processQueue := time.Now() diff --git a/commands/command_prune.go b/commands/command_prune.go index 94682277..db0fb1ac 100644 --- a/commands/command_prune.go +++ b/commands/command_prune.go @@ -150,7 +150,9 @@ func prune(fetchPruneConfig config.FetchPruneConfig, verifyRemote, dryRun, verbo if verifyRemote { tracerx.Printf("VERIFYING: %v", file.Oid) pointer := lfs.NewPointer(file.Oid, file.Size, nil) - verifyQueue.Add(lfs.NewDownloadable(&lfs.WrappedPointer{Pointer: pointer})) + + d := lfs.NewDownloadable(&lfs.WrappedPointer{Pointer: pointer}) + verifyQueue.Add(d.Name, d.Path, d.Oid, d.Size) } } } diff --git a/commands/uploader.go b/commands/uploader.go index d5472ec3..c29ac974 100644 --- a/commands/uploader.go +++ b/commands/uploader.go @@ -113,7 +113,8 @@ func (c *uploadContext) checkMissing(missing []*lfs.WrappedPointer, missingSize }() for _, p := range missing { - checkQueue.Add(lfs.NewDownloadable(p)) + d := lfs.NewDownloadable(p) + checkQueue.Add(d.Name, d.Path, d.Oid, d.Size) } // Currently this is needed to flush the batch but is not enough to sync @@ -149,7 +150,7 @@ func uploadPointers(c *uploadContext, unfiltered []*lfs.WrappedPointer) { } } - q.Add(u) + q.Add(u.Name, u.Path, u.Oid, u.Size) c.SetUploaded(p.Oid) } diff --git a/test/git-lfs-test-server-api/main.go b/test/git-lfs-test-server-api/main.go index 5939741d..7b79dee3 100644 --- a/test/git-lfs-test-server-api/main.go +++ b/test/git-lfs-test-server-api/main.go @@ -167,7 +167,7 @@ func buildTestData() (oidsExist, oidsMissing []TestObject, err error) { if err != nil { return nil, nil, err } - uploadQueue.Add(u) + uploadQueue.Add(u.Name, u.Path, u.Oid, u.Size) } uploadQueue.Wait() diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index c4930a4e..edf97d9b 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -70,17 +70,21 @@ func (r *retryCounter) CanRetry(oid string) (int, bool) { // current batch has finished processing, this enables us to reduce the risk of // a single worker getting tied up on a large item at the end of a batch while // all other workers are sitting idle. 
-type Batch []*Transfer +type Batch []*objectTuple func (b Batch) ApiObjects() []*api.ObjectResource { transfers := make([]*api.ObjectResource, 0, len(b)) for _, t := range b { - transfers = append(transfers, toApiObject(t)) + transfers = append(transfers, tupleToApiObject(t)) } return transfers } +func tupleToApiObject(t *objectTuple) *api.ObjectResource { + return &api.ObjectResource{Oid: t.Oid, Size: t.Size} +} + func (b Batch) Len() int { return len(b) } func (b Batch) Less(i, j int) bool { return b[i].Size < b[j].Size } func (b Batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } @@ -96,11 +100,11 @@ type TransferQueue struct { dryRun bool meter progress.Meter errors []error - transfers map[string]*Transfer + transfers map[string]*objectTuple batchSize int bufferDepth int // Channel for processing (and buffering) incoming items - incoming chan *Transfer + incoming chan *objectTuple errorc chan error // Channel for processing errors watchers []chan string trMutex *sync.Mutex @@ -115,6 +119,11 @@ type TransferQueue struct { rc *retryCounter } +type objectTuple struct { + Name, Path, Oid string + Size int64 +} + type Option func(*TransferQueue) func DryRun(dryRun bool) Option { @@ -142,7 +151,7 @@ func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *Tra q := &TransferQueue{ direction: dir, errorc: make(chan error), - transfers: make(map[string]*Transfer), + transfers: make(map[string]*objectTuple), trMutex: &sync.Mutex{}, manifest: manifest, rc: newRetryCounter(), @@ -161,7 +170,7 @@ func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *Tra q.bufferDepth = q.batchSize } - q.incoming = make(chan *Transfer, q.bufferDepth) + q.incoming = make(chan *objectTuple, q.bufferDepth) if q.meter == nil { q.meter = progress.Noop() @@ -176,7 +185,14 @@ func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *Tra // Add adds a *Transfer to the transfer queue. It only increments the amount // of waiting the TransferQueue has to do if the *Transfer "t" is new. -func (q *TransferQueue) Add(t *Transfer) { +func (q *TransferQueue) Add(name, path, oid string, size int64) { + t := &objectTuple{ + Name: name, + Path: path, + Oid: oid, + Size: size, + } + if isNew := q.remember(t); !isNew { tracerx.Printf("already transferring %q, skipping duplicate", t.Oid) return @@ -189,7 +205,7 @@ func (q *TransferQueue) Add(t *Transfer) { // know about a Transfer with the same OID. // // It returns if the value is new or not. 
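
Together with the field-based Add() above, this means duplicate suppression now keys on the bare OID string stored in the objectTuple. A hedged sketch of the caller-visible effect, assuming a queue built by NewTransferQueue as in this patch; the file names, path, and size are invented for illustration:

	q.Add("a.bin", "/tmp/lfs/objects/aabbcc", "aabbcc", 512)
	// Second Add with the same OID: remember() reports it as already known,
	// so the call is logged as a skipped duplicate and the object is only
	// enqueued once even though two working-tree paths share its content.
	q.Add("docs/a.bin", "/tmp/lfs/objects/aabbcc", "aabbcc", 512)
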
-func (q *TransferQueue) remember(t *Transfer) bool { +func (q *TransferQueue) remember(t *objectTuple) bool { q.trMutex.Lock() defer q.trMutex.Unlock() @@ -320,21 +336,22 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error) q.Skip(o.Size) q.wait.Done() } else { - t = NewTransfer(t.Name, o, t.Path) + tr := NewTransfer(t.Name, o, t.Path) - if _, err := t.Actions.Get(q.transferKind()); err != nil { + if _, err := tr.Actions.Get(q.transferKind()); err != nil { // XXX(taylor): duplication - if q.canRetryObject(t.Oid, err) { - q.rc.Increment(t.Oid) - count := q.rc.CountFor(t.Oid) + if q.canRetryObject(tr.Oid, err) { + q.rc.Increment(tr.Oid) + count := q.rc.CountFor(tr.Oid) - tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, t.Oid, t.Size) + tracerx.Printf("tq: enqueue retry #%d for %q (size: %d)", count, tr.Oid, tr.Size) next = append(next, t) } else { if !IsActionMissingError(err) { - q.errorc <- errors.Errorf("[%v] %v", t.Name, err) + q.errorc <- errors.Errorf("[%v] %v", tr.Name, err) } - // q.errorc <- errors.Errorf("[%v] %v", t.Oid[:7], err) + // q.errorc <- errors.Errorf("[%v] %v", + // tr.Oid[:7], err) q.Skip(o.Size) q.wait.Done() @@ -342,7 +359,7 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error) } else { q.meter.StartTransfer(t.Name) - toTransfer = append(toTransfer, t) + toTransfer = append(toTransfer, tr) } } } @@ -370,8 +387,8 @@ func (q *TransferQueue) makeBatch() Batch { return make(Batch, 0, q.batchSize) } // closed. // // addToAdapter returns immediately, and does not block. -func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan *Transfer { - retries := make(chan *Transfer, len(pending)) +func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan *objectTuple { + retries := make(chan *objectTuple, len(pending)) if err := q.ensureAdapterBegun(); err != nil { close(retries) @@ -419,7 +436,7 @@ func (q *TransferQueue) makeDryRunResults(ts []*Transfer) <-chan TransferResult // handleTransferResult observes the transfer result, sending it on the retries // channel if it was able to be retried. func (q *TransferQueue) handleTransferResult( - res TransferResult, retries chan<- *Transfer, + res TransferResult, retries chan<- *objectTuple, ) { oid := res.Transfer.Oid From 94b2d0da9913833b7ce9f3083cacd22c2ff403ba Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Wed, 14 Dec 2016 14:54:31 -0700 Subject: [PATCH 45/68] tq: remove comments --- tq/transfer_queue.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index edf97d9b..2e31fed6 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -350,8 +350,6 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error) if !IsActionMissingError(err) { q.errorc <- errors.Errorf("[%v] %v", tr.Name, err) } - // q.errorc <- errors.Errorf("[%v] %v", - // tr.Oid[:7], err) q.Skip(o.Size) q.wait.Done() From db6112c513fcc6be5e5ced4587a8eef556f89817 Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Wed, 14 Dec 2016 15:00:54 -0700 Subject: [PATCH 46/68] tq/object: add deprecation note --- tq/object.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tq/object.go b/tq/object.go index c68e4678..a5c178dd 100644 --- a/tq/object.go +++ b/tq/object.go @@ -24,6 +24,11 @@ type ObjectError struct { } // NewTransfer creates a new Transfer instance +// +// XXX(taylor): note, this function will be removed from the `tq` package's API +// before landing in master. 
It is currently used by the smudge operation to +// download a single file and pass _directly_ to the Adapter, whereas it should +// use a transferqueue. func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { t := &Transfer{ Name: name, From 205c63aa82a1ae5905af74996cf8278ab83651f7 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 15:16:59 -0700 Subject: [PATCH 47/68] Add Add() to the progress.Meter interface --- progress/noop.go | 1 + progress/progress.go | 1 + 2 files changed, 2 insertions(+) diff --git a/progress/noop.go b/progress/noop.go index 0ab4f3c6..27b36e2c 100644 --- a/progress/noop.go +++ b/progress/noop.go @@ -7,6 +7,7 @@ func Noop() Meter { type nonMeter struct{} func (m *nonMeter) Start() {} +func (m *nonMeter) Add(size int64) {} func (m *nonMeter) Skip(size int64) {} func (m *nonMeter) StartTransfer(name string) {} func (m *nonMeter) TransferBytes(direction, name string, read, total int64, current int) {} diff --git a/progress/progress.go b/progress/progress.go index 21ae289a..c016b3c5 100644 --- a/progress/progress.go +++ b/progress/progress.go @@ -4,6 +4,7 @@ package progress type Meter interface { Start() + Add(int64) Skip(size int64) StartTransfer(name string) TransferBytes(direction, name string, read, total int64, current int) From 8adf6ec8f607e228ee278a925bed51e0f716a991 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 15:17:24 -0700 Subject: [PATCH 48/68] Encapsulate checking out a single file into *singleCheckout --- commands/command_checkout.go | 86 +----------------- commands/command_pull.go | 169 +++++++++++++++++------------------ commands/pull.go | 109 ++++++++++++++++++++++ 3 files changed, 192 insertions(+), 172 deletions(-) create mode 100644 commands/pull.go diff --git a/commands/command_checkout.go b/commands/command_checkout.go index edebf3e5..2ebb4a7b 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -1,12 +1,8 @@ package commands import ( - "bytes" "fmt" - "io" "os" - "os/exec" - "sync" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" @@ -24,18 +20,10 @@ func checkoutCommand(cmd *cobra.Command, args []string) { Panic(err, "Could not checkout") } - // Get a converter from repo-relative to cwd-relative - // Since writing data & calling git update-index must be relative to cwd - pathConverter, err := lfs.NewRepoToCurrentPathConverter() - if err != nil { - Panic(err, "Could not convert file paths") - } - var totalBytes int64 meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) - manifest := TransferManifest() - gitIndexer := &gitIndexer{} filter := filepathfilter.New(rootedPaths(args), nil) + singleCheckout := newSingleCheckout() chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { LoggedError(err, "Scanner error") @@ -46,17 +34,7 @@ func checkoutCommand(cmd *cobra.Command, args []string) { meter.Add(p.Size) meter.StartTransfer(p.Name) - cwdfilepath, err := checkout(p, pathConverter, manifest) - if err != nil { - LoggedError(err, "Checkout error") - } - - if len(cwdfilepath) > 0 { - // errors are only returned when the gitIndexer is starting a new cmd - if err := gitIndexer.Add(cwdfilepath); err != nil { - Panic(err, "Could not update the index") - } - } + singleCheckout.Run(p) // not strictly correct (parallel) but we don't have a callback & it's just local // plus only 1 slot in channel so it'll block & be close @@ -73,10 +51,7 @@ func checkoutCommand(cmd *cobra.Command, args []string) 
{ meter.Start() chgitscanner.Close() meter.Finish() - - if err := gitIndexer.Close(); err != nil { - LoggedError(err, "Error updating the git index:\n%s", gitIndexer.Output()) - } + singleCheckout.Close() } func checkout(pointer *lfs.WrappedPointer, pathConverter lfs.PathConverter, manifest *transfer.Manifest) (string, error) { @@ -127,61 +102,6 @@ func rootedPaths(args []string) []string { return rootedpaths } -// Don't fire up the update-index command until we have at least one file to -// give it. Otherwise git interprets the lack of arguments to mean param-less update-index -// which can trigger entire working copy to be re-examined, which triggers clean filters -// and which has unexpected side effects (e.g. downloading filtered-out files) -type gitIndexer struct { - cmd *exec.Cmd - input io.WriteCloser - output bytes.Buffer - mu sync.Mutex -} - -func (i *gitIndexer) Add(path string) error { - i.mu.Lock() - defer i.mu.Unlock() - - if i.cmd == nil { - // Fire up the update-index command - i.cmd = exec.Command("git", "update-index", "-q", "--refresh", "--stdin") - i.cmd.Stdout = &i.output - i.cmd.Stderr = &i.output - stdin, err := i.cmd.StdinPipe() - if err == nil { - err = i.cmd.Start() - } - - if err != nil { - return err - } - - i.input = stdin - } - - i.input.Write([]byte(path + "\n")) - return nil -} - -func (i *gitIndexer) Output() string { - return i.output.String() -} - -func (i *gitIndexer) Close() error { - i.mu.Lock() - defer i.mu.Unlock() - - if i.input != nil { - i.input.Close() - } - - if i.cmd != nil { - return i.cmd.Wait() - } - - return nil -} - func init() { RegisterCommand("checkout", checkoutCommand, nil) } diff --git a/commands/command_pull.go b/commands/command_pull.go index 933107ba..c1b9c9d7 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -3,10 +3,13 @@ package commands import ( "fmt" "sync" + "time" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" + "github.com/git-lfs/git-lfs/progress" + "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) @@ -40,111 +43,99 @@ func pull(filter *filepathfilter.Filter) { Panic(err, "Could not pull") } - c := fetchRefToChan(ref.Sha, filter) - checkoutFromFetchChan(c, filter) -} - -func fetchRefToChan(ref string, filter *filepathfilter.Filter) chan *lfs.WrappedPointer { - c := make(chan *lfs.WrappedPointer) - pointers, err := pointersToFetchForRef(ref, filter) - if err != nil { - Panic(err, "Could not scan for Git LFS files") - } - - go fetchAndReportToChan(pointers, filter, c) - - return c -} - -func checkoutFromFetchChan(in chan *lfs.WrappedPointer, filter *filepathfilter.Filter) { - ref, err := git.CurrentRef() - if err != nil { - Panic(err, "Could not checkout") - } - - // Need to ScanTree to identify multiple files with the same content (fetch will only report oids once) - // use new gitscanner so mapping has all the scanned pointers before continuing - mapping := make(map[string][]*lfs.WrappedPointer) - chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { + pointers := newPointerMap() + meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) + singleCheckout := newSingleCheckout() + q := lfs.NewDownloadQueue(lfs.WithProgress(meter)) + gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { - Panic(err, "Could not scan for Git LFS files") + LoggedError(err, "Scanner error") return } - mapping[p.Oid] = append(mapping[p.Oid], p) - }) - chgitscanner.Filter = filter - if err := 
chgitscanner.ScanTree(ref.Sha); err != nil { + if pointers.Seen(p) { + return + } + + // no need to download objects that exist locally already + lfs.LinkOrCopyFromReference(p.Oid, p.Size) + if lfs.ObjectExistsOfSize(p.Oid, p.Size) { + singleCheckout.Run(p) + return + } + + meter.Add(p.Size) + meter.StartTransfer(p.Name) + tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) + pointers.Add(p) + q.Add(lfs.NewDownloadable(p)) + }) + + gitscanner.Filter = filter + + dlwatch := q.Watch() + var wg sync.WaitGroup + wg.Add(1) + + go func() { + for oid := range dlwatch { + for _, p := range pointers.All(oid) { + singleCheckout.Run(p) + } + } + wg.Done() + }() + + processQueue := time.Now() + if err := gitscanner.ScanTree(ref.Sha); err != nil { ExitWithError(err) } - chgitscanner.Close() + meter.Start() + gitscanner.Close() + q.Wait() + wg.Wait() + tracerx.PerformanceSince("process queue", processQueue) - // Launch git update-index - c := make(chan *lfs.WrappedPointer) + singleCheckout.Close() - var wait sync.WaitGroup - wait.Add(1) - - go func() { - checkoutWithChan(c) - wait.Done() - }() - - // Feed it from in, which comes from fetch - for p := range in { - // Add all of the files for this oid - for _, fp := range mapping[p.Oid] { - c <- fp - } + for _, err := range q.Errors() { + FullError(err) } - close(c) - wait.Wait() } -// Populate the working copy with the real content of objects where the file is -// either missing, or contains a matching pointer placeholder, from a list of pointers. -// If the file exists but has other content it is left alone -// Callers of this function MUST NOT Panic or otherwise exit the process -// without waiting for this function to shut down. If the process exits while -// update-index is in the middle of processing a file the git index can be left -// in a locked state. -func checkoutWithChan(in <-chan *lfs.WrappedPointer) { - // Get a converter from repo-relative to cwd-relative - // Since writing data & calling git update-index must be relative to cwd - pathConverter, err := lfs.NewRepoToCurrentPathConverter() - if err != nil { - Panic(err, "Could not convert file paths") +type pointerMap struct { + pointers map[string][]*lfs.WrappedPointer + mu sync.Mutex +} + +func newPointerMap() *pointerMap { + return &pointerMap{pointers: make(map[string][]*lfs.WrappedPointer)} +} + +func (m *pointerMap) Seen(p *lfs.WrappedPointer) bool { + m.mu.Lock() + existing, ok := m.pointers[p.Oid] + if ok { + m.pointers[p.Oid] = append(existing, p) } + m.mu.Unlock() + return ok +} - manifest := TransferManifest() - gitIndexer := &gitIndexer{} +func (m *pointerMap) Add(p *lfs.WrappedPointer) { + m.mu.Lock() + defer m.mu.Unlock() + m.pointers[p.Oid] = append(m.pointers[p.Oid], p) +} - // From this point on, git update-index is running. Code in this loop MUST - // NOT Panic() or otherwise cause the process to exit. If the process exits - // while update-index is in the middle of updating, the index can remain in a - // locked state. 
+func (m *pointerMap) All(oid string) []*lfs.WrappedPointer { + m.mu.Lock() + pointers := m.pointers[oid] + delete(m.pointers, oid) + m.mu.Unlock() - // As files come in, write them to the wd and update the index - for pointer := range in { - cwdfilepath, err := checkout(pointer, pathConverter, manifest) - if err != nil { - LoggedError(err, "Checkout error") - } - - if len(cwdfilepath) == 0 { - continue - } - - // errors are only returned when the gitIndexer is starting a new cmd - if err := gitIndexer.Add(cwdfilepath); err != nil { - Panic(err, "Could not update the index") - } - } - - if err := gitIndexer.Close(); err != nil { - LoggedError(err, "Error updating the git index:\n%s", gitIndexer.Output()) - } + return pointers } func init() { diff --git a/commands/pull.go b/commands/pull.go new file mode 100644 index 00000000..cb5152b0 --- /dev/null +++ b/commands/pull.go @@ -0,0 +1,109 @@ +package commands + +import ( + "bytes" + "io" + "os/exec" + "sync" + + "github.com/git-lfs/git-lfs/lfs" + "github.com/git-lfs/git-lfs/transfer" +) + +// Handles the process of checking out a single file, and updating the git +// index. +func newSingleCheckout() *singleCheckout { + // Get a converter from repo-relative to cwd-relative + // Since writing data & calling git update-index must be relative to cwd + pathConverter, err := lfs.NewRepoToCurrentPathConverter() + if err != nil { + Panic(err, "Could not convert file paths") + } + + return &singleCheckout{ + gitIndexer: &gitIndexer{}, + pathConverter: pathConverter, + manifest: TransferManifest(), + } +} + +type singleCheckout struct { + gitIndexer *gitIndexer + pathConverter lfs.PathConverter + manifest *transfer.Manifest +} + +func (c *singleCheckout) Run(p *lfs.WrappedPointer) { + cwdfilepath, err := checkout(p, c.pathConverter, c.manifest) + if err != nil { + LoggedError(err, "Checkout error: %s", err) + } + + if len(cwdfilepath) > 0 { + // errors are only returned when the gitIndexer is starting a new cmd + if err := c.gitIndexer.Add(cwdfilepath); err != nil { + Panic(err, "Could not update the index") + } + } +} + +func (c *singleCheckout) Close() { + if err := c.gitIndexer.Close(); err != nil { + LoggedError(err, "Error updating the git index:\n%s", c.gitIndexer.Output()) + } +} + +// Don't fire up the update-index command until we have at least one file to +// give it. Otherwise git interprets the lack of arguments to mean param-less update-index +// which can trigger entire working copy to be re-examined, which triggers clean filters +// and which has unexpected side effects (e.g. 
downloading filtered-out files) +type gitIndexer struct { + cmd *exec.Cmd + input io.WriteCloser + output bytes.Buffer + mu sync.Mutex +} + +func (i *gitIndexer) Add(path string) error { + i.mu.Lock() + defer i.mu.Unlock() + + if i.cmd == nil { + // Fire up the update-index command + i.cmd = exec.Command("git", "update-index", "-q", "--refresh", "--stdin") + i.cmd.Stdout = &i.output + i.cmd.Stderr = &i.output + stdin, err := i.cmd.StdinPipe() + if err == nil { + err = i.cmd.Start() + } + + if err != nil { + return err + } + + i.input = stdin + } + + i.input.Write([]byte(path + "\n")) + return nil +} + +func (i *gitIndexer) Output() string { + return i.output.String() +} + +func (i *gitIndexer) Close() error { + i.mu.Lock() + defer i.mu.Unlock() + + if i.input != nil { + i.input.Close() + } + + if i.cmd != nil { + return i.cmd.Wait() + } + + return nil +} From 5fc2344e6ae991870b52268db44003f705c35c00 Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Wed, 14 Dec 2016 15:22:30 -0700 Subject: [PATCH 49/68] tq: move object.go into transfer.go --- tq/object.go | 145 ------------------------------------------------- tq/transfer.go | 144 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 145 deletions(-) delete mode 100644 tq/object.go diff --git a/tq/object.go b/tq/object.go deleted file mode 100644 index a5c178dd..00000000 --- a/tq/object.go +++ /dev/null @@ -1,145 +0,0 @@ -package tq - -import ( - "fmt" - "time" - - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/errors" -) - -type Transfer struct { - Name string `json:"name"` - Oid string `json:"oid,omitempty"` - Size int64 `json:"size"` - Authenticated bool `json:"authenticated,omitempty"` - Actions ActionSet `json:"actions,omitempty"` - Error *ObjectError `json:"error,omitempty"` - Path string `json:"path"` -} - -type ObjectError struct { - Code int `json:"code"` - Message string `json:"message"` -} - -// NewTransfer creates a new Transfer instance -// -// XXX(taylor): note, this function will be removed from the `tq` package's API -// before landing in master. It is currently used by the smudge operation to -// download a single file and pass _directly_ to the Adapter, whereas it should -// use a transferqueue. -func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { - t := &Transfer{ - Name: name, - Oid: obj.Oid, - Size: obj.Size, - Authenticated: obj.Authenticated, - Actions: make(ActionSet), - Path: path, - } - - if obj.Error != nil { - t.Error = &ObjectError{ - Code: obj.Error.Code, - Message: obj.Error.Message, - } - } - - for rel, action := range obj.Actions { - t.Actions[rel] = &Action{ - Href: action.Href, - Header: action.Header, - ExpiresAt: action.ExpiresAt, - } - } - - return t - -} - -type Action struct { - Href string `json:"href"` - Header map[string]string `json:"header,omitempty"` - ExpiresAt time.Time `json:"expires_at,omitempty"` -} - -type ActionSet map[string]*Action - -const ( - // objectExpirationToTransfer is the duration we expect to have passed - // from the time that the object's expires_at property is checked to - // when the transfer is executed. 
- objectExpirationToTransfer = 5 * time.Second -) - -func (as ActionSet) Get(rel string) (*Action, error) { - a, ok := as[rel] - if !ok { - return nil, &ActionMissingError{Rel: rel} - } - - if !a.ExpiresAt.IsZero() && a.ExpiresAt.Before(time.Now().Add(objectExpirationToTransfer)) { - return nil, errors.NewRetriableError(&ActionExpiredErr{Rel: rel, At: a.ExpiresAt}) - } - - return a, nil -} - -type ActionExpiredErr struct { - Rel string - At time.Time -} - -func (e ActionExpiredErr) Error() string { - return fmt.Sprintf("tq: action %q expires at %s", - e.Rel, e.At.In(time.Local).Format(time.RFC822)) -} - -type ActionMissingError struct { - Rel string -} - -func (e ActionMissingError) Error() string { - return fmt.Sprintf("tq: unable to find action %q", e.Rel) -} - -func IsActionExpiredError(err error) bool { - if _, ok := err.(*ActionExpiredErr); ok { - return true - } - return false -} - -func IsActionMissingError(err error) bool { - if _, ok := err.(*ActionMissingError); ok { - return true - } - return false -} - -func toApiObject(t *Transfer) *api.ObjectResource { - o := &api.ObjectResource{ - Oid: t.Oid, - Size: t.Size, - Authenticated: t.Authenticated, - Actions: make(map[string]*api.LinkRelation), - } - - for rel, a := range t.Actions { - o.Actions[rel] = &api.LinkRelation{ - Href: a.Href, - Header: a.Header, - ExpiresAt: a.ExpiresAt, - } - } - - if t.Error != nil { - o.Error = &api.ObjectError{ - Code: t.Error.Code, - Message: t.Error.Message, - } - } - - return o -} diff --git a/tq/transfer.go b/tq/transfer.go index 0da7455f..cf255c29 100644 --- a/tq/transfer.go +++ b/tq/transfer.go @@ -2,6 +2,14 @@ // NOTE: Subject to change, do not rely on this package from outside git-lfs source package tq +import ( + "fmt" + "time" + + "github.com/git-lfs/git-lfs/api" + "github.com/git-lfs/git-lfs/errors" +) + type Direction int const ( @@ -9,6 +17,142 @@ const ( Download = Direction(iota) ) +type Transfer struct { + Name string `json:"name"` + Oid string `json:"oid,omitempty"` + Size int64 `json:"size"` + Authenticated bool `json:"authenticated,omitempty"` + Actions ActionSet `json:"actions,omitempty"` + Error *ObjectError `json:"error,omitempty"` + Path string `json:"path"` +} + +type ObjectError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// NewTransfer creates a new Transfer instance +// +// XXX(taylor): note, this function will be removed from the `tq` package's API +// before landing in master. It is currently used by the smudge operation to +// download a single file and pass _directly_ to the Adapter, whereas it should +// use a transferqueue. +func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { + t := &Transfer{ + Name: name, + Oid: obj.Oid, + Size: obj.Size, + Authenticated: obj.Authenticated, + Actions: make(ActionSet), + Path: path, + } + + if obj.Error != nil { + t.Error = &ObjectError{ + Code: obj.Error.Code, + Message: obj.Error.Message, + } + } + + for rel, action := range obj.Actions { + t.Actions[rel] = &Action{ + Href: action.Href, + Header: action.Header, + ExpiresAt: action.ExpiresAt, + } + } + + return t + +} + +type Action struct { + Href string `json:"href"` + Header map[string]string `json:"header,omitempty"` + ExpiresAt time.Time `json:"expires_at,omitempty"` +} + +type ActionSet map[string]*Action + +const ( + // objectExpirationToTransfer is the duration we expect to have passed + // from the time that the object's expires_at property is checked to + // when the transfer is executed. 
+ objectExpirationToTransfer = 5 * time.Second +) + +func (as ActionSet) Get(rel string) (*Action, error) { + a, ok := as[rel] + if !ok { + return nil, &ActionMissingError{Rel: rel} + } + + if !a.ExpiresAt.IsZero() && a.ExpiresAt.Before(time.Now().Add(objectExpirationToTransfer)) { + return nil, errors.NewRetriableError(&ActionExpiredErr{Rel: rel, At: a.ExpiresAt}) + } + + return a, nil +} + +type ActionExpiredErr struct { + Rel string + At time.Time +} + +func (e ActionExpiredErr) Error() string { + return fmt.Sprintf("tq: action %q expires at %s", + e.Rel, e.At.In(time.Local).Format(time.RFC822)) +} + +type ActionMissingError struct { + Rel string +} + +func (e ActionMissingError) Error() string { + return fmt.Sprintf("tq: unable to find action %q", e.Rel) +} + +func IsActionExpiredError(err error) bool { + if _, ok := err.(*ActionExpiredErr); ok { + return true + } + return false +} + +func IsActionMissingError(err error) bool { + if _, ok := err.(*ActionMissingError); ok { + return true + } + return false +} + +func toApiObject(t *Transfer) *api.ObjectResource { + o := &api.ObjectResource{ + Oid: t.Oid, + Size: t.Size, + Authenticated: t.Authenticated, + Actions: make(map[string]*api.LinkRelation), + } + + for rel, a := range t.Actions { + o.Actions[rel] = &api.LinkRelation{ + Href: a.Href, + Header: a.Header, + ExpiresAt: a.ExpiresAt, + } + } + + if t.Error != nil { + o.Error = &api.ObjectError{ + Code: t.Error.Code, + Message: t.Error.Message, + } + } + + return o +} + // NewAdapterFunc creates new instances of Adapter. Code that wishes // to provide new Adapter instances should pass an implementation of this // function to RegisterNewTransferAdapterFunc() on a *Manifest. From b2e6de51c3841f82f44a8ac37b43592201af5f63 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 15:29:03 -0700 Subject: [PATCH 50/68] Move checkout() to *singleCheckout --- commands/command_checkout.go | 37 ---------------------------------- commands/command_pull.go | 1 + commands/pull.go | 39 ++++++++++++++++++++++++++++++------ 3 files changed, 34 insertions(+), 43 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 2ebb4a7b..4be1be56 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -1,15 +1,10 @@ package commands import ( - "fmt" - "os" - - "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" - "github.com/git-lfs/git-lfs/transfer" "github.com/spf13/cobra" ) @@ -54,38 +49,6 @@ func checkoutCommand(cmd *cobra.Command, args []string) { singleCheckout.Close() } -func checkout(pointer *lfs.WrappedPointer, pathConverter lfs.PathConverter, manifest *transfer.Manifest) (string, error) { - // Check the content - either missing or still this pointer (not exist is ok) - filepointer, err := lfs.DecodePointerFromFile(pointer.Name) - if err != nil && !os.IsNotExist(err) { - if errors.IsNotAPointerError(err) { - // File has non-pointer content, leave it alone - return "", nil - } - return "", err - } - - if filepointer != nil && filepointer.Oid != pointer.Oid { - // User has probably manually reset a file to another commit - // while leaving it a pointer; don't mess with this - return "", nil - } - - cwdfilepath := pathConverter.Convert(pointer.Name) - - err = lfs.PointerSmudgeToFile(cwdfilepath, pointer.Pointer, false, manifest, nil) - if err != nil { - if errors.IsDownloadDeclinedError(err) { - // 
acceptable error, data not local (fetch not run or include/exclude) - return "", fmt.Errorf("Skipped checkout for %q, content not local. Use fetch to download.", pointer.Name) - } else { - return "", fmt.Errorf("Could not check out %q", pointer.Name) - } - } - - return cwdfilepath, nil -} - // Parameters are filters // firstly convert any pathspecs to the root of the repo, in case this is being // executed in a sub-folder diff --git a/commands/command_pull.go b/commands/command_pull.go index c1b9c9d7..3a55e746 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -104,6 +104,7 @@ func pull(filter *filepathfilter.Filter) { } } +// tracks LFS objects being downloaded, according to their unique OIDs. type pointerMap struct { pointers map[string][]*lfs.WrappedPointer mu sync.Mutex diff --git a/commands/pull.go b/commands/pull.go index cb5152b0..002aa99d 100644 --- a/commands/pull.go +++ b/commands/pull.go @@ -2,10 +2,13 @@ package commands import ( "bytes" + "fmt" "io" + "os" "os/exec" "sync" + "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/transfer" ) @@ -34,16 +37,40 @@ type singleCheckout struct { } func (c *singleCheckout) Run(p *lfs.WrappedPointer) { - cwdfilepath, err := checkout(p, c.pathConverter, c.manifest) - if err != nil { + // Check the content - either missing or still this pointer (not exist is ok) + filepointer, err := lfs.DecodePointerFromFile(p.Name) + if err != nil && !os.IsNotExist(err) { + if errors.IsNotAPointerError(err) { + // File has non-pointer content, leave it alone + return + } + LoggedError(err, "Checkout error: %s", err) + return } - if len(cwdfilepath) > 0 { - // errors are only returned when the gitIndexer is starting a new cmd - if err := c.gitIndexer.Add(cwdfilepath); err != nil { - Panic(err, "Could not update the index") + if filepointer != nil && filepointer.Oid != p.Oid { + // User has probably manually reset a file to another commit + // while leaving it a pointer; don't mess with this + return + } + + cwdfilepath := c.pathConverter.Convert(p.Name) + + err = lfs.PointerSmudgeToFile(cwdfilepath, p.Pointer, false, c.manifest, nil) + if err != nil { + if errors.IsDownloadDeclinedError(err) { + // acceptable error, data not local (fetch not run or include/exclude) + LoggedError(err, "Skipped checkout for %q, content not local. 
Use fetch to download.", p.Name) + } else { + FullError(fmt.Errorf("Could not check out %q", p.Name)) } + return + } + + // errors are only returned when the gitIndexer is starting a new cmd + if err := c.gitIndexer.Add(cwdfilepath); err != nil { + Panic(err, "Could not update the index") } } From 5889292d792c4dbd9008e71622140d79d830f2b8 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 15:35:58 -0700 Subject: [PATCH 51/68] no need to keep a filter var around --- commands/command_checkout.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 4be1be56..9b9a31bb 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -17,7 +17,6 @@ func checkoutCommand(cmd *cobra.Command, args []string) { var totalBytes int64 meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) - filter := filepathfilter.New(rootedPaths(args), nil) singleCheckout := newSingleCheckout() chgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { @@ -37,7 +36,7 @@ func checkoutCommand(cmd *cobra.Command, args []string) { meter.FinishTransfer(p.Name) }) - chgitscanner.Filter = filter + chgitscanner.Filter = filepathfilter.New(rootedPaths(args), nil) if err := chgitscanner.ScanTree(ref.Sha); err != nil { ExitWithError(err) From d84f7e1cef7b760059d2e10a869769b3f378b4dc Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Wed, 14 Dec 2016 15:48:59 -0700 Subject: [PATCH 52/68] defer those unlocks --- commands/command_pull.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/commands/command_pull.go b/commands/command_pull.go index 3a55e746..32d2280f 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -116,12 +116,12 @@ func newPointerMap() *pointerMap { func (m *pointerMap) Seen(p *lfs.WrappedPointer) bool { m.mu.Lock() - existing, ok := m.pointers[p.Oid] - if ok { + defer m.mu.Unlock() + if existing, ok := m.pointers[p.Oid]; ok { m.pointers[p.Oid] = append(existing, p) + return true } - m.mu.Unlock() - return ok + return false } func (m *pointerMap) Add(p *lfs.WrappedPointer) { @@ -132,10 +132,9 @@ func (m *pointerMap) Add(p *lfs.WrappedPointer) { func (m *pointerMap) All(oid string) []*lfs.WrappedPointer { m.mu.Lock() + defer m.mu.Unlock() pointers := m.pointers[oid] delete(m.pointers, oid) - m.mu.Unlock() - return pointers } From acff69f78d0ec9c9a04476fb3776fa59d777665b Mon Sep 17 00:00:00 2001 From: Sebastian Schuberth Date: Thu, 15 Dec 2016 11:38:34 +0100 Subject: [PATCH 53/68] AppVeyor: Fix printing the GOPATH environment variable --- appveyor.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 06cfabea..5fea14fc 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -7,7 +7,7 @@ environment: clone_folder: $(GOPATH)\src\github.com\git-lfs\git-lfs install: - - echo $(GOPATH) + - echo %GOPATH% - rd C:\Go /s /q - appveyor DownloadFile https://storage.googleapis.com/golang/go1.7.4.windows-amd64.zip - 7z x go1.7.4.windows-amd64.zip -oC:\ >nul From b95bd408f63faffb291c8d14275ef6f056422607 Mon Sep 17 00:00:00 2001 From: Sebastian Schuberth Date: Fri, 9 Dec 2016 16:01:32 +0100 Subject: [PATCH 54/68] Add more meta-data to the Windows installer executable On Windows, this makes the detailed file properties dialog show the file version and copyright information also for the Windows installer executable. Previously only the git-lfs executables had this information. 
--- .../windows-installer/inno-setup-git-lfs-installer.iss | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/script/windows-installer/inno-setup-git-lfs-installer.iss b/script/windows-installer/inno-setup-git-lfs-installer.iss index d1a288d9..227da56c 100644 --- a/script/windows-installer/inno-setup-git-lfs-installer.iss +++ b/script/windows-installer/inno-setup-git-lfs-installer.iss @@ -1,8 +1,10 @@ #define MyAppName "Git LFS" -; Misuse RemoveFileExt to strip the 4th patch-level version number. ; Arbitrarily choose the x86 executable here as both have the version embedded. -#define MyAppVersion RemoveFileExt(GetFileVersion("..\..\git-lfs-x86.exe")) +#define MyVersionInfoVersion GetFileVersion("..\..\git-lfs-x86.exe") + +; Misuse RemoveFileExt to strip the 4th patch-level version number. +#define MyAppVersion RemoveFileExt(MyVersionInfoVersion) #define MyAppPublisher "GitHub, Inc." #define MyAppURL "https://git-lfs.github.com/" @@ -15,7 +17,7 @@ AppId={{286391DE-F778-44EA-9375-1B21AAA04FF0} AppName={#MyAppName} AppVersion={#MyAppVersion} -;AppVerName={#MyAppName} {#MyAppVersion} +AppCopyright=GitHub, Inc. and Git LFS contributors AppPublisher={#MyAppPublisher} AppPublisherURL={#MyAppURL} AppSupportURL={#MyAppURL} @@ -32,6 +34,7 @@ DisableReadyPage=True ArchitecturesInstallIn64BitMode=x64 ChangesEnvironment=yes SetupIconFile=git-lfs-logo.ico +VersionInfoVersion={#MyVersionInfoVersion} WizardImageFile=git-lfs-wizard-image.bmp WizardSmallImageFile=git-lfs-logo.bmp From a759a2fd05b1a9a2800f78cd8ccc8f4091bd4e02 Mon Sep 17 00:00:00 2001 From: Sebastian Schuberth Date: Wed, 14 Dec 2016 22:52:34 +0100 Subject: [PATCH 55/68] Use goversioninfo instead of windres to embed Windows resources windres is not always present whereas goversioninfo is easy to install. This also fixes the generation of resource.syso to be part of the standard build script instead of only the AppVeyor CI build. Finally, adapt update-version.sh to patch versioninfo.json. 
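For readers who have not used goversioninfo: the pattern this patch adopts needs only a go:generate directive in the main package plus the new versioninfo.json file beside it. A minimal sketch of that shape follows — the directive and icon path are the ones added to git-lfs.go in this patch, while the surrounding package is illustrative:

    //go:generate goversioninfo -icon=script/windows-installer/git-lfs-logo.ico
    package main

    // `go generate` runs goversioninfo, which reads versioninfo.json from
    // the package directory and writes resource.syso alongside it; the Go
    // toolchain links any *.syso file into Windows builds automatically,
    // which is what populates the file-properties version fields.

    func main() {}

On Windows, script/bootstrap now runs `go generate` itself before building, replacing the old windres invocation, and update-version keeps versioninfo.json's version fields in step with config/version.go.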
--- .gitignore | 2 +- appveyor.yml | 1 - git-lfs.go | 2 ++ script/bootstrap | 8 ++++++++ script/windows-installer/resources.rc | 20 -------------------- update-version.sh | 10 +++++++--- versioninfo.json | 18 ++++++++++++++++++ 7 files changed, 36 insertions(+), 25 deletions(-) delete mode 100644 script/windows-installer/resources.rc create mode 100644 versioninfo.json diff --git a/.gitignore b/.gitignore index c5d11ab7..b215777e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,7 @@ bin/ benchmark/ out/ -resources.syso +resource.syso # only allow man/*.\d.ronn files man/* diff --git a/appveyor.yml b/appveyor.yml index 5fea14fc..e07bcf45 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -16,7 +16,6 @@ install: - set PATH="C:\Program Files (x86)\Inno Setup 5";%PATH% build_script: - - C:\Ruby23\DevKit\mingw\bin\windres.exe script\windows-installer\resources.rc -o resources.syso - bash --login -c 'GOARCH=386 script/bootstrap' - mv bin\git-lfs.exe git-lfs-x86.exe - bash --login -c 'GOARCH=amd64 script/bootstrap' diff --git a/git-lfs.go b/git-lfs.go index 823dcebd..c0dad72a 100644 --- a/git-lfs.go +++ b/git-lfs.go @@ -1,3 +1,5 @@ +//go:generate goversioninfo -icon=script/windows-installer/git-lfs-logo.ico + package main import ( diff --git a/script/bootstrap b/script/bootstrap index 1ed05341..e57a1b80 100755 --- a/script/bootstrap +++ b/script/bootstrap @@ -14,6 +14,14 @@ if [ -z "$GOPATH" ]; then ln -s "$GOPATH" src/github.com/git-lfs/git-lfs fi +if uname -s | grep -q "_NT-"; then + echo "Installing goversioninfo to embed resources into Windows executables..." + go get github.com/josephspurrier/goversioninfo/cmd/goversioninfo + export PATH=$PATH:$GOPATH/bin/windows_386 + echo "Creating the resource.syso version information file..." + go generate +fi + script/fmt rm -rf bin/* GO15VENDOREXPERIMENT=1 go run script/*.go -cmd build "$@" diff --git a/script/windows-installer/resources.rc b/script/windows-installer/resources.rc deleted file mode 100644 index 5fb0b451..00000000 --- a/script/windows-installer/resources.rc +++ /dev/null @@ -1,20 +0,0 @@ -main ICON "git-lfs-logo.ico" -1 VERSIONINFO -FILEVERSION 1,5,0,0 -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "040904b0" /* LANG_ENGLISH/SUBLANG_ENGLISH_US, Unicode CP */ - BEGIN - VALUE "FileDescription", "Git LFS\0" - VALUE "ProductName", "Git Large File Storage (LFS)\0" - VALUE "ProductVersion", "1.5.0\0" - VALUE "LegalCopyright", "GitHub, Inc. and Git LFS contributors\0" - END - END - - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x409, 1200 - END -END diff --git a/update-version.sh b/update-version.sh index a1d2292e..936fdb1d 100755 --- a/update-version.sh +++ b/update-version.sh @@ -4,7 +4,8 @@ VERSION_STRING=$1 VERSION_ARRAY=( ${VERSION_STRING//./ } ) VERSION_MAJOR=${VERSION_ARRAY[0]} VERSION_MINOR=${VERSION_ARRAY[1]} -VERSION_PATCH=${VERSION_ARRAY[2]} +VERSION_PATCH=${VERSION_ARRAY[2]:-0} +VERSION_BUILD=${VERSION_ARRAY[3]:-0} # Update the version number git-lfs is reporting. sed -i "s,\(Version = \"\).*\(\"\),\1$VERSION_STRING\2," config/version.go @@ -13,5 +14,8 @@ sed -i "s,\(Version = \"\).*\(\"\),\1$VERSION_STRING\2," config/version.go sed -i "s,\(Version:[[:space:]]*\).*,\1$VERSION_STRING," rpm/SPECS/git-lfs.spec # Update the version numbers in the Windows installer. 
-sed -i "s,\(FILEVERSION \).*,\1$VERSION_MAJOR\,$VERSION_MINOR\,$VERSION_PATCH\,0," script/windows-installer/resources.rc -sed -i "s,\([[:space:]]*VALUE \"ProductVersion\"\, \"\).*\(\\\\0\"\),\1$VERSION_STRING\2," script/windows-installer/resources.rc +sed -i "s,\(\"Major\": \).*\,,\1$VERSION_MAJOR\,," versioninfo.json +sed -i "s,\(\"Minor\": \).*\,,\1$VERSION_MINOR\,," versioninfo.json +sed -i "s,\(\"Patch\": \).*\,,\1$VERSION_PATCH\,," versioninfo.json +sed -i "s,\(\"Build\": \).*,\1$VERSION_BUILD," versioninfo.json +sed -i "s,\(\"ProductVersion\": \"\).*\(\"\),\1$VERSION_STRING\2," versioninfo.json diff --git a/versioninfo.json b/versioninfo.json new file mode 100644 index 00000000..3d1e5ec7 --- /dev/null +++ b/versioninfo.json @@ -0,0 +1,18 @@ +{ + "FixedFileInfo": + { + "FileVersion": { + "Major": 1, + "Minor": 5, + "Patch": 0, + "Build": 0 + } + }, + "StringFileInfo": + { + "FileDescription": "Git LFS", + "LegalCopyright": "GitHub, Inc. and Git LFS contributors", + "ProductName": "Git Large File Storage (LFS)", + "ProductVersion": "1.5.0" + } +} From 303156e0e22ef7cdd863605fbb501174dc25e08a Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Thu, 15 Dec 2016 09:59:06 -0700 Subject: [PATCH 56/68] Change Adapter Begin() signature to accept an AdapterConfig interface --- lfs/pointer_smudge.go | 10 +++++++++- test/test-custom-transfers.sh | 5 ++--- tq/adapterbase.go | 3 ++- tq/custom.go | 18 ++++++++---------- tq/transfer.go | 6 +++++- tq/transfer_queue.go | 2 +- tq/transfer_test.go | 2 +- 7 files changed, 28 insertions(+), 18 deletions(-) diff --git a/lfs/pointer_smudge.go b/lfs/pointer_smudge.go index d86a0410..5a0befbd 100644 --- a/lfs/pointer_smudge.go +++ b/lfs/pointer_smudge.go @@ -72,6 +72,14 @@ func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download return nil } +type tempAdapterConfig struct { + concurrentTransfers int +} + +func (c tempAdapterConfig) ConcurrentTransfers() int { + return c.concurrentTransfers +} + func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest *tq.Manifest, cb progress.CopyCallback) error { fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, pb.FormatBytes(ptr.Size)) @@ -93,7 +101,7 @@ func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, } } // Single download - err = adapter.Begin(1, tcb) + err = adapter.Begin(tempAdapterConfig{concurrentTransfers: 1}, tcb) if err != nil { return err } diff --git a/test/test-custom-transfers.sh b/test/test-custom-transfers.sh index 9a32bb76..e1ebd079 100755 --- a/test/test-custom-transfers.sh +++ b/test/test-custom-transfers.sh @@ -28,7 +28,7 @@ begin_test "custom-transfer-wrong-path" GIT_TRACE=1 git push origin master 2>&1 | tee pushcustom.log # use PIPESTATUS otherwise we get exit code from tee res=${PIPESTATUS[0]} - grep "xfer: Custom transfer adapter" pushcustom.log + grep "xfer: adapter \"testcustom\" Begin()" pushcustom.log grep "Failed to start custom transfer command" pushcustom.log if [ "$res" = "0" ]; then echo "Push should have failed because of an incorrect custom transfer path." 
@@ -101,9 +101,8 @@ begin_test "custom-transfer-upload-download" grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-customadapter\]:" fetchcustom.log - grep "11 of 11 files" fetchcustom.log + grep "11 of 11 files" fetchcustom.log [ `find .git/lfs/objects -type f | wc -l` = 11 ] ) end_test - diff --git a/tq/adapterbase.go b/tq/adapterbase.go index 52fff93e..85262e51 100644 --- a/tq/adapterbase.go +++ b/tq/adapterbase.go @@ -60,9 +60,10 @@ func (a *adapterBase) Direction() Direction { return a.direction } -func (a *adapterBase) Begin(maxConcurrency int, cb ProgressCallback) error { +func (a *adapterBase) Begin(cfg AdapterConfig, cb ProgressCallback) error { a.cb = cb a.jobChan = make(chan *job, 100) + maxConcurrency := cfg.ConcurrentTransfers() tracerx.Printf("xfer: adapter %q Begin() with %d workers", a.Name(), maxConcurrency) diff --git a/tq/custom.go b/tq/custom.go index 290fb91a..bacd46e5 100644 --- a/tq/custom.go +++ b/tq/custom.go @@ -105,18 +105,16 @@ type customAdapterResponseMessage struct { BytesSinceLast int `json:"bytesSinceLast"` } -func (a *customAdapter) Begin(maxConcurrency int, cb ProgressCallback) error { - // If config says not to launch multiple processes, downgrade incoming value - useConcurrency := maxConcurrency - if !a.concurrent { - useConcurrency = 1 +func (a *customAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { + a.originalConcurrency = cfg.ConcurrentTransfers() + if a.concurrent { + // Use common workers impl, but downgrade workers to number of processes + return a.adapterBase.Begin(cfg, cb) } - a.originalConcurrency = maxConcurrency - tracerx.Printf("xfer: Custom transfer adapter %q using concurrency %d", a.name, useConcurrency) - - // Use common workers impl, but downgrade workers to number of processes - return a.adapterBase.Begin(useConcurrency, cb) + // If config says not to launch multiple processes, downgrade incoming value + newCfg := &Manifest{concurrentTransfers: 1} + return a.adapterBase.Begin(newCfg, cb) } func (a *customAdapter) ClearTempStorage() error { diff --git a/tq/transfer.go b/tq/transfer.go index cf255c29..3d8e0dd7 100644 --- a/tq/transfer.go +++ b/tq/transfer.go @@ -161,6 +161,10 @@ type NewAdapterFunc func(name string, dir Direction) Adapter type ProgressCallback func(name string, totalSize, readSoFar int64, readSinceLast int) error +type AdapterConfig interface { + ConcurrentTransfers() int +} + // Adapter is implemented by types which can upload and/or download LFS // file content to a remote store. Each Adapter accepts one or more requests // which it may schedule and parallelise in whatever way it chooses, clients of @@ -185,7 +189,7 @@ type Adapter interface { // one or more Add calls. maxConcurrency controls the number of transfers // that may be done at once. The passed in callback will receive updates on // progress. Either argument may be nil if not required by the client. 
- Begin(maxConcurrency int, cb ProgressCallback) error + Begin(cfg AdapterConfig, cb ProgressCallback) error // Add queues a download/upload, which will complete asynchronously and // notify the callbacks given to Begin() Add(transfers ...*Transfer) (results <-chan TransferResult) diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index 2e31fed6..3752f69f 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -530,7 +530,7 @@ func (q *TransferQueue) ensureAdapterBegun() error { } tracerx.Printf("tq: starting transfer adapter %q", q.adapter.Name()) - err := q.adapter.Begin(q.manifest.ConcurrentTransfers(), cb) + err := q.adapter.Begin(q.manifest, cb) if err != nil { return err } diff --git a/tq/transfer_test.go b/tq/transfer_test.go index ad4dc862..96640146 100644 --- a/tq/transfer_test.go +++ b/tq/transfer_test.go @@ -21,7 +21,7 @@ func (a *testAdapter) Direction() Direction { return a.dir } -func (a *testAdapter) Begin(maxConcurrency int, cb ProgressCallback) error { +func (a *testAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { return nil } From 27ff433d4630081bab2a5b711956a1705ea09870 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Thu, 15 Dec 2016 10:13:36 -0700 Subject: [PATCH 57/68] teach the smudge filter how to use the transfer queue directly --- lfs/pointer_smudge.go | 38 ++++++++++++-------------------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/lfs/pointer_smudge.go b/lfs/pointer_smudge.go index d86a0410..df556396 100644 --- a/lfs/pointer_smudge.go +++ b/lfs/pointer_smudge.go @@ -10,7 +10,6 @@ import ( "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" - "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/progress" @@ -75,34 +74,21 @@ func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest *tq.Manifest, cb progress.CopyCallback) error { fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, pb.FormatBytes(ptr.Size)) - xfers := manifest.GetDownloadAdapterNames() - obj, adapterName, err := api.BatchSingle(config.Config, &api.ObjectResource{Oid: ptr.Oid, Size: ptr.Size}, "download", xfers) - if err != nil { - return errors.Wrapf(err, "Error downloading %s: %s", filepath.Base(mediafile), err) - } + q := tq.NewTransferQueue(tq.Download, manifest) + q.Add(filepath.Base(workingfile), mediafile, ptr.Oid, ptr.Size) + q.Wait() - if ptr.Size == 0 { - ptr.Size = obj.Size - } - - adapter := manifest.NewDownloadAdapter(adapterName) - var tcb tq.ProgressCallback - if cb != nil { - tcb = func(name string, totalSize, readSoFar int64, readSinceLast int) error { - return cb(totalSize, readSoFar, readSinceLast) + if errs := q.Errors(); len(errs) > 0 { + var multiErr error + for _, e := range errs { + if multiErr != nil { + multiErr = fmt.Errorf("%v\n%v", multiErr, e) + } else { + multiErr = e + } + return errors.Wrapf(multiErr, "Error downloading %s (%s)", workingfile, ptr.Oid) } } - // Single download - err = adapter.Begin(1, tcb) - if err != nil { - return err - } - res := <-adapter.Add(tq.NewTransfer(filepath.Base(workingfile), obj, mediafile)) - adapter.End() - - if res.Error != nil { - return errors.Wrapf(err, "Error buffering media file: %s", res.Error) - } return readLocalFile(writer, ptr, mediafile, workingfile, nil) } From a73a3c67a8da0749bdb82a852c2d6ecd62d04d1f Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Thu, 
15 Dec 2016 13:20:41 -0700 Subject: [PATCH 58/68] lfs,commands: remove lfs.NewDownloadable() in favor of commands.downloadTransfer --- commands/command_fetch.go | 3 +-- commands/command_prune.go | 6 +++--- commands/commands.go | 6 ++++++ commands/uploader.go | 3 +-- lfs/download_queue.go | 11 ----------- 5 files changed, 11 insertions(+), 18 deletions(-) diff --git a/commands/command_fetch.go b/commands/command_fetch.go index 017730f4..78a9c217 100644 --- a/commands/command_fetch.go +++ b/commands/command_fetch.go @@ -327,8 +327,7 @@ func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfil for _, p := range pointers { tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) - d := lfs.NewDownloadable(p) - q.Add(d.Name, d.Path, d.Oid, d.Size) + q.Add(downloadTransfer(p)) } processQueue := time.Now() diff --git a/commands/command_prune.go b/commands/command_prune.go index db0fb1ac..b1314c73 100644 --- a/commands/command_prune.go +++ b/commands/command_prune.go @@ -149,10 +149,10 @@ func prune(fetchPruneConfig config.FetchPruneConfig, verifyRemote, dryRun, verbo if verifyRemote { tracerx.Printf("VERIFYING: %v", file.Oid) - pointer := lfs.NewPointer(file.Oid, file.Size, nil) - d := lfs.NewDownloadable(&lfs.WrappedPointer{Pointer: pointer}) - verifyQueue.Add(d.Name, d.Path, d.Oid, d.Size) + verifyQueue.Add(downloadTransfer(&lfs.WrappedPointer{ + Pointer: lfs.NewPointer(file.Oid, file.Size, nil), + })) } } } diff --git a/commands/commands.go b/commands/commands.go index 85235df2..fb952b6e 100644 --- a/commands/commands.go +++ b/commands/commands.go @@ -67,6 +67,12 @@ func buildFilepathFilter(config *config.Configuration, includeArg, excludeArg *s return filepathfilter.New(inc, exc) } +func downloadTransfer(p *lfs.WrappedPointer) (name, path, oid string, size int64) { + path, _ = lfs.LocalMediaPath(p.Oid) + + return p.Name, path, p.Oid, p.Size +} + // Error prints a formatted message to Stderr. It also gets printed to the // panic log if one is created for this command. func Error(format string, args ...interface{}) { diff --git a/commands/uploader.go b/commands/uploader.go index c29ac974..97859945 100644 --- a/commands/uploader.go +++ b/commands/uploader.go @@ -113,8 +113,7 @@ func (c *uploadContext) checkMissing(missing []*lfs.WrappedPointer, missingSize }() for _, p := range missing { - d := lfs.NewDownloadable(p) - checkQueue.Add(d.Name, d.Path, d.Oid, d.Size) + checkQueue.Add(downloadTransfer(p)) } // Currently this is needed to flush the batch but is not enough to sync diff --git a/lfs/download_queue.go b/lfs/download_queue.go index 2ea18add..7e4b7ae2 100644 --- a/lfs/download_queue.go +++ b/lfs/download_queue.go @@ -5,17 +5,6 @@ import ( "github.com/git-lfs/git-lfs/tq" ) -func NewDownloadable(p *WrappedPointer) *tq.Transfer { - path, _ := LocalMediaPath(p.Oid) - - return &tq.Transfer{ - Oid: p.Oid, - Size: p.Size, - Name: p.Name, - Path: path, - } -} - // NewDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download func NewDownloadCheckQueue(cfg *config.Configuration, options ...tq.Option) *tq.TransferQueue { allOptions := make([]tq.Option, len(options), len(options)+1) From 0ce59eb5b7617228559ae79212b495d3c8f7536e Mon Sep 17 00:00:00 2001 From: Sebastian Schuberth Date: Thu, 15 Dec 2016 21:28:31 +0100 Subject: [PATCH 59/68] Move update-version to the script directory And also equalize the shebang to the other scripts. 
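Incidentally, the script being moved here relies on bash's default-expansion idiom: ${VERSION_ARRAY[2]:-0} substitutes "0" when a component is absent, so a two-part version like "2.1" still yields four numbers for versioninfo.json. Restated as a hedged Go sketch (not code from this repository), the parsing amounts to:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseVersion splits a dotted version string into exactly four
    // components, defaulting missing ones to "0" — the equivalent of
    // VERSION_PATCH=${VERSION_ARRAY[2]:-0} in script/update-version.
    func parseVersion(s string) [4]string {
        parts := strings.Split(s, ".")
        var out [4]string
        for i := range out {
            if i < len(parts) {
                out[i] = parts[i]
            } else {
                out[i] = "0"
            }
        }
        return out
    }

    func main() {
        fmt.Println(parseVersion("2.1")) // prints [2 1 0 0]
    }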
--- update-version.sh => script/update-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename update-version.sh => script/update-version (97%) diff --git a/update-version.sh b/script/update-version similarity index 97% rename from update-version.sh rename to script/update-version index 936fdb1d..1742ff88 100755 --- a/update-version.sh +++ b/script/update-version @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash VERSION_STRING=$1 VERSION_ARRAY=( ${VERSION_STRING//./ } ) From 53b9a9d166fccaf318f498a8045432ce25f50377 Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Thu, 15 Dec 2016 13:49:04 -0700 Subject: [PATCH 60/68] lfs,commands: remove lfs.NewUploadable() in favor of commands.uploadTransfer --- commands/commands.go | 62 +++++++++++++++++++++++++ commands/uploader.go | 4 +- lfs/upload_queue.go | 69 ---------------------------- test/git-lfs-test-server-api/main.go | 67 ++++++++++++++++++++++++++- 4 files changed, 129 insertions(+), 73 deletions(-) diff --git a/commands/commands.go b/commands/commands.go index fb952b6e..26d493f3 100644 --- a/commands/commands.go +++ b/commands/commands.go @@ -73,6 +73,68 @@ func downloadTransfer(p *lfs.WrappedPointer) (name, path, oid string, size int64 return p.Name, path, p.Oid, p.Size } +func uploadTransfer(oid, filename string) (*tq.Transfer, error) { + localMediaPath, err := lfs.LocalMediaPath(oid) + if err != nil { + return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) + } + + if len(filename) > 0 { + if err = ensureFile(filename, localMediaPath); err != nil { + return nil, err + } + } + + fi, err := os.Stat(localMediaPath) + if err != nil { + return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) + } + + return &tq.Transfer{ + Name: filename, + Path: localMediaPath, + Oid: oid, + Size: fi.Size(), + }, nil +} + +// ensureFile makes sure that the cleanPath exists before pushing it. If it +// does not exist, it attempts to clean it by reading the file at smudgePath. +func ensureFile(smudgePath, cleanPath string) error { + if _, err := os.Stat(cleanPath); err == nil { + return nil + } + + expectedOid := filepath.Base(cleanPath) + localPath := filepath.Join(config.LocalWorkingDir, smudgePath) + file, err := os.Open(localPath) + if err != nil { + return err + } + + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return err + } + + cleaned, err := lfs.PointerClean(file, file.Name(), stat.Size(), nil) + if cleaned != nil { + cleaned.Teardown() + } + + if err != nil { + return err + } + + if expectedOid != cleaned.Oid { + return fmt.Errorf("Trying to push %q with OID %s.\nNot found in %s.", smudgePath, expectedOid, filepath.Dir(cleanPath)) + } + + return nil +} + // Error prints a formatted message to Stderr. It also gets printed to the // panic log if one is created for this command. 
func Error(format string, args ...interface{}) { diff --git a/commands/uploader.go b/commands/uploader.go index 97859945..6634b7cf 100644 --- a/commands/uploader.go +++ b/commands/uploader.go @@ -140,7 +140,7 @@ func uploadPointers(c *uploadContext, unfiltered []*lfs.WrappedPointer) { q, pointers := c.prepareUpload(unfiltered) for _, p := range pointers { - u, err := lfs.NewUploadable(p.Oid, p.Name) + t, err := uploadTransfer(p.Oid, p.Name) if err != nil { if errors.IsCleanPointerError(err) { Exit(uploadMissingErr, p.Oid, p.Name, errors.GetContext(err, "pointer").(*lfs.Pointer).Oid) @@ -149,7 +149,7 @@ func uploadPointers(c *uploadContext, unfiltered []*lfs.WrappedPointer) { } } - q.Add(u.Name, u.Path, u.Oid, u.Size) + q.Add(t.Name, t.Path, t.Oid, t.Size) c.SetUploaded(p.Oid) } diff --git a/lfs/upload_queue.go b/lfs/upload_queue.go index b489cd98..f8df98c3 100644 --- a/lfs/upload_queue.go +++ b/lfs/upload_queue.go @@ -1,79 +1,10 @@ package lfs import ( - "fmt" - "os" - "path/filepath" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/tq" ) -// NewUploadable builds the Uploadable from the given information. -// "filename" can be empty if a raw object is pushed (see "object-id" flag in push command)/ -func NewUploadable(oid, filename string) (*tq.Transfer, error) { - localMediaPath, err := LocalMediaPath(oid) - if err != nil { - return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) - } - - if len(filename) > 0 { - if err := ensureFile(filename, localMediaPath); err != nil { - return nil, err - } - } - - fi, err := os.Stat(localMediaPath) - if err != nil { - return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) - } - - return &tq.Transfer{ - Name: filename, - Oid: oid, - Size: fi.Size(), - Path: localMediaPath, - }, nil -} - -// ensureFile makes sure that the cleanPath exists before pushing it. If it -// does not exist, it attempts to clean it by reading the file at smudgePath. -func ensureFile(smudgePath, cleanPath string) error { - if _, err := os.Stat(cleanPath); err == nil { - return nil - } - - expectedOid := filepath.Base(cleanPath) - localPath := filepath.Join(config.LocalWorkingDir, smudgePath) - file, err := os.Open(localPath) - if err != nil { - return err - } - - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return err - } - - cleaned, err := PointerClean(file, file.Name(), stat.Size(), nil) - if cleaned != nil { - cleaned.Teardown() - } - - if err != nil { - return err - } - - if expectedOid != cleaned.Oid { - return fmt.Errorf("Trying to push %q with OID %s.\nNot found in %s.", smudgePath, expectedOid, filepath.Dir(cleanPath)) - } - - return nil -} - // NewUploadQueue builds an UploadQueue, allowing `workers` concurrent uploads. func NewUploadQueue(cfg *config.Configuration, options ...tq.Option) *tq.TransferQueue { return tq.NewTransferQueue(tq.Upload, TransferManifest(cfg), options...) 
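Taken together with the uploader.go hunk above, the new helper gives call sites a uniform shape. A hedged sketch of that pattern, assuming the surrounding commands package (the wrapper function is illustrative and not part of this patch; q is the *tq.TransferQueue being filled and c the *uploadContext):

    // illustrative only — this function does not exist in the patch
    func enqueueUpload(q *tq.TransferQueue, c *uploadContext, p *lfs.WrappedPointer) {
        t, err := uploadTransfer(p.Oid, p.Name)
        if err != nil {
            ExitWithError(err)
        }
        // the queue takes the transfer's scalar fields, batches them for
        // the batch API, and hands them off to a transfer adapter
        q.Add(t.Name, t.Path, t.Oid, t.Size)
        c.SetUploaded(p.Oid)
    }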
diff --git a/test/git-lfs-test-server-api/main.go b/test/git-lfs-test-server-api/main.go index 7b79dee3..32d6fa8e 100644 --- a/test/git-lfs-test-server-api/main.go +++ b/test/git-lfs-test-server-api/main.go @@ -7,6 +7,7 @@ import ( "fmt" "math/rand" "os" + "path/filepath" "strconv" "strings" @@ -163,11 +164,11 @@ func buildTestData() (oidsExist, oidsMissing []TestObject, err error) { for _, f := range outputs[0].Files { oidsExist = append(oidsExist, TestObject{Oid: f.Oid, Size: f.Size}) - u, err := lfs.NewUploadable(f.Oid, "Test file") + t, err := uploadTransfer(f.Oid, "Test file") if err != nil { return nil, nil, err } - uploadQueue.Add(u.Name, u.Path, u.Oid, u.Size) + uploadQueue.Add(t.Name, t.Path, t.Oid, t.Size) } uploadQueue.Wait() @@ -285,6 +286,68 @@ func interleaveTestData(slice1, slice2 []TestObject) []TestObject { return ret } +func uploadTransfer(oid, filename string) (*tq.Transfer, error) { + localMediaPath, err := lfs.LocalMediaPath(oid) + if err != nil { + return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) + } + + if len(filename) > 0 { + if err = ensureFile(filename, localMediaPath); err != nil { + return nil, err + } + } + + fi, err := os.Stat(localMediaPath) + if err != nil { + return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) + } + + return &tq.Transfer{ + Name: filename, + Path: localMediaPath, + Oid: oid, + Size: fi.Size(), + }, nil +} + +// ensureFile makes sure that the cleanPath exists before pushing it. If it +// does not exist, it attempts to clean it by reading the file at smudgePath. +func ensureFile(smudgePath, cleanPath string) error { + if _, err := os.Stat(cleanPath); err == nil { + return nil + } + + expectedOid := filepath.Base(cleanPath) + localPath := filepath.Join(config.LocalWorkingDir, smudgePath) + file, err := os.Open(localPath) + if err != nil { + return err + } + + defer file.Close() + + stat, err := file.Stat() + if err != nil { + return err + } + + cleaned, err := lfs.PointerClean(file, file.Name(), stat.Size(), nil) + if cleaned != nil { + cleaned.Teardown() + } + + if err != nil { + return err + } + + if expectedOid != cleaned.Oid { + return fmt.Errorf("Trying to push %q with OID %s.\nNot found in %s.", smudgePath, expectedOid, filepath.Dir(cleanPath)) + } + + return nil +} + func init() { RootCmd.Flags().StringVarP(&apiUrl, "url", "u", "", "URL of the API (must supply this or --clone)") RootCmd.Flags().StringVarP(&cloneUrl, "clone", "c", "", "Clone URL from which to find API (must supply this or --url)") From a0b1adf4369db890427142b9045c198ebec3c324 Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Thu, 15 Dec 2016 14:02:34 -0700 Subject: [PATCH 61/68] tq: un-export `tq.NewTransfer` --- tq/transfer.go | 9 ++------- tq/transfer_queue.go | 2 +- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/tq/transfer.go b/tq/transfer.go index cf255c29..e3d48654 100644 --- a/tq/transfer.go +++ b/tq/transfer.go @@ -32,13 +32,8 @@ type ObjectError struct { Message string `json:"message"` } -// NewTransfer creates a new Transfer instance -// -// XXX(taylor): note, this function will be removed from the `tq` package's API -// before landing in master. It is currently used by the smudge operation to -// download a single file and pass _directly_ to the Adapter, whereas it should -// use a transferqueue. 
-func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { +// newTransfer creates a new Transfer instance +func newTransfer(name string, obj *api.ObjectResource, path string) *Transfer { t := &Transfer{ Name: name, Oid: obj.Oid, diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index 2e31fed6..a30281c2 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -336,7 +336,7 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error) q.Skip(o.Size) q.wait.Done() } else { - tr := NewTransfer(t.Name, o, t.Path) + tr := newTransfer(t.Name, o, t.Path) if _, err := tr.Actions.Get(q.transferKind()); err != nil { // XXX(taylor): duplication From 020bd08e9b66ffe96a51a6148611b30fa531017b Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Thu, 15 Dec 2016 14:03:57 -0700 Subject: [PATCH 62/68] tq: un-export `tq.Batch` --- tq/transfer_queue.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index a30281c2..50110dc5 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -62,7 +62,7 @@ func (r *retryCounter) CanRetry(oid string) (int, bool) { return count, count < r.MaxRetries } -// Batch implements the sort.Interface interface and enables sorting on a slice +// batch implements the sort.Interface interface and enables sorting on a slice // of `*Transfer`s by object size. // // This interface is implemented here so that the largest objects can be @@ -70,9 +70,9 @@ func (r *retryCounter) CanRetry(oid string) (int, bool) { // current batch has finished processing, this enables us to reduce the risk of // a single worker getting tied up on a large item at the end of a batch while // all other workers are sitting idle. -type Batch []*objectTuple +type batch []*objectTuple -func (b Batch) ApiObjects() []*api.ObjectResource { +func (b batch) ApiObjects() []*api.ObjectResource { transfers := make([]*api.ObjectResource, 0, len(b)) for _, t := range b { transfers = append(transfers, tupleToApiObject(t)) @@ -85,9 +85,9 @@ func tupleToApiObject(t *objectTuple) *api.ObjectResource { return &api.ObjectResource{Oid: t.Oid, Size: t.Size} } -func (b Batch) Len() int { return len(b) } -func (b Batch) Less(i, j int) bool { return b[i].Size < b[j].Size } -func (b Batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b batch) Len() int { return len(b) } +func (b batch) Less(i, j int) bool { return b[i].Size < b[j].Size } +func (b batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } // TransferQueue organises the wider process of uploading and downloading, // including calling the API, passing the actual transfer request to transfer @@ -280,7 +280,7 @@ func (q *TransferQueue) collectBatches() { // // enqueueAndCollectRetriesFor blocks until the entire Batch "batch" has been // processed. -func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error) { +func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) { cfg := config.Config next := q.makeBatch() @@ -377,7 +377,7 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch Batch) (Batch, error) // makeBatch returns a new, empty batch, with a capacity equal to the maximum // batch size designated by the `*TransferQueue`. 
-func (q *TransferQueue) makeBatch() Batch { return make(Batch, 0, q.batchSize) } +func (q *TransferQueue) makeBatch() batch { return make(batch, 0, q.batchSize) } // addToAdapter adds the given "pending" transfers to the transfer adapters and // returns a channel of Transfers that are to be retried in the next batch. From d8fc641b5530bf215303aa0218f938426c0f1e5d Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Thu, 15 Dec 2016 14:10:09 -0700 Subject: [PATCH 63/68] test: remove ensureFile function and call-site --- test/git-lfs-test-server-api/main.go | 44 ---------------------------- 1 file changed, 44 deletions(-) diff --git a/test/git-lfs-test-server-api/main.go b/test/git-lfs-test-server-api/main.go index 32d6fa8e..c211cce7 100644 --- a/test/git-lfs-test-server-api/main.go +++ b/test/git-lfs-test-server-api/main.go @@ -7,7 +7,6 @@ import ( "fmt" "math/rand" "os" - "path/filepath" "strconv" "strings" @@ -292,12 +291,6 @@ func uploadTransfer(oid, filename string) (*tq.Transfer, error) { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } - if len(filename) > 0 { - if err = ensureFile(filename, localMediaPath); err != nil { - return nil, err - } - } - fi, err := os.Stat(localMediaPath) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) @@ -311,43 +304,6 @@ func uploadTransfer(oid, filename string) (*tq.Transfer, error) { }, nil } -// ensureFile makes sure that the cleanPath exists before pushing it. If it -// does not exist, it attempts to clean it by reading the file at smudgePath. -func ensureFile(smudgePath, cleanPath string) error { - if _, err := os.Stat(cleanPath); err == nil { - return nil - } - - expectedOid := filepath.Base(cleanPath) - localPath := filepath.Join(config.LocalWorkingDir, smudgePath) - file, err := os.Open(localPath) - if err != nil { - return err - } - - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return err - } - - cleaned, err := lfs.PointerClean(file, file.Name(), stat.Size(), nil) - if cleaned != nil { - cleaned.Teardown() - } - - if err != nil { - return err - } - - if expectedOid != cleaned.Oid { - return fmt.Errorf("Trying to push %q with OID %s.\nNot found in %s.", smudgePath, expectedOid, filepath.Dir(cleanPath)) - } - - return nil -} - func init() { RootCmd.Flags().StringVarP(&apiUrl, "url", "u", "", "URL of the API (must supply this or --clone)") RootCmd.Flags().StringVarP(&cloneUrl, "clone", "c", "", "Clone URL from which to find API (must supply this or --url)") From 290437050cb12e902619a511ca7710469d24191a Mon Sep 17 00:00:00 2001 From: Taylor Blau Date: Thu, 15 Dec 2016 22:46:18 -0700 Subject: [PATCH 64/68] docs/api: object size must be positive --- docs/api/batch.md | 4 ++-- docs/api/schemas/http-batch-request-schema.json | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/api/batch.md b/docs/api/batch.md index 730dacfb..7a661613 100644 --- a/docs/api/batch.md +++ b/docs/api/batch.md @@ -31,7 +31,7 @@ that the client has configured. If omitted, the `basic` transfer adapter MUST be assumed by the server. * `objects` - An Array of objects to download. * `oid` - String OID of the LFS object. - * `size` - Integer byte size of the LFS object. + * `size` - Integer byte size of the LFS object. Must be at least zero. Note: Git LFS currently only supports the `basic` transfer adapter. 
This property was added for future compatibility with some experimental transfer @@ -70,7 +70,7 @@ client will use the `basic` transfer adapter if the `transfer` property is omitted. * `objects` - An Array of objects to download. * `oid` - String OID of the LFS object. - * `size` - Integer byte size of the LFS object. + * `size` - Integer byte size of the LFS object. Must be at least zero. * `authenticated` - Optional boolean specifying whether the request for this specific object is authenticated. If omitted or false, Git LFS will attempt to [find credentials for this URL](./authentication.md). diff --git a/docs/api/schemas/http-batch-request-schema.json b/docs/api/schemas/http-batch-request-schema.json index f50e4451..95e22cf3 100644 --- a/docs/api/schemas/http-batch-request-schema.json +++ b/docs/api/schemas/http-batch-request-schema.json @@ -21,7 +21,8 @@ "type": "string" }, "size": { - "type": "number" + "type": "number", + "minimum": 0 }, "authenticated": { "type": "boolean" From e469ca011554bf69f0f7a2426d40f5ea1df55b89 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Fri, 16 Dec 2016 10:35:38 -0700 Subject: [PATCH 65/68] fix some merge conflicts --- commands/command_pull.go | 5 +++-- commands/pull.go | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/commands/command_pull.go b/commands/command_pull.go index 32d2280f..6bd6e97c 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -9,6 +9,7 @@ import ( "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/git-lfs/git-lfs/progress" + "github.com/git-lfs/git-lfs/tq" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) @@ -46,7 +47,7 @@ func pull(filter *filepathfilter.Filter) { pointers := newPointerMap() meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) singleCheckout := newSingleCheckout() - q := lfs.NewDownloadQueue(lfs.WithProgress(meter)) + q := newDownloadQueue(tq.WithProgress(meter)) gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { LoggedError(err, "Scanner error") @@ -68,7 +69,7 @@ func pull(filter *filepathfilter.Filter) { meter.StartTransfer(p.Name) tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) pointers.Add(p) - q.Add(lfs.NewDownloadable(p)) + q.Add(downloadTransfer(p)) }) gitscanner.Filter = filter diff --git a/commands/pull.go b/commands/pull.go index 002aa99d..ce764982 100644 --- a/commands/pull.go +++ b/commands/pull.go @@ -10,7 +10,7 @@ import ( "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" - "github.com/git-lfs/git-lfs/transfer" + "github.com/git-lfs/git-lfs/tq" ) // Handles the process of checking out a single file, and updating the git @@ -33,7 +33,7 @@ func newSingleCheckout() *singleCheckout { type singleCheckout struct { gitIndexer *gitIndexer pathConverter lfs.PathConverter - manifest *transfer.Manifest + manifest *tq.Manifest } func (c *singleCheckout) Run(p *lfs.WrappedPointer) { From a4ab24dca60e158c1d1ef14036a74bd737092193 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Fri, 16 Dec 2016 15:03:20 -0700 Subject: [PATCH 66/68] lfsapi: initial implementation of verify --- lfsapi/lfsapi.go | 10 ++++++++ tq/basic_upload.go | 5 ++-- tq/verify.go | 44 ++++++++++++++++++++++++++++++++ tq/verify_test.go | 62 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 119 insertions(+), 2 deletions(-) create mode 100644 lfsapi/lfsapi.go create mode 100644 tq/verify.go create mode 100644 tq/verify_test.go diff --git a/lfsapi/lfsapi.go b/lfsapi/lfsapi.go new file mode 100644 
index 00000000..ef334947 --- /dev/null +++ b/lfsapi/lfsapi.go @@ -0,0 +1,10 @@ +package lfsapi + +import "net/http" + +type Client struct { +} + +func (c *Client) Do(req *http.Request) (*http.Response, error) { + return http.DefaultClient.Do(req) +} diff --git a/tq/basic_upload.go b/tq/basic_upload.go index 6175c50d..d03b9cea 100644 --- a/tq/basic_upload.go +++ b/tq/basic_upload.go @@ -7,10 +7,10 @@ import ( "path/filepath" "strconv" - "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/httputil" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" ) @@ -117,7 +117,8 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres io.Copy(ioutil.Discard, res.Body) res.Body.Close() - return api.VerifyUpload(config.Config, toApiObject(t)) + cli := &lfsapi.Client{} + return verifyUpload(cli, t) } // startCallbackReader is a reader wrapper which calls a function as soon as the diff --git a/tq/verify.go b/tq/verify.go new file mode 100644 index 00000000..ac0654b8 --- /dev/null +++ b/tq/verify.go @@ -0,0 +1,44 @@ +package tq + +import ( + "bytes" + "encoding/json" + "net/http" + + "github.com/git-lfs/git-lfs/lfsapi" +) + +func verifyUpload(c *lfsapi.Client, t *Transfer) error { + action, err := t.Actions.Get("verify") + if err != nil { + if IsActionMissingError(err) { + return nil + } + return err + } + + by, err := json.Marshal(struct { + Oid string `json:"oid"` + Size int64 `json:"size"` + }{Oid: t.Oid, Size: t.Size}) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", action.Href, bytes.NewReader(by)) + if err != nil { + return err + } + + for key, value := range action.Header { + req.Header.Set(key, value) + } + req.Header.Set("Content-Type", "application/vnd.git-lfs+json") + + res, err := c.Do(req) + if err != nil { + return err + } + + return res.Body.Close() +} diff --git a/tq/verify_test.go b/tq/verify_test.go new file mode 100644 index 00000000..83a32447 --- /dev/null +++ b/tq/verify_test.go @@ -0,0 +1,62 @@ +package tq + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + + "github.com/git-lfs/git-lfs/lfsapi" + "github.com/stretchr/testify/assert" +) + +func TestVerifyWithoutAction(t *testing.T) { + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + } + + assert.Nil(t, verifyUpload(c, tr)) +} + +func TestVerifySuccess(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + + var tr Transfer + err := json.NewDecoder(r.Body).Decode(&tr) + + assert.Nil(t, err) + assert.Equal(t, "abc", tr.Oid) + assert.EqualValues(t, 123, tr.Size) + assert.Equal(t, "bar", r.Header.Get("Foo")) + assert.Equal(t, "application/vnd.git-lfs+json", r.Header.Get("Content-Type")) + assert.Equal(t, "24", r.Header.Get("Content-Length")) + })) + defer srv.Close() + + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{ + Href: srv.URL + "/verify", + Header: map[string]string{ + "foo": "bar", + }, + }, + }, + } + + assert.Nil(t, verifyUpload(c, tr)) + assert.EqualValues(t, 1, called) +} From e1b45638156989c82b4acccf4039879ad4fb6c26 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Fri, 16 Dec 2016 15:43:05 -0700 Subject: [PATCH 67/68] Implement verify 
failure handling --- lfsapi/errors.go | 79 +++++++++++++++++ lfsapi/lfsapi.go | 36 +++++++- tq/verify_test.go | 212 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 325 insertions(+), 2 deletions(-) create mode 100644 lfsapi/errors.go diff --git a/lfsapi/errors.go b/lfsapi/errors.go new file mode 100644 index 00000000..58b13eeb --- /dev/null +++ b/lfsapi/errors.go @@ -0,0 +1,79 @@ +package lfsapi + +import ( + "fmt" + "net/http" + + "github.com/git-lfs/git-lfs/errors" +) + +type ClientError struct { + Message string `json:"message"` + DocumentationUrl string `json:"documentation_url,omitempty"` + RequestId string `json:"request_id,omitempty"` +} + +func (e *ClientError) Error() string { + msg := e.Message + if len(e.DocumentationUrl) > 0 { + msg += "\nDocs: " + e.DocumentationUrl + } + if len(e.RequestId) > 0 { + msg += "\nRequest ID: " + e.RequestId + } + return msg +} + +func (c *Client) handleResponse(res *http.Response) error { + if res.StatusCode < 400 { + return nil + } + + cliErr := &ClientError{} + err := decodeResponse(res, cliErr) + if err == nil { + if len(cliErr.Message) == 0 { + err = defaultError(res) + } else { + err = errors.Wrap(cliErr, "http") + } + } + + if res.StatusCode == 401 { + return errors.NewAuthError(err) + } + + if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 507 && res.StatusCode != 509 { + return errors.NewFatalError(err) + } + + return err +} + +var ( + defaultErrors = map[int]string{ + 400: "Client error: %s", + 401: "Authorization error: %s\nCheck that you have proper access to the repository", + 403: "Authorization error: %s\nCheck that you have proper access to the repository", + 404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it", + 429: "Rate limit exceeded: %s", + 500: "Server error: %s", + 501: "Not Implemented: %s", + 507: "Insufficient server storage: %s", + 509: "Bandwidth limit exceeded: %s", + } +) + +func defaultError(res *http.Response) error { + var msgFmt string + + if f, ok := defaultErrors[res.StatusCode]; ok { + msgFmt = f + } else if res.StatusCode < 500 { + msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode) + } else { + msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP %d", res.StatusCode) + } + + return errors.Errorf(msgFmt, res.Request.URL) +} diff --git a/lfsapi/lfsapi.go b/lfsapi/lfsapi.go index ef334947..9feea05c 100644 --- a/lfsapi/lfsapi.go +++ b/lfsapi/lfsapi.go @@ -1,10 +1,42 @@ package lfsapi -import "net/http" +import ( + "encoding/json" + "net/http" + "regexp" + + "github.com/pkg/errors" +) + +var ( + lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) + jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) +) type Client struct { } func (c *Client) Do(req *http.Request) (*http.Response, error) { - return http.DefaultClient.Do(req) + res, err := http.DefaultClient.Do(req) + if err != nil { + return res, err + } + + return res, c.handleResponse(res) +} + +func decodeResponse(res *http.Response, obj interface{}) error { + ctype := res.Header.Get("Content-Type") + if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { + return nil + } + + err := json.NewDecoder(res.Body).Decode(obj) + res.Body.Close() + + if err != nil { + return errors.Wrapf(err, "Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL) + } + + return nil } diff --git a/tq/verify_test.go b/tq/verify_test.go index 83a32447..205781e0 100644 --- a/tq/verify_test.go 
+++ b/tq/verify_test.go @@ -4,9 +4,11 @@ import ( "encoding/json" "net/http" "net/http/httptest" + "strings" "sync/atomic" "testing" + "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" ) @@ -60,3 +62,213 @@ func TestVerifySuccess(t *testing.T) { assert.Nil(t, verifyUpload(c, tr)) assert.EqualValues(t, 1, called) } + +func TestVerifyAuthErrWithBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + w.Write([]byte(`{"message":"custom auth error"}`)) + })) + defer srv.Close() + + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{ + Href: srv.URL + "/verify", + }, + }, + } + + err := verifyUpload(c, tr) + assert.NotNil(t, err) + assert.True(t, errors.IsAuthError(err)) + assert.Equal(t, "Authentication required: http: custom auth error", err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestVerifyFatalWithBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + w.Write([]byte(`{"message":"custom fatal error"}`)) + })) + defer srv.Close() + + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{ + Href: srv.URL + "/verify", + }, + }, + } + + err := verifyUpload(c, tr) + assert.NotNil(t, err) + assert.True(t, errors.IsFatalError(err)) + assert.Equal(t, "Fatal error: http: custom fatal error", err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestVerifyWithNonFatal500WithBody(t *testing.T) { + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{}, + }, + } + + var called uint32 + + nonFatalCodes := map[int]string{ + 501: "custom 501 error", + 507: "custom 507 error", + 509: "custom 509 error", + } + + for nonFatalCode, expectedErr := range nonFatalCodes { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(nonFatalCode) + w.Write([]byte(`{"message":"` + expectedErr + `"}`)) + })) + + tr.Actions["verify"].Href = srv.URL + "/verify" + err := verifyUpload(c, tr) + t.Logf("non fatal code %d", nonFatalCode) + assert.NotNil(t, err) + assert.Equal(t, "http: "+expectedErr, err.Error()) + srv.Close() + } + + assert.EqualValues(t, 3, called) +} + +func TestVerifyAuthErrWithoutBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.WriteHeader(401) + })) + defer srv.Close() + + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{ + Href: srv.URL + "/verify", + }, + }, + } + + err := verifyUpload(c, tr) + assert.NotNil(t, 
err) + assert.True(t, errors.IsAuthError(err)) + assert.True(t, strings.HasPrefix(err.Error(), "Authentication required: Authorization error:"), err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestVerifyFatalWithoutBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.WriteHeader(500) + })) + defer srv.Close() + + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{ + Href: srv.URL + "/verify", + }, + }, + } + + err := verifyUpload(c, tr) + assert.NotNil(t, err) + assert.True(t, errors.IsFatalError(err)) + assert.True(t, strings.HasPrefix(err.Error(), "Fatal error: Server error:"), err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestVerifyWithNonFatal500WithoutBody(t *testing.T) { + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{}, + }, + } + + var called uint32 + + nonFatalCodes := map[int]string{ + 501: "Not Implemented:", + 507: "Insufficient server storage:", + 509: "Bandwidth limit exceeded:", + } + + for nonFatalCode, errPrefix := range nonFatalCodes { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.WriteHeader(nonFatalCode) + })) + + tr.Actions["verify"].Href = srv.URL + "/verify" + err := verifyUpload(c, tr) + t.Logf("non fatal code %d", nonFatalCode) + assert.NotNil(t, err) + assert.True(t, strings.HasPrefix(err.Error(), errPrefix)) + srv.Close() + } + + assert.EqualValues(t, 3, called) +} From fd64bb70b3f3437a59e2870bb3ae8c63777a2ba4 Mon Sep 17 00:00:00 2001 From: risk danger olson Date: Fri, 16 Dec 2016 15:47:02 -0700 Subject: [PATCH 68/68] use the correct errors pkg --- lfsapi/lfsapi.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lfsapi/lfsapi.go b/lfsapi/lfsapi.go index 9feea05c..4260f9bc 100644 --- a/lfsapi/lfsapi.go +++ b/lfsapi/lfsapi.go @@ -5,7 +5,7 @@ import ( "net/http" "regexp" - "github.com/pkg/errors" + "github.com/git-lfs/git-lfs/errors" ) var (