2015-05-21 16:36:49 +00:00
|
|
|
package lfs
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2015-06-19 17:59:30 +00:00
|
|
|
"os"
|
2015-06-02 15:19:59 +00:00
|
|
|
"path/filepath"
|
2015-05-21 16:36:49 +00:00
|
|
|
"sync"
|
|
|
|
"sync/atomic"
|
2015-05-21 17:47:52 +00:00
|
|
|
|
2015-06-02 15:19:59 +00:00
|
|
|
"github.com/github/git-lfs/git"
|
2015-05-28 17:52:06 +00:00
|
|
|
"github.com/github/git-lfs/vendor/_nuts/github.com/cheggaaa/pb"
|
2015-05-21 16:36:49 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// Transferable represents a single object that the TransferQueue can move
// to or from the LFS server. Implementations supply the metadata the queue
// needs (oid, size, name) and perform the actual check and transfer calls.
type Transferable interface {
	// Check performs the per-object API call, returning the resource to
	// transfer (nil if no transfer is needed) and any error encountered.
	Check() (*objectResource, *WrappedError)
	// Transfer performs the upload or download, reporting progress through
	// the supplied callback.
	Transfer(CopyCallback) *WrappedError
	// Object returns the API resource currently attached to this transferable.
	Object() *objectResource
	// Oid returns the object's content identifier.
	Oid() string
	// Size returns the object's size in bytes (used for the progress bar total).
	Size() int64
	// Name returns a human-readable name used in GIT_LFS_PROGRESS output.
	Name() string
	// SetObject attaches the API resource resolved by Check or the batch API.
	SetObject(*objectResource)
}
|
|
|
|
|
|
|
|
// TransferQueue provides a queue that will allow concurrent transfers.
type TransferQueue struct {
	filesAdded    int32                   // Number of files added to the queue (updated atomically)
	filesFinished int32                   // Number of files that have finished transfering (updated atomically)
	transferIdx   int32                   // Used to track transfer number for progress monitor
	workers       int                     // Number of transfer workers to spawn
	transferKind  string                  // Kind of transfer; also the link relation looked up in batch responses
	errors        []*WrappedError         // Errors accumulated by errorCollector
	transferables map[string]Transferable // All queued transferables, keyed by oid
	bar           *pb.ProgressBar         // Byte-based progress bar; total grows as files are added
	batcher       *Batcher                // Non-nil when the batch API endpoint is in use
	apic          chan Transferable       // Channel for processing individual API requests
	transferc     chan Transferable       // Channel for processing transfers
	errorc        chan *WrappedError      // Channel for processing errors
	progressc     chan string             // Channel for GIT_LFS_PROGRESS monitor
	watchers      []chan string           // Channels notified with each completed oid
	wait          sync.WaitGroup          // Tracks every transferable added via Add until it is finished
}
|
|
|
|
|
|
|
|
// newTransferQueue builds a TransferQueue, allowing `workers` concurrent transfers.
|
2015-07-09 18:21:49 +00:00
|
|
|
func newTransferQueue(workers int) *TransferQueue {
|
|
|
|
q := &TransferQueue{
|
|
|
|
apic: make(chan Transferable, 100),
|
|
|
|
transferc: make(chan Transferable, 100),
|
2015-05-21 16:36:49 +00:00
|
|
|
errorc: make(chan *WrappedError),
|
2015-05-27 19:45:18 +00:00
|
|
|
watchers: make([]chan string, 0),
|
2015-07-09 18:21:49 +00:00
|
|
|
progressc: make(chan string, 100),
|
2015-05-21 16:36:49 +00:00
|
|
|
workers: workers,
|
|
|
|
transferables: make(map[string]Transferable),
|
|
|
|
}
|
2015-07-09 18:21:49 +00:00
|
|
|
|
|
|
|
q.run()
|
|
|
|
|
|
|
|
return q
|
2015-05-21 16:36:49 +00:00
|
|
|
}
|
|
|
|
|
2015-05-26 13:56:43 +00:00
|
|
|
// Add adds a Transferable to the transfer queue.
func (q *TransferQueue) Add(t Transferable) {
	atomic.AddInt32(&q.filesAdded, 1)

	// Sneak in and update the progress bar's total size
	atomic.AddInt64(&q.bar.Total, t.Size())

	// One wait-group slot per transferable; released in transferWorker (or
	// by the api routine on a failed check).
	q.wait.Add(1)
	// NOTE(review): this map write is unsynchronized; assumes Add is not
	// called concurrently with batchApiRoutine's reads — TODO confirm.
	q.transferables[t.Oid()] = t

	// In batch mode the batcher accumulates objects for batchApiRoutine;
	// otherwise each object is checked individually via the api channel.
	if q.batcher != nil {
		q.batcher.Add(t)
		return
	}

	q.apic <- t
}
|
|
|
|
|
|
|
|
// Wait waits for the queue to finish processing all transfers
func (q *TransferQueue) Wait() {
	// Refresh the "(x of y files)" prefix in case no transfer has completed
	// yet since files were added.
	q.bar.Prefix(fmt.Sprintf("(%d of %d files) ", q.filesFinished, q.filesAdded))

	// Stop the batcher so batchApiRoutine's Next() returns nil and it exits.
	if q.batcher != nil {
		q.batcher.Exit()
	}

	// Block until every transferable added via Add has been marked Done.
	q.wait.Wait()

	// All work is finished; close the channels so the api, error, watcher,
	// and progress goroutines drain and exit.
	close(q.apic)
	close(q.transferc)
	close(q.errorc)

	for _, watcher := range q.watchers {
		close(watcher)
	}

	close(q.progressc)

	q.bar.Finish()
}
|
|
|
|
|
2015-05-27 19:45:18 +00:00
|
|
|
// Watch returns a channel where the queue will write the OID of each transfer
|
|
|
|
// as it completes. The channel will be closed when the queue finishes processing.
|
|
|
|
func (q *TransferQueue) Watch() chan string {
|
2015-07-09 18:57:05 +00:00
|
|
|
c := make(chan string, 100)
|
2015-05-27 19:45:18 +00:00
|
|
|
q.watchers = append(q.watchers, c)
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
// individualApiRoutine processes the queue of transfers one at a time by making
// a POST call for each object, feeding the results to the transfer workers.
// If configured, the object transfers can still happen concurrently, the
// sequential nature here is only for the meta POST calls.
func (q *TransferQueue) individualApiRoutine(apiWaiter chan interface{}) {
	for t := range q.apic {
		obj, err := t.Check()
		if err != nil {
			// This transferable will never reach a transfer worker, so
			// release its wait-group slot before reporting the error.
			q.wait.Done()
			q.errorc <- err
			continue
		}

		if apiWaiter != nil { // Signal to launch more individual api workers
			// Non-blocking send: only the first successful check needs to
			// wake launchIndividualApiRoutines; later signals are dropped.
			select {
			case apiWaiter <- 1:
			default:
			}
		}

		// A nil resource means no transfer is required for this object.
		// NOTE(review): in that case the wait-group slot is never released;
		// confirm Check cannot return (nil, nil) or Wait() may hang.
		if obj != nil {
			t.SetObject(obj)
			q.transferc <- t
		}
	}
}
|
2015-05-21 16:36:49 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
// batchApiRoutine processes the queue of transfers using the batch endpoint,
|
|
|
|
// making only one POST call for all objects. The results are then handed
|
|
|
|
// off to the transfer workers.
|
|
|
|
func (q *TransferQueue) batchApiRoutine() {
|
|
|
|
for {
|
|
|
|
batch := q.batcher.Next()
|
|
|
|
if batch == nil {
|
|
|
|
break
|
|
|
|
}
|
2015-05-21 16:36:49 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
transfers := make([]*objectResource, 0, len(batch))
|
|
|
|
for _, t := range batch {
|
|
|
|
transfers = append(transfers, &objectResource{Oid: t.Oid(), Size: t.Size()})
|
|
|
|
}
|
2015-05-21 16:36:49 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
objects, err := Batch(transfers, q.transferKind)
|
|
|
|
if err != nil {
|
|
|
|
if isNotImplError(err) {
|
|
|
|
configFile := filepath.Join(LocalGitDir, "config")
|
|
|
|
git.Config.SetLocal(configFile, "lfs.batch", "false")
|
|
|
|
}
|
|
|
|
// TODO trigger the individual fallback
|
|
|
|
}
|
2015-06-15 11:46:02 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
for _, o := range objects {
|
|
|
|
if _, ok := o.Links[q.transferKind]; ok {
|
|
|
|
// This object needs to be transfered
|
|
|
|
if transfer, ok := q.transferables[o.Oid]; ok {
|
|
|
|
transfer.SetObject(o)
|
|
|
|
q.transferc <- transfer
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-05-21 16:36:49 +00:00
|
|
|
}
|
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
// This goroutine collects errors returned from transfers
|
|
|
|
func (q *TransferQueue) errorCollector() {
|
|
|
|
for err := range q.errorc {
|
|
|
|
q.errors = append(q.errors, err)
|
2015-05-21 16:36:49 +00:00
|
|
|
}
|
2015-07-09 18:21:49 +00:00
|
|
|
}
|
2015-05-21 16:36:49 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
// progressMonitor relays progress lines from progressc to the
// GIT_LFS_PROGRESS log file until the channel is closed by Wait.
func (q *TransferQueue) progressMonitor() {
	output, err := newProgressLogger()
	if err != nil {
		// newProgressLogger returns a usable no-op logger even on error,
		// so report the problem but keep draining the channel below.
		q.errorc <- Error(err)
	}

	for l := range q.progressc {
		if err := output.Write([]byte(l)); err != nil {
			// Stop writing after the first failure, but keep looping so
			// senders on progressc never block.
			q.errorc <- Error(err)
			output.Shutdown()
		}
	}

	output.Close()
}
|
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
// transferWorker pulls transferables off the transfer channel and executes
// them, emitting progress lines and completion notifications.
func (q *TransferQueue) transferWorker() {
	// Direction label for GIT_LFS_PROGRESS output.
	direction := "push"
	if q.transferKind == "download" {
		direction = "pull"
	}

	for transfer := range q.transferc {
		// Stable 1-based index for this transfer in progress output.
		c := atomic.AddInt32(&q.transferIdx, 1)
		cb := func(total, read int64, current int) error {
			// Emit a GIT_LFS_PROGRESS line and advance the byte progress bar.
			q.progressc <- fmt.Sprintf("%s %d/%d %d/%d %s\n", direction, c, q.filesAdded, read, total, transfer.Name())
			q.bar.Add(current)
			return nil
		}

		if err := transfer.Transfer(cb); err != nil {
			q.errorc <- err
		} else {
			// Notify every watcher of the completed oid.
			oid := transfer.Oid()
			for _, c := range q.watchers {
				c <- oid
			}
		}

		// Update the "(x of y files)" prefix and release the wait-group slot
		// acquired in Add.
		f := atomic.AddInt32(&q.filesFinished, 1)
		q.bar.Prefix(fmt.Sprintf("(%d of %d files) ", f, q.filesAdded))
		q.wait.Done()
	}
}
|
|
|
|
|
|
|
|
// launchIndividualApiRoutines first launches a single api worker. When it
|
|
|
|
// receives the first successful api request it launches workers - 1 more
|
|
|
|
// workers. This prevents being prompted for credentials multiple times at once
|
|
|
|
// when they're needed.
|
|
|
|
func (q *TransferQueue) launchIndividualApiRoutines() {
|
2015-06-19 17:59:30 +00:00
|
|
|
go func() {
|
2015-07-09 18:21:49 +00:00
|
|
|
apiWaiter := make(chan interface{})
|
|
|
|
go q.individualApiRoutine(apiWaiter)
|
2015-06-19 17:59:30 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
<-apiWaiter
|
2015-06-19 17:59:30 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
for i := 0; i < q.workers-1; i++ {
|
|
|
|
go q.individualApiRoutine(nil)
|
|
|
|
}
|
2015-06-19 17:59:30 +00:00
|
|
|
}()
|
2015-07-09 18:21:49 +00:00
|
|
|
}
|
2015-06-19 17:59:30 +00:00
|
|
|
|
2015-07-09 18:21:49 +00:00
|
|
|
// run starts the transfer queue and displays a progress bar. Process will
// do individual or batch transfers depending on the Config.BatchTransfer() value.
// Process will transfer files sequentially or concurrently depending on the
// Config.ConcurrentTransfers() value.
func (q *TransferQueue) run() {
	// Set up the pb progress bar. The total size will be updated as files are
	// added to the queue.
	q.bar = pb.New64(0)
	q.bar.SetUnits(pb.U_BYTES)
	q.bar.ShowBar = false
	q.bar.Prefix(fmt.Sprintf("(%d of %d files) ", q.filesFinished, q.filesAdded))
	q.bar.Start()

	// Background collectors for errors and GIT_LFS_PROGRESS output.
	go q.errorCollector()
	go q.progressMonitor()

	// Spawn the workers that perform the actual transfers.
	for i := 0; i < q.workers; i++ {
		go q.transferWorker()
	}

	// Choose between the single batch POST and per-object API calls.
	if Config.BatchTransfer() {
		q.batcher = NewBatcher(100)
		go q.batchApiRoutine()
	} else {
		q.launchIndividualApiRoutines()
	}
}
|
|
|
|
|
2015-05-26 13:56:43 +00:00
|
|
|
// Errors returns any errors encountered during transfer. It should be
// called after Wait() returns, since errorCollector keeps appending to the
// slice until the error channel is closed.
func (q *TransferQueue) Errors() []*WrappedError {
	return q.errors
}
|
2015-06-19 17:59:30 +00:00
|
|
|
|
2015-06-19 18:54:44 +00:00
|
|
|
// progressLogger provides a wrapper around an os.File that can either
// write to the file or ignore all writes completely.
type progressLogger struct {
	writeData bool     // When false, Write is a no-op (see Shutdown)
	log       *os.File // Destination file; may be nil when writeData is false
}
|
|
|
|
|
2015-06-19 18:54:44 +00:00
|
|
|
// Write will write to the file and perform a Sync() if writing succeeds.
|
|
|
|
func (l *progressLogger) Write(b []byte) error {
|
2015-06-19 17:59:30 +00:00
|
|
|
if l.writeData {
|
2015-06-19 18:54:44 +00:00
|
|
|
if _, err := l.log.Write(b); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return l.log.Sync()
|
2015-06-19 17:59:30 +00:00
|
|
|
}
|
2015-06-19 18:59:59 +00:00
|
|
|
return nil
|
2015-06-19 17:59:30 +00:00
|
|
|
}
|
|
|
|
|
2015-06-19 18:54:44 +00:00
|
|
|
// Close will call Close() on the underlying file
|
2015-06-19 17:59:30 +00:00
|
|
|
func (l *progressLogger) Close() error {
|
2015-06-19 19:26:42 +00:00
|
|
|
if l.log != nil {
|
2015-06-19 17:59:30 +00:00
|
|
|
return l.log.Close()
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2015-06-19 18:54:44 +00:00
|
|
|
// Shutdown will cause the logger to ignore any further writes. It should
// be used when writing causes an error.
func (l *progressLogger) Shutdown() {
	l.writeData = false
}
|
|
|
|
|
2015-06-19 18:54:44 +00:00
|
|
|
// newProgressLogger creates a progressLogger based on the presence of
|
|
|
|
// the GIT_LFS_PROGRESS environment variable. If it is present and a log file
|
|
|
|
// is able to be created, the logger will write to the file. If it is absent,
|
|
|
|
// or there is an err creating the file, the logger will ignore all writes.
|
2015-06-19 17:59:30 +00:00
|
|
|
func newProgressLogger() (*progressLogger, error) {
|
|
|
|
logPath := Config.Getenv("GIT_LFS_PROGRESS")
|
|
|
|
|
|
|
|
if len(logPath) == 0 {
|
|
|
|
return &progressLogger{}, nil
|
|
|
|
}
|
|
|
|
if !filepath.IsAbs(logPath) {
|
|
|
|
return &progressLogger{}, fmt.Errorf("GIT_LFS_PROGRESS must be an absolute path")
|
|
|
|
}
|
|
|
|
|
|
|
|
cbDir := filepath.Dir(logPath)
|
|
|
|
if err := os.MkdirAll(cbDir, 0755); err != nil {
|
|
|
|
return &progressLogger{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
|
|
|
|
if err != nil {
|
|
|
|
return &progressLogger{}, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &progressLogger{true, file}, nil
|
|
|
|
}
|