2016-04-06 19:06:34 +00:00
|
|
|
package commands
|
|
|
|
|
|
|
|
import (
|
2017-02-16 20:24:53 +00:00
|
|
|
"fmt"
|
2017-11-16 16:38:45 +00:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2017-04-17 21:29:23 +00:00
|
|
|
"net/url"
|
2016-04-06 19:06:34 +00:00
|
|
|
"os"
|
2017-09-12 14:19:26 +00:00
|
|
|
"path/filepath"
|
2017-02-16 00:24:22 +00:00
|
|
|
"strings"
|
2017-01-13 22:57:58 +00:00
|
|
|
"sync"
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2016-11-15 17:01:18 +00:00
|
|
|
"github.com/git-lfs/git-lfs/errors"
|
2018-01-05 22:22:25 +00:00
|
|
|
"github.com/git-lfs/git-lfs/git"
|
2016-11-15 17:01:18 +00:00
|
|
|
"github.com/git-lfs/git-lfs/lfs"
|
2017-11-22 22:07:24 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tasklog"
|
2016-11-15 17:01:18 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tools"
|
2016-12-12 00:20:14 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tq"
|
2017-02-16 00:24:22 +00:00
|
|
|
"github.com/rubyist/tracerx"
|
2016-04-06 19:06:34 +00:00
|
|
|
)
|
|
|
|
|
2018-01-05 22:22:25 +00:00
|
|
|
func uploadForRefUpdates(ctx *uploadContext, updates []*git.RefUpdate, pushAll bool) error {
|
2017-11-03 16:08:08 +00:00
|
|
|
gitscanner, err := ctx.buildGitScanner()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-01-04 18:59:43 +00:00
|
|
|
|
|
|
|
defer func() {
|
|
|
|
gitscanner.Close()
|
|
|
|
ctx.ReportErrors()
|
|
|
|
}()
|
2017-11-03 16:08:08 +00:00
|
|
|
|
|
|
|
verifyLocksForUpdates(ctx.lockVerifier, updates)
|
Optimize pushes of multiple refs
When pushing multiple refs, we know any Git objects on the remote side
can be excluded from the objects that refer to LFS objects we need to
push, since if the remote side already has the Git objects, it should
have the corresponding LFS objects as well.
However, when traversing Git objects, we traverse them on a per-ref
basis, which is required since any LFS objects which spawn a batch
request will need the ref to be placed in the batch request as part of
the protocol.
Let's find a list of all the remote sides that exist before traversing
any Git objects, and exclude traversing any of those objects in any
traversal. As a result, we can traverse far, far fewer objects,
especially when pushing new refs in a large repository.
Note that we exclude the case when the left and right sides are the same
because our code sets them to the same thing in some cases even though
Git does not, so we cannot reason about the values in that case.
2020-01-14 19:43:35 +00:00
|
|
|
rightSides := make([]string, 0, len(updates))
|
|
|
|
for _, update := range updates {
|
|
|
|
right := update.Right().Sha
|
|
|
|
if update.LeftCommitish() != right {
|
|
|
|
rightSides = append(rightSides, right)
|
|
|
|
}
|
|
|
|
}
|
2017-11-03 16:08:08 +00:00
|
|
|
for _, update := range updates {
|
2018-01-17 18:06:50 +00:00
|
|
|
// initialized here to prevent looped defer
|
|
|
|
q := ctx.NewQueue(
|
2018-01-05 18:12:57 +00:00
|
|
|
tq.RemoteRef(update.Right()),
|
|
|
|
)
|
Optimize pushes of multiple refs
When pushing multiple refs, we know any Git objects on the remote side
can be excluded from the objects that refer to LFS objects we need to
push, since if the remote side already has the Git objects, it should
have the corresponding LFS objects as well.
However, when traversing Git objects, we traverse them on a per-ref
basis, which is required since any LFS objects which spawn a batch
request will need the ref to be placed in the batch request as part of
the protocol.
Let's find a list of all the remote sides that exist before traversing
any Git objects, and exclude traversing any of those objects in any
traversal. As a result, we can traverse far, far fewer objects,
especially when pushing new refs in a large repository.
Note that we exclude the case when the left and right sides are the same
because our code sets them to the same thing in some cases even though
Git does not, so we cannot reason about the values in that case.
2020-01-14 19:43:35 +00:00
|
|
|
err := uploadLeftOrAll(gitscanner, ctx, q, rightSides, update, pushAll)
|
2018-01-04 18:59:43 +00:00
|
|
|
ctx.CollectErrors(q)
|
|
|
|
|
|
|
|
if err != nil {
|
2017-11-03 16:08:08 +00:00
|
|
|
return errors.Wrap(err, fmt.Sprintf("ref %s:", update.Left().Name))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
Optimize pushes of multiple refs
When pushing multiple refs, we know any Git objects on the remote side
can be excluded from the objects that refer to LFS objects we need to
push, since if the remote side already has the Git objects, it should
have the corresponding LFS objects as well.
However, when traversing Git objects, we traverse them on a per-ref
basis, which is required since any LFS objects which spawn a batch
request will need the ref to be placed in the batch request as part of
the protocol.
Let's find a list of all the remote sides that exist before traversing
any Git objects, and exclude traversing any of those objects in any
traversal. As a result, we can traverse far, far fewer objects,
especially when pushing new refs in a large repository.
Note that we exclude the case when the left and right sides are the same
because our code sets them to the same thing in some cases even though
Git does not, so we cannot reason about the values in that case.
2020-01-14 19:43:35 +00:00
|
|
|
func uploadLeftOrAll(g *lfs.GitScanner, ctx *uploadContext, q *tq.TransferQueue, bases []string, update *git.RefUpdate, pushAll bool) error {
|
2018-01-04 18:54:20 +00:00
|
|
|
cb := ctx.gitScannerCallback(q)
|
2017-02-16 20:24:53 +00:00
|
|
|
if pushAll {
|
2018-01-04 16:35:57 +00:00
|
|
|
if err := g.ScanRefWithDeleted(update.LeftCommitish(), cb); err != nil {
|
2017-02-16 20:24:53 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
} else {
|
2019-08-05 16:39:47 +00:00
|
|
|
left := update.LeftCommitish()
|
|
|
|
right := update.Right().Sha
|
|
|
|
if left == right {
|
|
|
|
right = ""
|
|
|
|
}
|
Optimize pushes of multiple refs
When pushing multiple refs, we know any Git objects on the remote side
can be excluded from the objects that refer to LFS objects we need to
push, since if the remote side already has the Git objects, it should
have the corresponding LFS objects as well.
However, when traversing Git objects, we traverse them on a per-ref
basis, which is required since any LFS objects which spawn a batch
request will need the ref to be placed in the batch request as part of
the protocol.
Let's find a list of all the remote sides that exist before traversing
any Git objects, and exclude traversing any of those objects in any
traversal. As a result, we can traverse far, far fewer objects,
especially when pushing new refs in a large repository.
Note that we exclude the case when the left and right sides are the same
because our code sets them to the same thing in some cases even though
Git does not, so we cannot reason about the values in that case.
2020-01-14 19:43:35 +00:00
|
|
|
if err := g.ScanMultiRangeToRemote(left, bases, cb); err != nil {
|
2017-02-16 20:24:53 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return ctx.scannerError()
|
|
|
|
}
|
|
|
|
|
2016-04-06 19:06:34 +00:00
|
|
|
// uploadContext carries the state shared by a single push operation:
// transfer configuration, progress reporting, lock verification, and the
// errors accumulated while scanning and transferring objects.
type uploadContext struct {
	// Remote is the name (or URL) of the remote objects are pushed to.
	Remote string
	// DryRun causes pointers to be printed instead of uploaded.
	DryRun bool
	// Manifest describes the transfer adapters negotiated for "upload".
	Manifest *tq.Manifest

	// uploadedOids records OIDs already handled by this process so the
	// same object is never queued twice.
	uploadedOids tools.StringSet
	// gitfilter cleans working-tree files into local LFS storage when an
	// object is not already present there.
	gitfilter *lfs.GitFilter

	logger *tasklog.Logger
	meter  *tq.Meter

	committerName  string
	committerEmail string

	// lockVerifier tracks which files are locked by us or by others.
	lockVerifier *lockVerifier

	// allowMissing specifies whether pushes containing missing/corrupt
	// pointers should allow pushing Git blobs
	allowMissing bool

	// scannerErr accumulates errors from gitscanner callbacks; guarded by
	// errMu since callbacks may run concurrently.
	scannerErr error
	errMu      sync.Mutex

	// missing and corrupt map filename => oid for malformed objects found
	// while draining transfer queues; all other failures land in otherErrs.
	missing   map[string]string
	corrupt   map[string]string
	otherErrs []error
}
|
|
|
|
|
2017-10-27 22:14:26 +00:00
|
|
|
func newUploadContext(dryRun bool) *uploadContext {
|
2017-11-03 16:36:12 +00:00
|
|
|
remote := cfg.PushRemote()
|
2017-11-01 23:35:43 +00:00
|
|
|
manifest := getTransferManifestOperationRemote("upload", remote)
|
2017-01-11 20:59:25 +00:00
|
|
|
ctx := &uploadContext{
|
2017-11-01 23:35:43 +00:00
|
|
|
Remote: remote,
|
|
|
|
Manifest: manifest,
|
|
|
|
DryRun: dryRun,
|
|
|
|
uploadedOids: tools.NewStringSet(),
|
|
|
|
gitfilter: lfs.NewGitFilter(cfg),
|
|
|
|
lockVerifier: newLockVerifier(manifest),
|
2018-07-05 17:23:49 +00:00
|
|
|
allowMissing: cfg.Git.Bool("lfs.allowincompletepush", false),
|
2018-01-02 20:33:50 +00:00
|
|
|
missing: make(map[string]string),
|
|
|
|
corrupt: make(map[string]string),
|
|
|
|
otherErrs: make([]error, 0),
|
2016-04-06 19:06:34 +00:00
|
|
|
}
|
2016-12-29 21:30:23 +00:00
|
|
|
|
2017-11-16 16:38:45 +00:00
|
|
|
var sink io.Writer = os.Stdout
|
|
|
|
if dryRun {
|
|
|
|
sink = ioutil.Discard
|
|
|
|
}
|
|
|
|
|
2018-10-30 08:06:55 +00:00
|
|
|
ctx.logger = tasklog.NewLogger(sink,
|
|
|
|
tasklog.ForceProgress(cfg.ForceProgress()),
|
|
|
|
)
|
2018-01-06 02:01:50 +00:00
|
|
|
ctx.meter = buildProgressMeter(ctx.DryRun, tq.Upload)
|
2017-11-16 16:38:45 +00:00
|
|
|
ctx.logger.Enqueue(ctx.meter)
|
2017-01-13 18:25:21 +00:00
|
|
|
ctx.committerName, ctx.committerEmail = cfg.CurrentCommitter()
|
2017-02-16 00:24:22 +00:00
|
|
|
return ctx
|
|
|
|
}
|
|
|
|
|
2018-01-04 16:31:40 +00:00
|
|
|
func (c *uploadContext) NewQueue(options ...tq.Option) *tq.TransferQueue {
|
2018-01-05 18:12:57 +00:00
|
|
|
return tq.NewTransferQueue(tq.Upload, c.Manifest, c.Remote, append(options,
|
|
|
|
tq.DryRun(c.DryRun),
|
|
|
|
tq.WithProgress(c.meter),
|
|
|
|
)...)
|
2018-01-04 16:31:40 +00:00
|
|
|
}
|
|
|
|
|
2017-02-16 20:24:53 +00:00
|
|
|
func (c *uploadContext) scannerError() error {
|
|
|
|
c.errMu.Lock()
|
|
|
|
defer c.errMu.Unlock()
|
|
|
|
|
|
|
|
return c.scannerErr
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *uploadContext) addScannerError(err error) {
|
|
|
|
c.errMu.Lock()
|
|
|
|
defer c.errMu.Unlock()
|
|
|
|
|
|
|
|
if c.scannerErr != nil {
|
|
|
|
c.scannerErr = fmt.Errorf("%v\n%v", c.scannerErr, err)
|
|
|
|
} else {
|
|
|
|
c.scannerErr = err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *uploadContext) buildGitScanner() (*lfs.GitScanner, error) {
|
2019-08-09 15:25:23 +00:00
|
|
|
gitscanner := lfs.NewGitScanner(cfg, nil)
|
2018-01-04 16:35:57 +00:00
|
|
|
gitscanner.FoundLockable = func(n string) { c.lockVerifier.LockedByThem(n) }
|
|
|
|
gitscanner.PotentialLockables = c.lockVerifier
|
|
|
|
return gitscanner, gitscanner.RemoteForPush(c.Remote)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *uploadContext) gitScannerCallback(tqueue *tq.TransferQueue) func(*lfs.WrappedPointer, error) {
|
|
|
|
return func(p *lfs.WrappedPointer, err error) {
|
2017-02-16 20:24:53 +00:00
|
|
|
if err != nil {
|
|
|
|
c.addScannerError(err)
|
|
|
|
} else {
|
2018-01-04 16:35:57 +00:00
|
|
|
c.UploadPointers(tqueue, p)
|
2017-02-16 20:24:53 +00:00
|
|
|
}
|
2018-01-04 16:35:57 +00:00
|
|
|
}
|
2017-02-16 20:24:53 +00:00
|
|
|
}
|
|
|
|
|
2016-04-07 16:52:24 +00:00
|
|
|
// SetUploaded adds the given oid to the set of oids that have been uploaded
// in the current process.
func (c *uploadContext) SetUploaded(oid string) {
	c.uploadedOids.Add(oid)
}
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2016-04-07 16:52:24 +00:00
|
|
|
// HasUploaded determines if the given oid has already been uploaded in the
// current process (see SetUploaded); it does not consult the server.
func (c *uploadContext) HasUploaded(oid string) bool {
	return c.uploadedOids.Contains(oid)
}
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2018-01-04 16:31:40 +00:00
|
|
|
func (c *uploadContext) prepareUpload(unfiltered ...*lfs.WrappedPointer) []*lfs.WrappedPointer {
|
2016-04-07 16:52:24 +00:00
|
|
|
numUnfiltered := len(unfiltered)
|
|
|
|
uploadables := make([]*lfs.WrappedPointer, 0, numUnfiltered)
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2016-10-20 18:07:40 +00:00
|
|
|
// XXX(taylor): temporary measure to fix duplicate (broken) results from
|
|
|
|
// scanner
|
|
|
|
uniqOids := tools.NewStringSet()
|
|
|
|
|
2016-04-07 16:52:24 +00:00
|
|
|
// separate out objects that _should_ be uploaded, but don't exist in
|
|
|
|
// .git/lfs/objects. Those will skipped if the server already has them.
|
|
|
|
for _, p := range unfiltered {
|
2016-10-20 18:07:40 +00:00
|
|
|
// object already uploaded in this process, or we've already
|
|
|
|
// seen this OID (see above), skip!
|
|
|
|
if uniqOids.Contains(p.Oid) || c.HasUploaded(p.Oid) {
|
2016-04-07 16:52:24 +00:00
|
|
|
continue
|
2016-04-06 19:06:34 +00:00
|
|
|
}
|
2016-10-20 18:07:40 +00:00
|
|
|
uniqOids.Add(p.Oid)
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2017-01-12 23:39:53 +00:00
|
|
|
// canUpload determines whether the current pointer "p" can be
|
|
|
|
// uploaded through the TransferQueue below. It is set to false
|
|
|
|
// only when the file is locked by someone other than the
|
|
|
|
// current committer.
|
|
|
|
var canUpload bool = true
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2017-11-01 23:35:43 +00:00
|
|
|
if c.lockVerifier.LockedByThem(p.Name) {
|
2017-04-27 17:10:22 +00:00
|
|
|
// If the verification state is enabled, this failed
|
|
|
|
// locks verification means that the push should fail.
|
2017-04-26 19:43:59 +00:00
|
|
|
//
|
|
|
|
// If the state is disabled, the verification error is
|
|
|
|
// silent and the user can upload.
|
2017-04-27 17:10:22 +00:00
|
|
|
//
|
|
|
|
// If the state is undefined, the verification error is
|
|
|
|
// sent as a warning and the user can upload.
|
2017-11-01 23:35:43 +00:00
|
|
|
canUpload = !c.lockVerifier.Enabled()
|
2017-02-01 20:08:11 +00:00
|
|
|
}
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2017-11-01 23:35:43 +00:00
|
|
|
c.lockVerifier.LockedByUs(p.Name)
|
2016-04-07 16:52:24 +00:00
|
|
|
|
2017-01-12 23:39:53 +00:00
|
|
|
if canUpload {
|
|
|
|
// estimate in meter early (even if it's not going into
|
|
|
|
// uploadables), since we will call Skip() based on the
|
|
|
|
// results of the download check queue.
|
|
|
|
c.meter.Add(p.Size)
|
|
|
|
|
|
|
|
uploadables = append(uploadables, p)
|
|
|
|
}
|
2016-11-17 15:08:18 +00:00
|
|
|
}
|
|
|
|
|
2018-01-04 16:31:40 +00:00
|
|
|
return uploadables
|
2016-04-06 19:06:34 +00:00
|
|
|
}
|
|
|
|
|
2018-01-04 16:31:40 +00:00
|
|
|
func (c *uploadContext) UploadPointers(q *tq.TransferQueue, unfiltered ...*lfs.WrappedPointer) {
|
2016-04-07 16:52:24 +00:00
|
|
|
if c.DryRun {
|
|
|
|
for _, p := range unfiltered {
|
|
|
|
if c.HasUploaded(p.Oid) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
Print("push %s => %s", p.Oid, p.Name)
|
2016-04-08 15:59:02 +00:00
|
|
|
c.SetUploaded(p.Oid)
|
2016-04-07 16:52:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-01-04 16:31:40 +00:00
|
|
|
pointers := c.prepareUpload(unfiltered...)
|
2016-04-07 16:52:24 +00:00
|
|
|
for _, p := range pointers {
|
2017-10-24 18:07:53 +00:00
|
|
|
t, err := c.uploadTransfer(p)
|
2017-01-11 20:59:25 +00:00
|
|
|
if err != nil && !errors.IsCleanPointerError(err) {
|
|
|
|
ExitWithError(err)
|
2016-04-07 16:52:24 +00:00
|
|
|
}
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2020-02-10 15:52:15 +00:00
|
|
|
q.Add(t.Name, t.Path, t.Oid, t.Size, t.Missing, nil)
|
2016-04-08 15:59:02 +00:00
|
|
|
c.SetUploaded(p.Oid)
|
2016-04-07 16:52:24 +00:00
|
|
|
}
|
2016-12-29 21:30:23 +00:00
|
|
|
}
|
2016-04-06 19:06:34 +00:00
|
|
|
|
2018-01-04 19:05:00 +00:00
|
|
|
func (c *uploadContext) CollectErrors(tqueue *tq.TransferQueue) {
|
|
|
|
tqueue.Wait()
|
|
|
|
|
|
|
|
for _, err := range tqueue.Errors() {
|
|
|
|
if malformed, ok := err.(*tq.MalformedObjectError); ok {
|
|
|
|
if malformed.Missing() {
|
|
|
|
c.missing[malformed.Name] = malformed.Oid
|
|
|
|
} else if malformed.Corrupt() {
|
|
|
|
c.corrupt[malformed.Name] = malformed.Oid
|
2017-03-24 20:37:29 +00:00
|
|
|
}
|
2018-01-04 19:05:00 +00:00
|
|
|
} else {
|
|
|
|
c.otherErrs = append(c.otherErrs, err)
|
2017-03-24 20:37:29 +00:00
|
|
|
}
|
|
|
|
}
|
2018-01-02 20:33:50 +00:00
|
|
|
}
|
2017-03-24 20:37:29 +00:00
|
|
|
|
2018-01-02 20:33:50 +00:00
|
|
|
// ReportErrors finishes the progress meter, prints every error accumulated
// during the push, and terminates the process (exit status 2, or via Exit)
// when any of them should halt it. It also reports the state of file locks
// held by us or by others.
func (c *uploadContext) ReportErrors() {
	c.meter.Finish()

	for _, err := range c.otherErrs {
		FullError(err)
	}

	if len(c.missing) > 0 || len(c.corrupt) > 0 {
		var action string
		if c.allowMissing {
			action = "missing objects"
		} else {
			action = "failed"
		}

		Print("LFS upload %s:", action)
		for name, oid := range c.missing {
			Print("  (missing) %s (%s)", name, oid)
		}
		for name, oid := range c.corrupt {
			Print("  (corrupt) %s (%s)", name, oid)
		}

		// Missing/corrupt objects abort the push unless the user opted in
		// via lfs.allowincompletepush (see allowMissing).
		if !c.allowMissing {
			pushMissingHint := []string{
				"hint: Your push was rejected due to missing or corrupt local objects.",
				"hint: You can disable this check with: 'git config lfs.allowincompletepush true'",
			}
			Print(strings.Join(pushMissingHint, "\n"))
			os.Exit(2)
		}
	}

	if len(c.otherErrs) > 0 {
		os.Exit(2)
	}

	if c.lockVerifier.HasUnownedLocks() {
		Print("Unable to push locked files:")
		for _, unowned := range c.lockVerifier.UnownedLocks() {
			Print("* %s - %s", unowned.Path(), unowned.Owners())
		}

		// Enforced lock verification makes unowned locks fatal; otherwise
		// they are only reported as a warning and the push proceeds.
		if c.lockVerifier.Enabled() {
			Exit("ERROR: Cannot update locked files.")
		} else {
			Error("WARNING: The above files would have halted this push.")
		}
	} else if c.lockVerifier.HasOwnedLocks() {
		Print("Consider unlocking your own locked files: (`git lfs unlock <path>`)")
		for _, owned := range c.lockVerifier.OwnedLocks() {
			Print("* %s", owned.Path())
		}
	}
}
|
2017-02-16 00:24:22 +00:00
|
|
|
|
2017-04-18 15:19:43 +00:00
|
|
|
var (
	// Pre-parsed base URLs for github.com; the parse errors are ignored
	// because these literals are known to be valid.
	githubHttps, _ = url.Parse("https://github.com")
	githubSsh, _   = url.Parse("ssh://github.com")

	// hostsWithKnownLockingSupport is a list of base URLs (scheme, host
	// without port number, and optional path prefix) that are known to
	// implement the LFS locking API.
	//
	// Additions are welcome.
	hostsWithKnownLockingSupport = []*url.URL{
		githubHttps, githubSsh,
	}
)
|
|
|
|
|
2017-10-24 18:07:53 +00:00
|
|
|
func (c *uploadContext) uploadTransfer(p *lfs.WrappedPointer) (*tq.Transfer, error) {
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
var missing bool
|
|
|
|
|
2017-09-12 14:19:26 +00:00
|
|
|
filename := p.Name
|
|
|
|
oid := p.Oid
|
|
|
|
|
2017-10-25 17:31:15 +00:00
|
|
|
localMediaPath, err := c.gitfilter.ObjectPath(oid)
|
2017-09-12 14:19:26 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid)
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(filename) > 0 {
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
if missing, err = c.ensureFile(filename, localMediaPath, oid); err != nil && !errors.IsCleanPointerError(err) {
|
2017-09-12 14:19:26 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &tq.Transfer{
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
Name: filename,
|
|
|
|
Path: localMediaPath,
|
|
|
|
Oid: oid,
|
|
|
|
Size: p.Size,
|
|
|
|
Missing: missing,
|
2017-09-12 14:19:26 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// ensureFile makes sure that the cleanPath exists before pushing it. If it
|
|
|
|
// does not exist, it attempts to clean it by reading the file at smudgePath.
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
func (c *uploadContext) ensureFile(smudgePath, cleanPath, oid string) (bool, error) {
|
2017-09-12 14:19:26 +00:00
|
|
|
if _, err := os.Stat(cleanPath); err == nil {
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
return false, nil
|
2017-09-12 14:19:26 +00:00
|
|
|
}
|
|
|
|
|
2017-10-19 00:09:33 +00:00
|
|
|
localPath := filepath.Join(cfg.LocalWorkingDir(), smudgePath)
|
2017-09-12 14:19:26 +00:00
|
|
|
file, err := os.Open(localPath)
|
|
|
|
if err != nil {
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
return !c.allowMissing, nil
|
2017-09-12 14:19:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
defer file.Close()
|
|
|
|
|
|
|
|
stat, err := file.Stat()
|
|
|
|
if err != nil {
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
return false, err
|
2017-09-12 14:19:26 +00:00
|
|
|
}
|
|
|
|
|
2017-10-24 18:07:53 +00:00
|
|
|
cleaned, err := c.gitfilter.Clean(file, file.Name(), stat.Size(), nil)
|
2017-09-12 14:19:26 +00:00
|
|
|
if cleaned != nil {
|
|
|
|
cleaned.Teardown()
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
return false, err
|
2017-09-12 14:19:26 +00:00
|
|
|
}
|
Don't fail if we lack objects the server has
A Git LFS client may not have the entire history of the objects for the
repository. However, in some situations, we traverse the entire history
of a branch when pushing it, meaning that we need to process every
LFS object in the history of that branch. If the objects for the entire
history are not present, we currently fail to push.
Instead, let's mark objects we don't have on disk as missing and only
fail when we would need to upload those objects. We'll know the server
has the objects if the batch response provides no actions to take for
them when we request an upload. Pass the missing flag down through the
code, and always set it to false for non-uploads.
If for some reason we fail to properly flag a missing object, we will
still fail later on when we cannot open the file, just in a messier and
more poorly controlled way. The technique used here will attempt to
abort the batch as soon as we notice a problem, which means that in the
common case (less than 100 objects) we won't have transferred any
objects, so the user can notice the failure as soon as possible.
Update the tests to look for a string which will occur in the error
message, since we no longer produce the system error message for ENOENT.
2019-04-30 19:18:18 +00:00
|
|
|
return false, nil
|
2017-09-12 14:19:26 +00:00
|
|
|
}
|
|
|
|
|
2017-11-01 23:35:43 +00:00
|
|
|
// supportsLockingAPI returns whether or not a given url is known to support
|
|
|
|
// the LFS locking API by whether or not its hostname is included in the list
|
|
|
|
// above.
|
|
|
|
func supportsLockingAPI(rawurl string) bool {
|
|
|
|
u, err := url.Parse(rawurl)
|
2017-04-18 15:19:43 +00:00
|
|
|
if err != nil {
|
2017-11-01 23:35:43 +00:00
|
|
|
tracerx.Printf("commands: unable to parse %q to determine locking support: %v", rawurl, err)
|
2017-04-18 15:19:43 +00:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-04-18 15:29:20 +00:00
|
|
|
for _, supported := range hostsWithKnownLockingSupport {
|
|
|
|
if supported.Scheme == u.Scheme &&
|
|
|
|
supported.Hostname() == u.Hostname() &&
|
|
|
|
strings.HasPrefix(u.Path, supported.Path) {
|
2017-04-18 15:19:43 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2017-02-16 00:24:22 +00:00
|
|
|
// disableFor disables lock verification for the given lfsapi.Endpoint,
|
|
|
|
// "endpoint".
|
2017-11-01 23:35:43 +00:00
|
|
|
func disableFor(rawurl string) error {
|
|
|
|
tracerx.Printf("commands: disabling lock verification for %q", rawurl)
|
2017-02-16 00:24:22 +00:00
|
|
|
|
2017-11-01 23:35:43 +00:00
|
|
|
key := strings.Join([]string{"lfs", rawurl, "locksverify"}, ".")
|
2017-02-16 00:24:22 +00:00
|
|
|
|
2017-10-26 01:20:35 +00:00
|
|
|
_, err := cfg.SetGitLocalKey(key, "false")
|
2017-02-16 00:24:22 +00:00
|
|
|
return err
|
|
|
|
}
|