2015-05-14 16:33:29 +00:00
|
|
|
package commands
|
|
|
|
|
|
|
|
import (
|
2015-09-03 16:23:30 +00:00
|
|
|
"fmt"
|
2015-07-28 13:50:19 +00:00
|
|
|
"time"
|
|
|
|
|
2016-11-21 22:14:33 +00:00
|
|
|
"github.com/git-lfs/git-lfs/filepathfilter"
|
2016-11-15 17:01:18 +00:00
|
|
|
"github.com/git-lfs/git-lfs/git"
|
|
|
|
"github.com/git-lfs/git-lfs/lfs"
|
|
|
|
"github.com/git-lfs/git-lfs/progress"
|
2016-12-12 00:20:14 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tq"
|
2016-05-23 18:02:27 +00:00
|
|
|
"github.com/rubyist/tracerx"
|
|
|
|
"github.com/spf13/cobra"
|
2015-05-14 16:33:29 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// fetchRecentArg is set by --recent/-r: also fetch recent refs & commits.
	fetchRecentArg bool
	// fetchAllArg is set by --all/-a: fetch every LFS object ever referenced.
	fetchAllArg bool
	// fetchPruneArg is set by --prune/-p: prune old data after fetching.
	fetchPruneArg bool
)
|
|
|
|
|
2016-08-02 16:37:16 +00:00
|
|
|
func getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) {
|
2016-08-01 22:47:41 +00:00
|
|
|
includeFlag := cmd.Flag("include")
|
|
|
|
excludeFlag := cmd.Flag("exclude")
|
|
|
|
if includeFlag.Changed {
|
2016-08-02 16:37:16 +00:00
|
|
|
include = &includeArg
|
2016-08-01 22:47:41 +00:00
|
|
|
}
|
|
|
|
if excludeFlag.Changed {
|
2016-08-02 16:37:16 +00:00
|
|
|
exclude = &excludeArg
|
2016-08-01 22:47:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-05-29 15:28:10 +00:00
|
|
|
// fetchCommand implements `git lfs fetch [remote] [refs...]`.
// It resolves the remote and refs from args, then downloads LFS objects for
// those refs (or everything with --all, or recent history with --recent),
// optionally pruning afterwards with --prune. Exits non-zero on any error.
func fetchCommand(cmd *cobra.Command, args []string) {
	requireInRepo()

	var refs []*git.Ref

	if len(args) > 0 {
		// Remote is first arg
		if err := git.ValidateRemote(args[0]); err != nil {
			Exit("Invalid remote name %q", args[0])
		}
		cfg.CurrentRemote = args[0]
	} else {
		// Empty string lets fetchAndReportToChan lazily resolve the default remote.
		cfg.CurrentRemote = ""
	}

	if len(args) > 1 {
		// Remaining args are refs to fetch for.
		resolvedrefs, err := git.ResolveRefs(args[1:])
		if err != nil {
			Panic(err, "Invalid ref argument: %v", args[1:])
		}
		refs = resolvedrefs
	} else if !fetchAllArg {
		// No explicit refs: default to the currently checked-out ref
		// (skipped for --all, which ignores refs entirely).
		ref, err := git.CurrentRef()
		if err != nil {
			Panic(err, "Could not fetch")
		}
		refs = []*git.Ref{ref}
	}

	success := true
	gitscanner := lfs.NewGitScanner(nil)
	defer gitscanner.Close()

	include, exclude := getIncludeExcludeArgs(cmd)

	if fetchAllArg {
		// --all is mutually exclusive with refs, --recent, and path filters.
		if fetchRecentArg || len(args) > 1 {
			Exit("Cannot combine --all with ref arguments or --recent")
		}
		if include != nil || exclude != nil {
			Exit("Cannot combine --all with --include or --exclude")
		}
		if len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {
			Print("Ignoring global include / exclude paths to fulfil --all")
		}
		success = fetchAll()

	} else { // !all
		filter := buildFilepathFilter(cfg, include, exclude)

		// Fetch refs sequentially per arg order; duplicates in later refs will be ignored
		for _, ref := range refs {
			Print("Fetching %v", ref.Name)
			s := fetchRef(ref.Sha, filter)
			success = success && s
		}

		if fetchRecentArg || cfg.FetchPruneConfig().FetchRecentAlways {
			s := fetchRecent(refs, filter)
			success = success && s
		}
	}

	if fetchPruneArg {
		fetchconf := cfg.FetchPruneConfig()
		verify := fetchconf.PruneVerifyRemoteAlways
		// no dry-run or verbose options in fetch, assume false
		prune(fetchconf, verify, false, false)
	}

	if !success {
		Exit("Warning: errors occurred")
	}
}
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) {
|
2016-11-29 17:56:03 +00:00
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
var multiErr error
|
|
|
|
tempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {
|
|
|
|
if err != nil {
|
|
|
|
if multiErr != nil {
|
|
|
|
multiErr = fmt.Errorf("%v\n%v", multiErr, err)
|
|
|
|
} else {
|
|
|
|
multiErr = err
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pointers = append(pointers, p)
|
|
|
|
})
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
tempgitscanner.Filter = filter
|
|
|
|
|
2016-12-14 17:53:34 +00:00
|
|
|
if err := tempgitscanner.ScanTree(ref); err != nil {
|
2016-11-18 00:22:21 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2016-11-29 17:56:03 +00:00
|
|
|
|
|
|
|
tempgitscanner.Close()
|
|
|
|
return pointers, multiErr
|
2015-08-26 15:13:50 +00:00
|
|
|
}
|
|
|
|
|
2015-07-27 10:28:46 +00:00
|
|
|
// Fetch all binaries for a given ref (that we don't have already)
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchRef(ref string, filter *filepathfilter.Filter) bool {
|
|
|
|
pointers, err := pointersToFetchForRef(ref, filter)
|
2015-07-27 10:28:46 +00:00
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for Git LFS files")
|
|
|
|
}
|
2016-11-21 22:14:33 +00:00
|
|
|
return fetchAndReportToChan(pointers, filter, nil)
|
2015-07-27 10:28:46 +00:00
|
|
|
}
|
|
|
|
|
2015-08-21 14:19:16 +00:00
|
|
|
// Fetch all previous versions of objects from since to ref (not including final state at ref)
|
|
|
|
// So this will fetch all the '-' sides of the diff from since to ref
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool {
|
2016-11-29 17:27:42 +00:00
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
|
2016-11-30 17:30:26 +00:00
|
|
|
tempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {
|
2016-11-29 17:27:42 +00:00
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for Git LFS previous versions")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pointers = append(pointers, p)
|
|
|
|
})
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
tempgitscanner.Filter = filter
|
|
|
|
|
2016-11-30 17:30:26 +00:00
|
|
|
if err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil {
|
2016-11-18 00:22:21 +00:00
|
|
|
ExitWithError(err)
|
|
|
|
}
|
2016-11-29 17:27:42 +00:00
|
|
|
|
|
|
|
tempgitscanner.Close()
|
2016-11-21 22:14:33 +00:00
|
|
|
return fetchAndReportToChan(pointers, filter, nil)
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
|
|
|
|
2015-08-20 14:36:10 +00:00
|
|
|
// Fetch recent objects based on config
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchRecent(alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool {
|
2016-07-21 22:37:53 +00:00
|
|
|
fetchconf := cfg.FetchPruneConfig()
|
2015-08-20 14:36:10 +00:00
|
|
|
|
2015-08-21 14:19:16 +00:00
|
|
|
if fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 {
|
2015-10-13 14:29:25 +00:00
|
|
|
return true
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
|
|
|
|
2015-10-13 14:29:25 +00:00
|
|
|
ok := true
|
2015-08-21 14:19:16 +00:00
|
|
|
// Make a list of what unique commits we've already fetched for to avoid duplicating work
|
|
|
|
uniqueRefShas := make(map[string]string, len(alreadyFetchedRefs))
|
|
|
|
for _, ref := range alreadyFetchedRefs {
|
|
|
|
uniqueRefShas[ref.Sha] = ref.Name
|
|
|
|
}
|
2015-08-20 14:36:10 +00:00
|
|
|
// First find any other recent refs
|
|
|
|
if fetchconf.FetchRecentRefsDays > 0 {
|
2015-08-27 14:18:46 +00:00
|
|
|
Print("Fetching recent branches within %v days", fetchconf.FetchRecentRefsDays)
|
2015-08-20 14:36:10 +00:00
|
|
|
refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays)
|
2016-07-21 22:37:53 +00:00
|
|
|
refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.CurrentRemote)
|
2015-08-20 14:36:10 +00:00
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for recent refs")
|
|
|
|
}
|
2015-08-21 14:19:16 +00:00
|
|
|
for _, ref := range refs {
|
2015-08-20 14:36:10 +00:00
|
|
|
// Don't fetch for the same SHA twice
|
|
|
|
if prevRefName, ok := uniqueRefShas[ref.Sha]; ok {
|
2015-08-27 14:18:46 +00:00
|
|
|
if ref.Name != prevRefName {
|
|
|
|
tracerx.Printf("Skipping fetch for %v, already fetched via %v", ref.Name, prevRefName)
|
|
|
|
}
|
2015-08-20 14:36:10 +00:00
|
|
|
} else {
|
|
|
|
uniqueRefShas[ref.Sha] = ref.Name
|
2015-08-27 14:18:46 +00:00
|
|
|
Print("Fetching %v", ref.Name)
|
2016-12-06 23:52:28 +00:00
|
|
|
k := fetchRef(ref.Sha, filter)
|
2015-10-13 14:29:25 +00:00
|
|
|
ok = ok && k
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// For every unique commit we've fetched, check recent commits too
|
|
|
|
if fetchconf.FetchRecentCommitsDays > 0 {
|
|
|
|
for commit, refName := range uniqueRefShas {
|
|
|
|
// We measure from the last commit at the ref
|
|
|
|
summ, err := git.GetCommitSummary(commit)
|
|
|
|
if err != nil {
|
|
|
|
Error("Couldn't scan commits at %v: %v", refName, err)
|
|
|
|
continue
|
2015-08-20 14:36:10 +00:00
|
|
|
}
|
2015-08-27 14:18:46 +00:00
|
|
|
Print("Fetching changes within %v days of %v", fetchconf.FetchRecentCommitsDays, refName)
|
2015-08-21 14:19:16 +00:00
|
|
|
commitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays)
|
2016-12-06 23:52:28 +00:00
|
|
|
k := fetchPreviousVersions(commit, commitsSince, filter)
|
2015-10-13 14:29:25 +00:00
|
|
|
ok = ok && k
|
2015-08-20 14:36:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2015-10-13 14:29:25 +00:00
|
|
|
return ok
|
2015-08-20 14:36:10 +00:00
|
|
|
}
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchAll() bool {
|
|
|
|
pointers := scanAll()
|
2015-09-09 22:44:58 +00:00
|
|
|
Print("Fetching objects...")
|
2016-11-21 22:14:33 +00:00
|
|
|
return fetchAndReportToChan(pointers, nil, nil)
|
2015-09-09 22:44:58 +00:00
|
|
|
}
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
func scanAll() []*lfs.WrappedPointer {
|
2015-09-03 16:23:30 +00:00
|
|
|
// This could be a long process so use the chan version & report progress
|
2015-09-03 16:38:26 +00:00
|
|
|
Print("Scanning for all objects ever referenced...")
|
2016-05-06 13:46:13 +00:00
|
|
|
spinner := progress.NewSpinner()
|
2015-09-03 16:23:30 +00:00
|
|
|
var numObjs int64
|
2016-11-16 19:56:07 +00:00
|
|
|
|
2016-11-29 20:04:05 +00:00
|
|
|
// use temp gitscanner to collect pointers
|
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
var multiErr error
|
|
|
|
tempgitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) {
|
|
|
|
if err != nil {
|
|
|
|
if multiErr != nil {
|
|
|
|
multiErr = fmt.Errorf("%v\n%v", multiErr, err)
|
|
|
|
} else {
|
|
|
|
multiErr = err
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2015-09-09 22:44:58 +00:00
|
|
|
|
2015-09-03 16:23:30 +00:00
|
|
|
numObjs++
|
|
|
|
spinner.Print(OutputWriter, fmt.Sprintf("%d objects found", numObjs))
|
|
|
|
pointers = append(pointers, p)
|
2016-11-29 20:04:05 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
if err := tempgitscanner.ScanAll(nil); err != nil {
|
2016-03-31 11:00:44 +00:00
|
|
|
Panic(err, "Could not scan for Git LFS files")
|
|
|
|
}
|
2015-09-09 22:44:58 +00:00
|
|
|
|
2016-11-29 20:04:05 +00:00
|
|
|
tempgitscanner.Close()
|
|
|
|
|
|
|
|
if multiErr != nil {
|
|
|
|
Panic(multiErr, "Could not scan for Git LFS files")
|
|
|
|
}
|
|
|
|
|
2015-09-03 16:23:30 +00:00
|
|
|
spinner.Finish(OutputWriter, fmt.Sprintf("%d objects found", numObjs))
|
2015-09-09 22:44:58 +00:00
|
|
|
return pointers
|
2015-09-03 11:29:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-23 16:53:58 +00:00
|
|
|
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
// Returns true if all completed with no errors, false if errors were written to stderr/log
func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool {
	// Lazily initialize the current remote.
	if len(cfg.CurrentRemote) == 0 {
		// Actively find the default remote, don't just assume origin
		defaultRemote, err := git.DefaultRemote()
		if err != nil {
			Exit("No default remote")
		}
		cfg.CurrentRemote = defaultRemote
	}

	// Split into objects already present locally vs. those needing download.
	ready, pointers, meter := readyAndMissingPointers(allpointers, filter)
	q := newDownloadQueue(buildTransferManifest(), cfg.CurrentRemote, tq.WithProgress(meter))

	if out != nil {
		// If we already have it, or it won't be fetched
		// report it to chan immediately to support pull/checkout
		for _, p := range ready {
			out <- p
		}

		dlwatch := q.Watch()

		// Relay completed downloads to the caller's channel; this goroutine
		// owns `out` and closes it once the watch channel is drained.
		go func() {
			// fetch only reports single OID, but OID *might* be referenced by multiple
			// WrappedPointers if same content is at multiple paths, so map oid->slice
			oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
			for _, pointer := range pointers {
				plist := oidToPointers[pointer.Oid]
				oidToPointers[pointer.Oid] = append(plist, pointer)
			}

			for oid := range dlwatch {
				plist, ok := oidToPointers[oid]
				if !ok {
					continue
				}
				for _, p := range plist {
					out <- p
				}
			}
			close(out)
		}()
	}

	// Queue every missing object for download.
	for _, p := range pointers {
		tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)

		q.Add(downloadTransfer(p))
	}

	processQueue := time.Now()
	q.Wait()
	tracerx.PerformanceSince("process queue", processQueue)

	// Any queued transfer errors mean overall failure, but each is still logged.
	ok := true
	for _, err := range q.Errors() {
		ok = false
		FullError(err)
	}
	return ok
}
|
2016-08-10 15:33:25 +00:00
|
|
|
|
2016-12-07 02:50:57 +00:00
|
|
|
func readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *progress.ProgressMeter) {
|
2016-12-07 20:27:42 +00:00
|
|
|
meter := buildProgressMeter(false)
|
2016-08-17 20:14:05 +00:00
|
|
|
seen := make(map[string]bool, len(allpointers))
|
2016-08-19 15:35:05 +00:00
|
|
|
missing := make([]*lfs.WrappedPointer, 0, len(allpointers))
|
|
|
|
ready := make([]*lfs.WrappedPointer, 0, len(allpointers))
|
2016-08-17 20:14:05 +00:00
|
|
|
|
|
|
|
for _, p := range allpointers {
|
|
|
|
// no need to download the same object multiple times
|
|
|
|
if seen[p.Oid] {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
seen[p.Oid] = true
|
|
|
|
|
|
|
|
// no need to download objects that exist locally already
|
|
|
|
lfs.LinkOrCopyFromReference(p.Oid, p.Size)
|
|
|
|
if lfs.ObjectExistsOfSize(p.Oid, p.Size) {
|
2016-08-19 15:35:05 +00:00
|
|
|
ready = append(ready, p)
|
2016-08-17 20:14:05 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2016-08-19 15:35:05 +00:00
|
|
|
missing = append(missing, p)
|
2016-12-07 23:42:50 +00:00
|
|
|
meter.Add(p.Size)
|
2016-08-17 20:14:05 +00:00
|
|
|
}
|
|
|
|
|
2016-12-07 02:50:57 +00:00
|
|
|
return ready, missing, meter
|
2016-08-17 20:14:05 +00:00
|
|
|
}
|
|
|
|
|
2016-08-10 15:33:25 +00:00
|
|
|
func init() {
|
2016-09-01 16:09:38 +00:00
|
|
|
RegisterCommand("fetch", fetchCommand, func(cmd *cobra.Command) {
|
2016-08-10 15:33:25 +00:00
|
|
|
cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths")
|
|
|
|
cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths")
|
|
|
|
cmd.Flags().BoolVarP(&fetchRecentArg, "recent", "r", false, "Fetch recent refs & commits")
|
|
|
|
cmd.Flags().BoolVarP(&fetchAllArg, "all", "a", false, "Fetch all LFS files ever referenced")
|
|
|
|
cmd.Flags().BoolVarP(&fetchPruneArg, "prune", "p", false, "After fetching, prune old data")
|
|
|
|
})
|
|
|
|
}
|