2015-05-14 16:33:29 +00:00
|
|
|
package commands
|
|
|
|
|
|
|
|
import (
|
2015-09-03 16:23:30 +00:00
|
|
|
"fmt"
|
2017-11-16 16:37:22 +00:00
|
|
|
"os"
|
2015-07-28 13:50:19 +00:00
|
|
|
"time"
|
|
|
|
|
2016-11-21 22:14:33 +00:00
|
|
|
"github.com/git-lfs/git-lfs/filepathfilter"
|
2016-11-15 17:01:18 +00:00
|
|
|
"github.com/git-lfs/git-lfs/git"
|
|
|
|
"github.com/git-lfs/git-lfs/lfs"
|
2017-11-22 22:07:24 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tasklog"
|
2016-12-12 00:20:14 +00:00
|
|
|
"github.com/git-lfs/git-lfs/tq"
|
2016-05-23 18:02:27 +00:00
|
|
|
"github.com/rubyist/tracerx"
|
|
|
|
"github.com/spf13/cobra"
|
2015-05-14 16:33:29 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// fetchRecentArg mirrors the --recent/-r flag: also fetch recent refs & commits.
	fetchRecentArg bool
	// fetchAllArg mirrors the --all/-a flag: fetch every LFS object ever referenced.
	fetchAllArg bool
	// fetchPruneArg mirrors the --prune/-p flag: prune old data after fetching.
	fetchPruneArg bool
)
|
|
|
|
|
2016-08-02 16:37:16 +00:00
|
|
|
func getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) {
|
2016-08-01 22:47:41 +00:00
|
|
|
includeFlag := cmd.Flag("include")
|
|
|
|
excludeFlag := cmd.Flag("exclude")
|
|
|
|
if includeFlag.Changed {
|
2016-08-02 16:37:16 +00:00
|
|
|
include = &includeArg
|
2016-08-01 22:47:41 +00:00
|
|
|
}
|
|
|
|
if excludeFlag.Changed {
|
2016-08-02 16:37:16 +00:00
|
|
|
exclude = &excludeArg
|
2016-08-01 22:47:41 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-05-29 15:28:10 +00:00
|
|
|
func fetchCommand(cmd *cobra.Command, args []string) {
|
commands: make sure we're in the working tree
In the normal case, Git commands perform repository autodiscovery based
on the current working directory. However, in some cases, it's possible
to specify a Git working tree unrelated to the current working directory
by using GIT_WORK_TREE. In such a case, we want to make sure that we
change into the working tree such that our working directory is always
within the working tree, if one exists. This is what Git does, and it
means that when we write files into the repository, such as a
.gitattributes file, we write them into the proper place.
Note also that we adjust the code to require that the working directory
be non-empty when we require a working copy instead of that the
repository be non-bare. That's because we don't want people to be
working inside of the Git directory in such situations, where the
repository would be non-bare but would not have a working tree.
We add tests for this case for track and untrack, which require a
working tree, and for checkout, which requires only a repository. This
means that we can verify the behavior of the functions we've added
without needing to add tests for this case to each of the subcommands.
2020-10-02 19:03:55 +00:00
|
|
|
setupRepository()
|
2015-05-14 16:33:29 +00:00
|
|
|
|
2015-09-08 16:23:27 +00:00
|
|
|
var refs []*git.Ref
|
2015-09-08 15:29:53 +00:00
|
|
|
|
2015-07-29 15:34:15 +00:00
|
|
|
if len(args) > 0 {
|
2015-08-13 16:22:29 +00:00
|
|
|
// Remote is first arg
|
2017-10-27 22:20:38 +00:00
|
|
|
if err := cfg.SetValidRemote(args[0]); err != nil {
|
|
|
|
Exit("Invalid remote name %q: %s", args[0], err)
|
2015-10-06 16:35:58 +00:00
|
|
|
}
|
2015-08-13 16:22:29 +00:00
|
|
|
}
|
2015-09-08 15:29:53 +00:00
|
|
|
|
2015-08-13 16:22:29 +00:00
|
|
|
if len(args) > 1 {
|
2016-04-06 16:29:59 +00:00
|
|
|
resolvedrefs, err := git.ResolveRefs(args[1:])
|
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Invalid ref argument: %v", args[1:])
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
2016-04-06 16:29:59 +00:00
|
|
|
refs = resolvedrefs
|
2016-06-16 16:14:53 +00:00
|
|
|
} else if !fetchAllArg {
|
2015-07-29 15:34:15 +00:00
|
|
|
ref, err := git.CurrentRef()
|
2015-05-14 16:33:29 +00:00
|
|
|
if err != nil {
|
2015-05-29 15:28:10 +00:00
|
|
|
Panic(err, "Could not fetch")
|
2015-05-14 16:33:29 +00:00
|
|
|
}
|
2015-08-21 14:19:16 +00:00
|
|
|
refs = []*git.Ref{ref}
|
2015-07-29 15:34:15 +00:00
|
|
|
}
|
|
|
|
|
2015-10-13 14:29:25 +00:00
|
|
|
success := true
|
2019-08-09 15:25:23 +00:00
|
|
|
gitscanner := lfs.NewGitScanner(cfg, nil)
|
2016-11-18 00:22:21 +00:00
|
|
|
defer gitscanner.Close()
|
|
|
|
|
2016-08-02 16:37:16 +00:00
|
|
|
include, exclude := getIncludeExcludeArgs(cmd)
|
2017-10-18 19:51:57 +00:00
|
|
|
fetchPruneCfg := lfs.NewFetchPruneConfig(cfg.Git)
|
2016-08-02 16:37:16 +00:00
|
|
|
|
2015-09-03 11:29:00 +00:00
|
|
|
if fetchAllArg {
|
2019-10-08 01:02:57 +00:00
|
|
|
if fetchRecentArg {
|
|
|
|
Exit("Cannot combine --all with --recent")
|
2015-09-03 11:29:00 +00:00
|
|
|
}
|
2016-08-02 16:37:16 +00:00
|
|
|
if include != nil || exclude != nil {
|
2015-09-03 11:29:00 +00:00
|
|
|
Exit("Cannot combine --all with --include or --exclude")
|
|
|
|
}
|
2016-07-21 22:37:53 +00:00
|
|
|
if len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {
|
2015-09-03 11:29:00 +00:00
|
|
|
Print("Ignoring global include / exclude paths to fulfil --all")
|
|
|
|
}
|
2019-10-08 01:02:57 +00:00
|
|
|
|
|
|
|
if len(args) > 1 {
|
|
|
|
refShas := make([]string, len(refs))
|
|
|
|
for _, ref := range refs {
|
|
|
|
refShas = append(refShas, ref.Sha)
|
|
|
|
}
|
|
|
|
success = fetchRefs(refShas)
|
|
|
|
} else {
|
|
|
|
success = fetchAll()
|
|
|
|
}
|
2015-05-14 16:33:29 +00:00
|
|
|
|
2015-09-03 11:29:00 +00:00
|
|
|
} else { // !all
|
2020-03-27 19:02:58 +00:00
|
|
|
filter := buildFilepathFilter(cfg, include, exclude, true)
|
2015-09-03 11:29:00 +00:00
|
|
|
|
|
|
|
// Fetch refs sequentially per arg order; duplicates in later refs will be ignored
|
|
|
|
for _, ref := range refs {
|
2017-12-05 01:27:44 +00:00
|
|
|
Print("fetch: Fetching reference %s", ref.Refspec())
|
2016-12-06 23:52:28 +00:00
|
|
|
s := fetchRef(ref.Sha, filter)
|
2015-10-13 14:29:25 +00:00
|
|
|
success = success && s
|
2015-09-03 11:29:00 +00:00
|
|
|
}
|
|
|
|
|
2017-10-18 19:51:57 +00:00
|
|
|
if fetchRecentArg || fetchPruneCfg.FetchRecentAlways {
|
|
|
|
s := fetchRecent(fetchPruneCfg, refs, filter)
|
2015-10-13 14:29:25 +00:00
|
|
|
success = success && s
|
2015-09-03 11:29:00 +00:00
|
|
|
}
|
|
|
|
}
|
2015-10-13 14:29:25 +00:00
|
|
|
|
2015-10-15 15:35:26 +00:00
|
|
|
if fetchPruneArg {
|
2017-10-18 19:51:57 +00:00
|
|
|
verify := fetchPruneCfg.PruneVerifyRemoteAlways
|
2015-10-15 15:35:26 +00:00
|
|
|
// no dry-run or verbose options in fetch, assume false
|
2017-10-18 19:51:57 +00:00
|
|
|
prune(fetchPruneCfg, verify, false, false)
|
2015-10-15 15:35:26 +00:00
|
|
|
}
|
2015-11-09 17:46:29 +00:00
|
|
|
|
2015-10-13 14:29:25 +00:00
|
|
|
if !success {
|
2017-05-19 16:35:49 +00:00
|
|
|
c := getAPIClient()
|
2017-10-27 21:00:07 +00:00
|
|
|
e := c.Endpoints.Endpoint("download", cfg.Remote())
|
2017-06-14 09:24:30 +00:00
|
|
|
Exit("error: failed to fetch some objects from '%s'", e.Url)
|
2015-10-13 14:29:25 +00:00
|
|
|
}
|
2015-07-23 16:53:58 +00:00
|
|
|
}
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) {
|
2016-11-29 17:56:03 +00:00
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
var multiErr error
|
2019-08-09 15:25:23 +00:00
|
|
|
tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
|
2016-11-29 17:56:03 +00:00
|
|
|
if err != nil {
|
|
|
|
if multiErr != nil {
|
|
|
|
multiErr = fmt.Errorf("%v\n%v", multiErr, err)
|
|
|
|
} else {
|
|
|
|
multiErr = err
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pointers = append(pointers, p)
|
|
|
|
})
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
tempgitscanner.Filter = filter
|
|
|
|
|
2016-12-14 17:53:34 +00:00
|
|
|
if err := tempgitscanner.ScanTree(ref); err != nil {
|
2016-11-18 00:22:21 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2016-11-29 17:56:03 +00:00
|
|
|
|
|
|
|
tempgitscanner.Close()
|
|
|
|
return pointers, multiErr
|
2015-08-26 15:13:50 +00:00
|
|
|
}
|
|
|
|
|
2015-07-27 10:28:46 +00:00
|
|
|
// Fetch all binaries for a given ref (that we don't have already)
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchRef(ref string, filter *filepathfilter.Filter) bool {
|
|
|
|
pointers, err := pointersToFetchForRef(ref, filter)
|
2015-07-27 10:28:46 +00:00
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for Git LFS files")
|
|
|
|
}
|
2016-11-21 22:14:33 +00:00
|
|
|
return fetchAndReportToChan(pointers, filter, nil)
|
2015-07-27 10:28:46 +00:00
|
|
|
}
|
|
|
|
|
2019-10-08 01:02:57 +00:00
|
|
|
func pointersToFetchForRefs(refs []string) ([]*lfs.WrappedPointer, error) {
|
|
|
|
// This could be a long process so use the chan version & report progress
|
|
|
|
task := tasklog.NewSimpleTask()
|
|
|
|
defer task.Complete()
|
|
|
|
|
|
|
|
logger := tasklog.NewLogger(OutputWriter,
|
|
|
|
tasklog.ForceProgress(cfg.ForceProgress()),
|
|
|
|
)
|
|
|
|
logger.Enqueue(task)
|
|
|
|
var numObjs int64
|
|
|
|
|
|
|
|
// use temp gitscanner to collect pointers
|
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
var multiErr error
|
|
|
|
tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
|
|
|
|
if err != nil {
|
|
|
|
if multiErr != nil {
|
|
|
|
multiErr = fmt.Errorf("%v\n%v", multiErr, err)
|
|
|
|
} else {
|
|
|
|
multiErr = err
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
numObjs++
|
|
|
|
task.Logf("fetch: %d object(s) found", numObjs)
|
|
|
|
pointers = append(pointers, p)
|
|
|
|
})
|
|
|
|
|
|
|
|
if err := tempgitscanner.ScanRefs(refs, nil, nil); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
tempgitscanner.Close()
|
|
|
|
return pointers, multiErr
|
|
|
|
}
|
|
|
|
|
|
|
|
func fetchRefs(refs []string) bool {
|
|
|
|
pointers, err := pointersToFetchForRefs(refs)
|
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for Git LFS files")
|
|
|
|
}
|
|
|
|
return fetchAndReportToChan(pointers, nil, nil)
|
|
|
|
}
|
|
|
|
|
2015-08-21 14:19:16 +00:00
|
|
|
// Fetch all previous versions of objects from since to ref (not including final state at ref)
|
|
|
|
// So this will fetch all the '-' sides of the diff from since to ref
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool {
|
2016-11-29 17:27:42 +00:00
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
|
2019-08-09 15:25:23 +00:00
|
|
|
tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
|
2016-11-29 17:27:42 +00:00
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for Git LFS previous versions")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pointers = append(pointers, p)
|
|
|
|
})
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
tempgitscanner.Filter = filter
|
|
|
|
|
2016-11-30 17:30:26 +00:00
|
|
|
if err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil {
|
2016-11-18 00:22:21 +00:00
|
|
|
ExitWithError(err)
|
|
|
|
}
|
2016-11-29 17:27:42 +00:00
|
|
|
|
|
|
|
tempgitscanner.Close()
|
2016-11-21 22:14:33 +00:00
|
|
|
return fetchAndReportToChan(pointers, filter, nil)
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
|
|
|
|
2015-08-20 14:36:10 +00:00
|
|
|
// Fetch recent objects based on config
|
2017-10-18 19:51:57 +00:00
|
|
|
func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool {
|
2015-08-21 14:19:16 +00:00
|
|
|
if fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 {
|
2015-10-13 14:29:25 +00:00
|
|
|
return true
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
|
|
|
|
2015-10-13 14:29:25 +00:00
|
|
|
ok := true
|
2015-08-21 14:19:16 +00:00
|
|
|
// Make a list of what unique commits we've already fetched for to avoid duplicating work
|
|
|
|
uniqueRefShas := make(map[string]string, len(alreadyFetchedRefs))
|
|
|
|
for _, ref := range alreadyFetchedRefs {
|
|
|
|
uniqueRefShas[ref.Sha] = ref.Name
|
|
|
|
}
|
2015-08-20 14:36:10 +00:00
|
|
|
// First find any other recent refs
|
|
|
|
if fetchconf.FetchRecentRefsDays > 0 {
|
2017-11-29 04:03:17 +00:00
|
|
|
Print("fetch: Fetching recent branches within %v days", fetchconf.FetchRecentRefsDays)
|
2015-08-20 14:36:10 +00:00
|
|
|
refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays)
|
2017-10-27 21:00:07 +00:00
|
|
|
refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.Remote())
|
2015-08-20 14:36:10 +00:00
|
|
|
if err != nil {
|
|
|
|
Panic(err, "Could not scan for recent refs")
|
|
|
|
}
|
2015-08-21 14:19:16 +00:00
|
|
|
for _, ref := range refs {
|
2015-08-20 14:36:10 +00:00
|
|
|
// Don't fetch for the same SHA twice
|
|
|
|
if prevRefName, ok := uniqueRefShas[ref.Sha]; ok {
|
2015-08-27 14:18:46 +00:00
|
|
|
if ref.Name != prevRefName {
|
|
|
|
tracerx.Printf("Skipping fetch for %v, already fetched via %v", ref.Name, prevRefName)
|
|
|
|
}
|
2015-08-20 14:36:10 +00:00
|
|
|
} else {
|
|
|
|
uniqueRefShas[ref.Sha] = ref.Name
|
2017-11-29 04:03:17 +00:00
|
|
|
Print("fetch: Fetching reference %s", ref.Name)
|
2016-12-06 23:52:28 +00:00
|
|
|
k := fetchRef(ref.Sha, filter)
|
2015-10-13 14:29:25 +00:00
|
|
|
ok = ok && k
|
2015-08-21 14:19:16 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// For every unique commit we've fetched, check recent commits too
|
|
|
|
if fetchconf.FetchRecentCommitsDays > 0 {
|
|
|
|
for commit, refName := range uniqueRefShas {
|
|
|
|
// We measure from the last commit at the ref
|
|
|
|
summ, err := git.GetCommitSummary(commit)
|
|
|
|
if err != nil {
|
|
|
|
Error("Couldn't scan commits at %v: %v", refName, err)
|
|
|
|
continue
|
2015-08-20 14:36:10 +00:00
|
|
|
}
|
2017-11-29 04:03:17 +00:00
|
|
|
Print("fetch: Fetching changes within %v days of %v", fetchconf.FetchRecentCommitsDays, refName)
|
2015-08-21 14:19:16 +00:00
|
|
|
commitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays)
|
2016-12-06 23:52:28 +00:00
|
|
|
k := fetchPreviousVersions(commit, commitsSince, filter)
|
2015-10-13 14:29:25 +00:00
|
|
|
ok = ok && k
|
2015-08-20 14:36:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
2015-10-13 14:29:25 +00:00
|
|
|
return ok
|
2015-08-20 14:36:10 +00:00
|
|
|
}
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
func fetchAll() bool {
|
|
|
|
pointers := scanAll()
|
2017-11-29 04:03:17 +00:00
|
|
|
Print("fetch: Fetching all references...")
|
2016-11-21 22:14:33 +00:00
|
|
|
return fetchAndReportToChan(pointers, nil, nil)
|
2015-09-09 22:44:58 +00:00
|
|
|
}
|
|
|
|
|
2016-12-06 23:52:28 +00:00
|
|
|
func scanAll() []*lfs.WrappedPointer {
|
2015-09-03 16:23:30 +00:00
|
|
|
// This could be a long process so use the chan version & report progress
|
2017-11-29 04:03:17 +00:00
|
|
|
task := tasklog.NewSimpleTask()
|
2017-12-05 01:27:55 +00:00
|
|
|
defer task.Complete()
|
|
|
|
|
2018-10-30 08:06:55 +00:00
|
|
|
logger := tasklog.NewLogger(OutputWriter,
|
|
|
|
tasklog.ForceProgress(cfg.ForceProgress()),
|
|
|
|
)
|
2017-11-29 04:03:17 +00:00
|
|
|
logger.Enqueue(task)
|
2015-09-03 16:23:30 +00:00
|
|
|
var numObjs int64
|
2016-11-16 19:56:07 +00:00
|
|
|
|
2016-11-29 20:04:05 +00:00
|
|
|
// use temp gitscanner to collect pointers
|
|
|
|
var pointers []*lfs.WrappedPointer
|
|
|
|
var multiErr error
|
2019-08-09 15:25:23 +00:00
|
|
|
tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
|
2016-11-29 20:04:05 +00:00
|
|
|
if err != nil {
|
|
|
|
if multiErr != nil {
|
|
|
|
multiErr = fmt.Errorf("%v\n%v", multiErr, err)
|
|
|
|
} else {
|
|
|
|
multiErr = err
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
2015-09-09 22:44:58 +00:00
|
|
|
|
2015-09-03 16:23:30 +00:00
|
|
|
numObjs++
|
2017-11-29 04:03:17 +00:00
|
|
|
task.Logf("fetch: %d object(s) found", numObjs)
|
2015-09-03 16:23:30 +00:00
|
|
|
pointers = append(pointers, p)
|
2016-11-29 20:04:05 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
if err := tempgitscanner.ScanAll(nil); err != nil {
|
2016-03-31 11:00:44 +00:00
|
|
|
Panic(err, "Could not scan for Git LFS files")
|
|
|
|
}
|
2015-09-09 22:44:58 +00:00
|
|
|
|
2016-11-29 20:04:05 +00:00
|
|
|
tempgitscanner.Close()
|
|
|
|
|
|
|
|
if multiErr != nil {
|
|
|
|
Panic(multiErr, "Could not scan for Git LFS files")
|
|
|
|
}
|
|
|
|
|
2015-09-09 22:44:58 +00:00
|
|
|
return pointers
|
2015-09-03 11:29:00 +00:00
|
|
|
}
|
|
|
|
|
2015-07-23 16:53:58 +00:00
|
|
|
// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
// Returns true if all completed with no errors, false if errors were written to stderr/log
func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool {
	// Split into objects already on disk ("ready") and ones to download.
	ready, pointers, meter := readyAndMissingPointers(allpointers, filter)
	q := newDownloadQueue(
		getTransferManifestOperationRemote("download", cfg.Remote()),
		cfg.Remote(), tq.WithProgress(meter),
	)

	if out != nil {
		// If we already have it, or it won't be fetched
		// report it to chan immediately to support pull/checkout
		for _, p := range ready {
			out <- p
		}

		// Watch must be registered before transfers are added to the queue,
		// so no completion event is missed by the forwarding goroutine.
		dlwatch := q.Watch()

		go func() {
			// fetch only reports single OID, but OID *might* be referenced by multiple
			// WrappedPointers if same content is at multiple paths, so map oid->slice
			oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
			for _, pointer := range pointers {
				plist := oidToPointers[pointer.Oid]
				oidToPointers[pointer.Oid] = append(plist, pointer)
			}

			// Forward each completed OID as all of its pointers; dlwatch is
			// closed by the queue when all transfers finish, ending the loop.
			for t := range dlwatch {
				plist, ok := oidToPointers[t.Oid]
				if !ok {
					continue
				}
				for _, p := range plist {
					out <- p
				}
			}
			// This goroutine is the sole sender on out, so it closes it.
			close(out)
		}()
	}

	// Enqueue every missing object for download.
	for _, p := range pointers {
		tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
		q.Add(downloadTransfer(p))
	}

	processQueue := time.Now()
	q.Wait()
	tracerx.PerformanceSince("process queue", processQueue)

	// Report every transfer error; any error means overall failure.
	ok := true
	for _, err := range q.Errors() {
		ok = false
		FullError(err)
	}
	return ok
}
|
2016-08-10 15:33:25 +00:00
|
|
|
|
2017-11-29 19:07:44 +00:00
|
|
|
func readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *tq.Meter) {
|
2018-10-30 08:06:55 +00:00
|
|
|
logger := tasklog.NewLogger(os.Stdout,
|
|
|
|
tasklog.ForceProgress(cfg.ForceProgress()),
|
|
|
|
)
|
2018-01-06 02:01:50 +00:00
|
|
|
meter := buildProgressMeter(false, tq.Download)
|
2017-11-16 16:37:22 +00:00
|
|
|
logger.Enqueue(meter)
|
|
|
|
|
2016-08-17 20:14:05 +00:00
|
|
|
seen := make(map[string]bool, len(allpointers))
|
2016-08-19 15:35:05 +00:00
|
|
|
missing := make([]*lfs.WrappedPointer, 0, len(allpointers))
|
|
|
|
ready := make([]*lfs.WrappedPointer, 0, len(allpointers))
|
2016-08-17 20:14:05 +00:00
|
|
|
|
|
|
|
for _, p := range allpointers {
|
|
|
|
// no need to download the same object multiple times
|
|
|
|
if seen[p.Oid] {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
seen[p.Oid] = true
|
|
|
|
|
|
|
|
// no need to download objects that exist locally already
|
2017-10-24 21:35:14 +00:00
|
|
|
lfs.LinkOrCopyFromReference(cfg, p.Oid, p.Size)
|
2017-10-25 01:20:09 +00:00
|
|
|
if cfg.LFSObjectExists(p.Oid, p.Size) {
|
2016-08-19 15:35:05 +00:00
|
|
|
ready = append(ready, p)
|
2016-08-17 20:14:05 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2016-08-19 15:35:05 +00:00
|
|
|
missing = append(missing, p)
|
2016-12-07 23:42:50 +00:00
|
|
|
meter.Add(p.Size)
|
2016-08-17 20:14:05 +00:00
|
|
|
}
|
|
|
|
|
2016-12-07 02:50:57 +00:00
|
|
|
return ready, missing, meter
|
2016-08-17 20:14:05 +00:00
|
|
|
}
|
|
|
|
|
2016-08-10 15:33:25 +00:00
|
|
|
func init() {
|
2016-09-01 16:09:38 +00:00
|
|
|
RegisterCommand("fetch", fetchCommand, func(cmd *cobra.Command) {
|
2016-08-10 15:33:25 +00:00
|
|
|
cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths")
|
|
|
|
cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths")
|
|
|
|
cmd.Flags().BoolVarP(&fetchRecentArg, "recent", "r", false, "Fetch recent refs & commits")
|
|
|
|
cmd.Flags().BoolVarP(&fetchAllArg, "all", "a", false, "Fetch all LFS files ever referenced")
|
|
|
|
cmd.Flags().BoolVarP(&fetchPruneArg, "prune", "p", false, "After fetching, prune old data")
|
|
|
|
})
|
|
|
|
}
|