git-lfs/commands/command_fetch.go
Chris Darroch a0986c786c commands,lfs: drop GitScanner Close() method (2023-06-07 15:57:35 -07:00)
The GitScanner structure and its methods were introduced in PR #1670,
and commit bdbca399c46f0447f08066ce53185009b3db90ec of that PR added
the structure's Close() method.  Unlike other similar structures whose
Close() methods should be called to release underlying resources such
as channels or I/O streams, the (*GitScanner).Close() method serves
only to output an optional performance timing trace metric.

This Close() method is not called consistently; for instance, it is never
called by the migrateExportCommand() function of the "git lfs migrate"
command, and will be skipped by the checkoutCommand() function of the
"git lfs checkout" command if an error is returned by the
(*GitScanner).ScanTree() method.

The utility of the performance timing metric is also undercut by the
fact that some commands perform other tasks before and after calling
the specific (*GitScanner).Scan*() method they invoke.  And in the
particular case of the "git lfs prune" command, multiple goroutines
are started, each running a different Scan*() method concurrently
with the others, so the single final metric reflects only the overall
elapsed time, not the individual execution time of each scan.

We can improve the value of the timing metric while also simplifying
the calling convention for the GitScanner structure's methods by
removing the Close() method, and tracing the performance of each
Scan*() method individually.

Removing the Close() method clarifies that no underlying resources
must be released for the GitScanner structure, and so callers need
not try to register a deferred call to the method.  This parallels
some other conventional Go structures, such as the Scanner structure
of the "bufio" package.

In addition, running a "git lfs prune" command with the GIT_TRACE_PERFORMANCE=1
environment variable set now produces more detailed and useful output,
for example:

  12:36:51.221526 performance ScanStashed: 0.013632533 s
  12:36:51.224494 performance ScanUnpushed: 0.016570280 s
  12:36:51.240670 performance ScanTree: 0.017171717 s
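
Schematically, each Scan*() method now captures a start time and defers
the trace call around its existing work; a minimal sketch of the pattern
(the signature and the scanTree() helper are illustrative, not the exact
upstream code):

  func (s *GitScanner) ScanTree(ref string, cb GitScannerFoundPointer) error {
      start := time.Now()
      defer tracerx.PerformanceSince("ScanTree", start)
      return s.scanTree(ref, cb) // hypothetical helper doing the actual scan
  }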

package commands

import (
    "fmt"
    "os"
    "time"

    "github.com/git-lfs/git-lfs/v3/filepathfilter"
    "github.com/git-lfs/git-lfs/v3/git"
    "github.com/git-lfs/git-lfs/v3/lfs"
    "github.com/git-lfs/git-lfs/v3/tasklog"
    "github.com/git-lfs/git-lfs/v3/tq"
    "github.com/git-lfs/git-lfs/v3/tr"
    "github.com/rubyist/tracerx"
    "github.com/spf13/cobra"
)

var (
    fetchRecentArg bool
    fetchAllArg    bool
    fetchPruneArg  bool
)

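// getIncludeExcludeArgs returns pointers to the values of the --include
// and --exclude flags, or nil for any flag the user did not set, so that
// an unset flag can be distinguished from an explicitly empty one.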
func getIncludeExcludeArgs(cmd *cobra.Command) (include, exclude *string) {
    includeFlag := cmd.Flag("include")
    excludeFlag := cmd.Flag("exclude")
    if includeFlag.Changed {
        include = &includeArg
    }
    if excludeFlag.Changed {
        exclude = &excludeArg
    }
    return
}

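// fetchCommand implements "git lfs fetch".  The first argument, if any,
// names the remote, and any further arguments are refs to fetch for;
// with no ref arguments the current ref is used unless --all is given.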
func fetchCommand(cmd *cobra.Command, args []string) {
    setupRepository()

    var refs []*git.Ref

    if len(args) > 0 {
        // Remote is first arg
        if err := cfg.SetValidRemote(args[0]); err != nil {
            Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
        }
    }

    if len(args) > 1 {
        resolvedrefs, err := git.ResolveRefs(args[1:])
        if err != nil {
            Panic(err, tr.Tr.Get("Invalid ref argument: %v", args[1:]))
        }
        refs = resolvedrefs
    } else if !fetchAllArg {
        ref, err := git.CurrentRef()
        if err != nil {
            Panic(err, tr.Tr.Get("Could not fetch"))
        }
        refs = []*git.Ref{ref}
    }

    success := true
    include, exclude := getIncludeExcludeArgs(cmd)
    fetchPruneCfg := lfs.NewFetchPruneConfig(cfg.Git)

    if fetchAllArg {
        if fetchRecentArg {
            Exit(tr.Tr.Get("Cannot combine --all with --recent"))
        }
        if include != nil || exclude != nil {
            Exit(tr.Tr.Get("Cannot combine --all with --include or --exclude"))
        }
        if len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {
            Print(tr.Tr.Get("Ignoring global include / exclude paths to fulfil --all"))
        }

        if len(args) > 1 {
            // Allocate with zero length so append() does not leave
            // a run of empty leading SHAs in the slice.
            refShas := make([]string, 0, len(refs))
            for _, ref := range refs {
                refShas = append(refShas, ref.Sha)
            }
            success = fetchRefs(refShas)
        } else {
            success = fetchAll()
        }
    } else { // !all
        filter := buildFilepathFilter(cfg, include, exclude, true)

        // Fetch refs sequentially per arg order; duplicates in later refs will be ignored
        for _, ref := range refs {
            Print("fetch: %s", tr.Tr.Get("Fetching reference %s", ref.Refspec()))
            s := fetchRef(ref.Sha, filter)
            success = success && s
        }

        if fetchRecentArg || fetchPruneCfg.FetchRecentAlways {
            s := fetchRecent(fetchPruneCfg, refs, filter)
            success = success && s
        }
    }

    if fetchPruneArg {
        verify := fetchPruneCfg.PruneVerifyRemoteAlways
        // no dry-run or verbose options in fetch, assume false
        prune(fetchPruneCfg, verify, false, false)
    }

    if !success {
        c := getAPIClient()
        e := c.Endpoints.Endpoint("download", cfg.Remote())
        Exit(tr.Tr.Get("error: failed to fetch some objects from '%s'", e.Url))
    }
}

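// pointersToFetchForRef scans the tree at the given ref and accumulates
// the Git LFS pointers found there, along with any scan errors.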
func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, error) {
    var pointers []*lfs.WrappedPointer
    var multiErr error
    tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
        if err != nil {
            if multiErr != nil {
                multiErr = fmt.Errorf("%v\n%v", multiErr, err)
            } else {
                multiErr = err
            }
            return
        }
        pointers = append(pointers, p)
    })
    tempgitscanner.Filter = filter

    if err := tempgitscanner.ScanTree(ref, nil); err != nil {
        return nil, err
    }

    return pointers, multiErr
}

// Fetch all binaries for a given ref (that we don't have already)
func fetchRef(ref string, filter *filepathfilter.Filter) bool {
    pointers, err := pointersToFetchForRef(ref, filter)
    if err != nil {
        Panic(err, tr.Tr.Get("Could not scan for Git LFS files"))
    }
    return fetchAndReportToChan(pointers, filter, nil)
}

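// pointersToFetchForRefs scans the given refs, reporting progress as
// objects are found, and accumulates pointers and any scan errors.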
func pointersToFetchForRefs(refs []string) ([]*lfs.WrappedPointer, error) {
    // This could be a long process so use the chan version & report progress
    logger := tasklog.NewLogger(OutputWriter,
        tasklog.ForceProgress(cfg.ForceProgress()),
    )
    task := logger.Simple()
    defer task.Complete()

    // use temp gitscanner to collect pointers
    var pointers []*lfs.WrappedPointer
    var multiErr error
    var numObjs int64

    tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
        if err != nil {
            if multiErr != nil {
                multiErr = fmt.Errorf("%v\n%v", multiErr, err)
            } else {
                multiErr = err
            }
            return
        }

        numObjs++
        task.Logf("fetch: %s", tr.Tr.GetN("%d object found", "%d objects found", int(numObjs), numObjs))
        pointers = append(pointers, p)
    })

    if err := tempgitscanner.ScanRefs(refs, nil, nil); err != nil {
        return nil, err
    }

    return pointers, multiErr
}

func fetchRefs(refs []string) bool {
    pointers, err := pointersToFetchForRefs(refs)
    if err != nil {
        Panic(err, tr.Tr.Get("Could not scan for Git LFS files"))
    }
    return fetchAndReportToChan(pointers, nil, nil)
}

// Fetch all previous versions of objects from since to ref (not including final state at ref)
// So this will fetch all the '-' sides of the diff from since to ref
func fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.Filter) bool {
    var pointers []*lfs.WrappedPointer

    tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
        if err != nil {
            Panic(err, tr.Tr.Get("Could not scan for Git LFS previous versions"))
            return
        }
        pointers = append(pointers, p)
    })
    tempgitscanner.Filter = filter

    if err := tempgitscanner.ScanPreviousVersions(ref, since, nil); err != nil {
        ExitWithError(err)
    }

    return fetchAndReportToChan(pointers, filter, nil)
}

// Fetch recent objects based on config
func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref, filter *filepathfilter.Filter) bool {
    if fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 {
        return true
    }

    ok := true

    // Make a list of what unique commits we've already fetched for to avoid duplicating work
    uniqueRefShas := make(map[string]string, len(alreadyFetchedRefs))
    for _, ref := range alreadyFetchedRefs {
        uniqueRefShas[ref.Sha] = ref.Name
    }

    // First find any other recent refs
    if fetchconf.FetchRecentRefsDays > 0 {
        Print("fetch: %s", tr.Tr.GetN(
            "Fetching recent branches within %v day",
            "Fetching recent branches within %v days",
            fetchconf.FetchRecentRefsDays,
            fetchconf.FetchRecentRefsDays,
        ))
        refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays)
        refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.Remote())
        if err != nil {
            Panic(err, tr.Tr.Get("Could not scan for recent refs"))
        }

        for _, ref := range refs {
            // Don't fetch for the same SHA twice
            if prevRefName, ok := uniqueRefShas[ref.Sha]; ok {
                if ref.Name != prevRefName {
                    tracerx.Printf("Skipping fetch for %v, already fetched via %v", ref.Name, prevRefName)
                }
            } else {
                uniqueRefShas[ref.Sha] = ref.Name
                Print("fetch: %s", tr.Tr.Get("Fetching reference %s", ref.Name))
                k := fetchRef(ref.Sha, filter)
                ok = ok && k
            }
        }
    }

    // For every unique commit we've fetched, check recent commits too
    if fetchconf.FetchRecentCommitsDays > 0 {
        for commit, refName := range uniqueRefShas {
            // We measure from the last commit at the ref
            summ, err := git.GetCommitSummary(commit)
            if err != nil {
                Error(tr.Tr.Get("Couldn't scan commits at %v: %v", refName, err))
                continue
            }

            Print("fetch: %s", tr.Tr.GetN(
                "Fetching changes within %v day of %v",
                "Fetching changes within %v days of %v",
                fetchconf.FetchRecentCommitsDays,
                fetchconf.FetchRecentCommitsDays,
                refName,
            ))
            commitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays)

            k := fetchPreviousVersions(commit, commitsSince, filter)
            ok = ok && k
        }
    }

    return ok
}

func fetchAll() bool {
    pointers := scanAll()
    Print("fetch: %s", tr.Tr.Get("Fetching all references..."))
    return fetchAndReportToChan(pointers, nil, nil)
}

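// scanAll collects the Git LFS pointers for all objects reachable from
// any reference, reporting progress as objects are found.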
func scanAll() []*lfs.WrappedPointer {
    // This could be a long process so use the chan version & report progress
    logger := tasklog.NewLogger(OutputWriter,
        tasklog.ForceProgress(cfg.ForceProgress()),
    )
    task := logger.Simple()
    defer task.Complete()

    // use temp gitscanner to collect pointers
    var pointers []*lfs.WrappedPointer
    var multiErr error
    var numObjs int64

    tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
        if err != nil {
            if multiErr != nil {
                multiErr = fmt.Errorf("%v\n%v", multiErr, err)
            } else {
                multiErr = err
            }
            return
        }

        numObjs++
        task.Logf("fetch: %s", tr.Tr.GetN("%d object found", "%d objects found", int(numObjs), numObjs))
        pointers = append(pointers, p)
    })

    if err := tempgitscanner.ScanAll(nil); err != nil {
        Panic(err, tr.Tr.Get("Could not scan for Git LFS files"))
    }

    if multiErr != nil {
        Panic(multiErr, tr.Tr.Get("Could not scan for Git LFS files"))
    }

    return pointers
}

// Fetch and report completion of each OID to a channel (optional, pass nil to skip)
// Returns true if all completed with no errors, false if errors were written to stderr/log
func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter, out chan<- *lfs.WrappedPointer) bool {
    ready, pointers, meter := readyAndMissingPointers(allpointers, filter)
    q := newDownloadQueue(
        getTransferManifestOperationRemote("download", cfg.Remote()),
        cfg.Remote(), tq.WithProgress(meter),
    )

    if out != nil {
        // If we already have it, or it won't be fetched
        // report it to chan immediately to support pull/checkout
        for _, p := range ready {
            out <- p
        }

        dlwatch := q.Watch()

        go func() {
            // fetch only reports single OID, but OID *might* be referenced by multiple
            // WrappedPointers if same content is at multiple paths, so map oid->slice
            oidToPointers := make(map[string][]*lfs.WrappedPointer, len(pointers))
            for _, pointer := range pointers {
                plist := oidToPointers[pointer.Oid]
                oidToPointers[pointer.Oid] = append(plist, pointer)
            }

            for t := range dlwatch {
                plist, ok := oidToPointers[t.Oid]
                if !ok {
                    continue
                }
                for _, p := range plist {
                    out <- p
                }
            }
            close(out)
        }()
    }

    for _, p := range pointers {
        tracerx.Printf("fetch %v [%v]", p.Name, p.Oid)
        q.Add(downloadTransfer(p))
    }

    processQueue := time.Now()
    q.Wait()
    tracerx.PerformanceSince("process queue", processQueue)

    ok := true
    for _, err := range q.Errors() {
        ok = false
        FullError(err)
    }
    return ok
}

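// readyAndMissingPointers partitions the given pointers into those whose
// objects are already present locally and those still to be downloaded,
// deduplicating by OID and sizing the progress meter from the latter.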
func readyAndMissingPointers(allpointers []*lfs.WrappedPointer, filter *filepathfilter.Filter) ([]*lfs.WrappedPointer, []*lfs.WrappedPointer, *tq.Meter) {
    logger := tasklog.NewLogger(os.Stdout,
        tasklog.ForceProgress(cfg.ForceProgress()),
    )
    meter := buildProgressMeter(false, tq.Download)
    logger.Enqueue(meter)

    seen := make(map[string]bool, len(allpointers))
    missing := make([]*lfs.WrappedPointer, 0, len(allpointers))
    ready := make([]*lfs.WrappedPointer, 0, len(allpointers))

    for _, p := range allpointers {
        // no need to download the same object multiple times
        if seen[p.Oid] {
            continue
        }

        seen[p.Oid] = true

        // no need to download objects that exist locally already
        lfs.LinkOrCopyFromReference(cfg, p.Oid, p.Size)
        if cfg.LFSObjectExists(p.Oid, p.Size) {
            ready = append(ready, p)
            continue
        }

        missing = append(missing, p)
        meter.Add(p.Size)
    }

    return ready, missing, meter
}

func init() {
    RegisterCommand("fetch", fetchCommand, func(cmd *cobra.Command) {
        cmd.Flags().StringVarP(&includeArg, "include", "I", "", "Include a list of paths")
        cmd.Flags().StringVarP(&excludeArg, "exclude", "X", "", "Exclude a list of paths")
        cmd.Flags().BoolVarP(&fetchRecentArg, "recent", "r", false, "Fetch recent refs & commits")
        cmd.Flags().BoolVarP(&fetchAllArg, "all", "a", false, "Fetch all LFS files ever referenced")
        cmd.Flags().BoolVarP(&fetchPruneArg, "prune", "p", false, "After fetching, prune old data")
    })
}