Merge pull request #4781 from bk2204/translations

Mark almost all strings for translation
brian m. carlson 2022-01-18 20:01:35 +00:00 committed by GitHub
commit 8029531074
101 changed files with 991 additions and 709 deletions
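For orientation before the file-by-file diff: the pattern applied throughout is to route user-facing text through the new `tr` package, with `tr.Tr.Get` taking a format string plus arguments and `tr.Tr.GetN` choosing between singular and plural forms by count (replacing the old `%d file(s)` style), while `fmt.Errorf` calls become `errors.New(tr.Tr.Get(...))` since `Get` already interpolates the arguments. A minimal sketch of that pattern, with hypothetical message text not taken from the diff:

```go
package example

import (
	"fmt"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/tr"
)

// report illustrates the translation calls used throughout this commit.
// The message strings below are hypothetical examples, not ones from the diff.
func report(path string, count int) error {
	// tr.Tr.Get takes a fmt-style format string plus arguments and returns
	// the formatted, translated message.
	fmt.Println(tr.Tr.Get("Scanning %s for Git LFS objects", path))

	// tr.Tr.GetN selects the singular or plural form based on the count
	// argument, then formats the remaining arguments.
	fmt.Println(tr.Tr.GetN("%d object found", "%d objects found", count, count))

	// Errors follow the same pattern: Get already interpolates the
	// arguments, so fmt.Errorf is replaced by errors.New around the result.
	if count == 0 {
		return errors.New(tr.Tr.Get("no objects found under %s", path))
	}
	return nil
}
```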

.gitignore

@ -34,6 +34,7 @@ src
commands/mancontent_gen.go
po/build
po/i-reverse.po
*.mo
*.pot
tr/tr_gen.go

@ -110,6 +110,9 @@ PO = $(wildcard po/*.po)
# MO is a list of all the mo (gettext compiled) files to be built.
MO = $(patsubst po/%.po,po/build/%.mo,$(PO))
# XGOTEXT is the string extractor for gotext.
XGOTEXT ?= xgotext
# PKGS is a listing of packages that are considered to be a part of Git LFS, and
# are used in package-specific commands, such as the 'make test' targets. For
# example:
@ -232,6 +235,15 @@ po/build/%.mo: po/%.po po/build
$(MSGFMT) -o $@ $<; \
fi
po/i-reverse.po: po/default.pot
script/gen-i-reverse $< $@
po/default.pot:
if command -v $(XGOTEXT) >/dev/null 2>&1; \
then \
$(XGOTEXT) -in . -exclude .git,.github,vendor -out po -v; \
fi
# Targets 'all' and 'build' build binaries of Git LFS for the above release
# matrix.
.PHONY : all build

@ -4,11 +4,13 @@ import (
"fmt"
"os"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/filepathfilter"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -24,7 +26,7 @@ func checkoutCommand(cmd *cobra.Command, args []string) {
stage, err := whichCheckout()
if err != nil {
Exit("Error parsing args: %v", err)
Exit(tr.Tr.Get("Error parsing args: %v", err))
}
if checkoutTo != "" && stage != git.IndexStageDefault {
@ -34,17 +36,17 @@ func checkoutCommand(cmd *cobra.Command, args []string) {
checkoutConflict(rootedPaths(args)[0], stage)
return
} else if checkoutTo != "" || stage != git.IndexStageDefault {
Exit("--to and exactly one of --theirs, --ours, and --base must be used together")
Exit(tr.Tr.Get("--to and exactly one of --theirs, --ours, and --base must be used together"))
}
ref, err := git.CurrentRef()
if err != nil {
Panic(err, "Could not checkout")
Panic(err, tr.Tr.Get("Could not checkout"))
}
singleCheckout := newSingleCheckout(cfg.Git, "")
if singleCheckout.Skip() {
fmt.Println("Cannot checkout LFS objects, Git LFS is not installed.")
fmt.Println(tr.Tr.Get("Cannot checkout LFS objects, Git LFS is not installed."))
return
}
@ -59,7 +61,7 @@ func checkoutCommand(cmd *cobra.Command, args []string) {
logger.Enqueue(meter)
chgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
if err != nil {
LoggedError(err, "Scanner error: %s", err)
LoggedError(err, tr.Tr.Get("Scanner error: %s", err))
return
}
@ -93,33 +95,33 @@ func checkoutCommand(cmd *cobra.Command, args []string) {
func checkoutConflict(file string, stage git.IndexStage) {
singleCheckout := newSingleCheckout(cfg.Git, "")
if singleCheckout.Skip() {
fmt.Println("Cannot checkout LFS objects, Git LFS is not installed.")
fmt.Println(tr.Tr.Get("Cannot checkout LFS objects, Git LFS is not installed."))
return
}
ref, err := git.ResolveRef(fmt.Sprintf(":%d:%s", stage, file))
if err != nil {
Exit("Could not checkout (are you not in the middle of a merge?): %v", err)
Exit(tr.Tr.Get("Could not checkout (are you not in the middle of a merge?): %v", err))
}
scanner, err := git.NewObjectScanner(cfg.GitEnv(), cfg.OSEnv())
if err != nil {
Exit("Could not create object scanner: %v", err)
Exit(tr.Tr.Get("Could not create object scanner: %v", err))
}
if !scanner.Scan(ref.Sha) {
Exit("Could not find object %q", ref.Sha)
Exit(tr.Tr.Get("Could not find object %q", ref.Sha))
}
ptr, err := lfs.DecodePointer(scanner.Contents())
if err != nil {
Exit("Could not find decoder pointer for object %q: %v", ref.Sha, err)
Exit(tr.Tr.Get("Could not find decoder pointer for object %q: %v", ref.Sha, err))
}
p := &lfs.WrappedPointer{Name: file, Pointer: ptr}
if err := singleCheckout.RunToPath(p, checkoutTo); err != nil {
Exit("Error checking out %v to %q: %v", ref.Sha, checkoutTo, err)
Exit(tr.Tr.Get("Error checking out %v to %q: %v", ref.Sha, checkoutTo, err))
}
singleCheckout.Close()
}
@ -142,7 +144,7 @@ func whichCheckout() (stage git.IndexStage, err error) {
}
if seen > 1 {
return 0, fmt.Errorf("at most one of --base, --theirs, and --ours is allowed")
return 0, errors.New(tr.Tr.Get("at most one of --base, --theirs, and --ours is allowed"))
}
return stage, nil
}

@ -7,6 +7,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -62,26 +63,26 @@ func clean(gf *lfs.GitFilter, to io.Writer, from io.Reader, fileName string, fil
}
if err != nil {
ExitWithError(errors.Wrap(err, "Error cleaning LFS object"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Error cleaning LFS object")))
}
tmpfile := cleaned.Filename
mediafile, err := gf.ObjectPath(cleaned.Oid)
if err != nil {
Panic(err, "Unable to get local media path.")
Panic(err, tr.Tr.Get("Unable to get local media path."))
}
if stat, _ := os.Stat(mediafile); stat != nil {
if stat.Size() != cleaned.Size && len(cleaned.Pointer.Extensions) == 0 {
Exit("Files don't match:\n%s\n%s", mediafile, tmpfile)
Exit(tr.Tr.Get("Files don't match:\n%s\n%s", mediafile, tmpfile))
}
Debug("%s exists", mediafile)
} else {
if err := os.Rename(tmpfile, mediafile); err != nil {
Panic(err, "Unable to move %s to %s\n", tmpfile, mediafile)
Panic(err, tr.Tr.Get("Unable to move %s to %s\n", tmpfile, mediafile))
}
Debug("Writing %s", mediafile)
Debug(tr.Tr.Get("Writing %s", mediafile))
}
_, err = lfs.EncodePointer(to, cleaned.Pointer)
@ -89,7 +90,7 @@ func clean(gf *lfs.GitFilter, to io.Writer, from io.Reader, fileName string, fil
}
func cleanCommand(cmd *cobra.Command, args []string) {
requireStdin("This command should be run by the Git 'clean' filter")
requireStdin(tr.Tr.Get("This command should be run by the Git 'clean' filter"))
setupRepository()
installHooks(false)
@ -105,7 +106,7 @@ func cleanCommand(cmd *cobra.Command, args []string) {
}
if ptr != nil && possiblyMalformedObjectSize(ptr.Size) {
Error("Possibly malformed conversion on Windows, see `git lfs help smudge` for more details.")
Error(tr.Tr.Get("Possibly malformed conversion on Windows, see `git lfs help smudge` for more details."))
}
}

@ -8,6 +8,7 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tools"
@ -24,27 +25,26 @@ func cloneCommand(cmd *cobra.Command, args []string) {
requireGitVersion()
if git.IsGitVersionAtLeast("2.15.0") {
msg := []string{
"WARNING: 'git lfs clone' is deprecated and will not be updated",
" with new flags from 'git clone'",
"",
"'git clone' has been updated in upstream Git to have comparable",
"speeds to 'git lfs clone'.",
}
fmt.Fprintln(os.Stderr, strings.Join(msg, "\n"))
msg := tr.Tr.Get(`WARNING: 'git lfs clone' is deprecated and will not be updated
with new flags from 'git clone'

'git clone' has been updated in upstream Git to have comparable
speeds to 'git lfs clone'.
`)
fmt.Fprintln(os.Stderr, msg)
}
// We pass all args to git clone
err := git.CloneWithoutFilters(cloneFlags, args)
if err != nil {
Exit("Error(s) during clone:\n%v", err)
Exit(tr.Tr.Get("Error(s) during clone:\n%v", err))
}
// now execute pull (need to be inside dir)
cwd, err := tools.Getwd()
if err != nil {
Exit("Unable to derive current working dir: %v", err)
Exit(tr.Tr.Get("Unable to derive current working dir: %v", err))
}
// Either the last argument was a relative or local dir, or we have to
@ -58,13 +58,13 @@ func cloneCommand(cmd *cobra.Command, args []string) {
}
clonedir, _ = filepath.Abs(base)
if !tools.DirExists(clonedir) {
Exit("Unable to find clone dir at %q", clonedir)
Exit(tr.Tr.Get("Unable to find clone dir at %q", clonedir))
}
}
err = os.Chdir(clonedir)
if err != nil {
Exit("Unable to change directory to clone dir %q: %v", clonedir, err)
Exit(tr.Tr.Get("Unable to change directory to clone dir %q: %v", clonedir, err))
}
// Make sure we pop back to dir we started in at the end
@ -87,7 +87,7 @@ func cloneCommand(cmd *cobra.Command, args []string) {
pull(filter)
err := postCloneSubmodules(args)
if err != nil {
Exit("Error performing 'git lfs pull' for submodules: %v", err)
Exit(tr.Tr.Get("Error performing 'git lfs pull' for submodules: %v", err))
}
}
}

@ -10,6 +10,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -30,11 +31,11 @@ func dedupTestCommand(*cobra.Command, []string) {
if err == nil {
err = errors.New("Unknown reason.")
}
Exit("This system does not support deduplication. %s", err)
Exit(tr.Tr.Get("This system does not support deduplication. %s", err))
}
if len(cfg.Extensions()) > 0 {
Exit("This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used.")
Exit(tr.Tr.Get("This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used."))
}
Print("OK: This platform and repository support file de-duplication.")
@ -50,30 +51,30 @@ func dedupCommand(cmd *cobra.Command, args []string) {
if gitDir, err := git.GitDir(); err != nil {
ExitWithError(err)
} else if supported, err := tools.CheckCloneFileSupported(gitDir); err != nil || !supported {
Exit("This system does not support deduplication.")
Exit(tr.Tr.Get("This system does not support deduplication."))
}
if len(cfg.Extensions()) > 0 {
Exit("This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used.")
Exit(tr.Tr.Get("This platform supports file de-duplication, however, Git LFS extensions are configured and therefore de-duplication can not be used."))
}
if dirty, err := git.IsWorkingCopyDirty(); err != nil {
ExitWithError(err)
} else if dirty {
Exit("Working tree is dirty. Please commit or reset your change.")
Exit(tr.Tr.Get("Working tree is dirty. Please commit or reset your change."))
}
// We assume working tree is clean.
gitScanner := lfs.NewGitScanner(config.New(), func(p *lfs.WrappedPointer, err error) {
if err != nil {
Exit("Could not scan for Git LFS tree: %s", err)
Exit(tr.Tr.Get("Could not scan for Git LFS tree: %s", err))
return
}
if success, err := dedup(p); err != nil {
Error("Skipped: %s (Size: %d)\n %s", p.Name, p.Size, err)
Error(tr.Tr.Get("Skipped: %s (Size: %d)\n %s", p.Name, p.Size, err))
} else if !success {
Error("Skipped: %s (Size: %d)", p.Name, p.Size)
Error(tr.Tr.Get("Skipped: %s (Size: %d)", p.Name, p.Size))
} else if success {
Print("Success: %s (Size: %d)", p.Name, p.Size)

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -36,10 +37,10 @@ func printAllExts() {
}
func printExt(ext config.Extension) {
Print("Extension: %s", ext.Name)
Print(" clean = %s", ext.Clean)
Print(" smudge = %s", ext.Smudge)
Print(" priority = %d", ext.Priority)
Print(tr.Tr.Get(`Extension: %s
clean = %s
smudge = %s
priority = %d`, ext.Name, ext.Clean, ext.Smudge, ext.Priority))
}
func init() {

@ -10,6 +10,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -41,20 +42,20 @@ func fetchCommand(cmd *cobra.Command, args []string) {
if len(args) > 0 {
// Remote is first arg
if err := cfg.SetValidRemote(args[0]); err != nil {
Exit("Invalid remote name %q: %s", args[0], err)
Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
}
}
if len(args) > 1 {
resolvedrefs, err := git.ResolveRefs(args[1:])
if err != nil {
Panic(err, "Invalid ref argument: %v", args[1:])
Panic(err, tr.Tr.Get("Invalid ref argument: %v", args[1:]))
}
refs = resolvedrefs
} else if !fetchAllArg {
ref, err := git.CurrentRef()
if err != nil {
Panic(err, "Could not fetch")
Panic(err, tr.Tr.Get("Could not fetch"))
}
refs = []*git.Ref{ref}
}
@ -68,13 +69,13 @@ func fetchCommand(cmd *cobra.Command, args []string) {
if fetchAllArg {
if fetchRecentArg {
Exit("Cannot combine --all with --recent")
Exit(tr.Tr.Get("Cannot combine --all with --recent"))
}
if include != nil || exclude != nil {
Exit("Cannot combine --all with --include or --exclude")
Exit(tr.Tr.Get("Cannot combine --all with --include or --exclude"))
}
if len(cfg.FetchIncludePaths()) > 0 || len(cfg.FetchExcludePaths()) > 0 {
Print("Ignoring global include / exclude paths to fulfil --all")
Print(tr.Tr.Get("Ignoring global include / exclude paths to fulfil --all"))
}
if len(args) > 1 {
@ -92,7 +93,7 @@ func fetchCommand(cmd *cobra.Command, args []string) {
// Fetch refs sequentially per arg order; duplicates in later refs will be ignored
for _, ref := range refs {
Print("fetch: Fetching reference %s", ref.Refspec())
Print(tr.Tr.Get("fetch: Fetching reference %s", ref.Refspec()))
s := fetchRef(ref.Sha, filter)
success = success && s
}
@ -112,7 +113,7 @@ func fetchCommand(cmd *cobra.Command, args []string) {
if !success {
c := getAPIClient()
e := c.Endpoints.Endpoint("download", cfg.Remote())
Exit("error: failed to fetch some objects from '%s'", e.Url)
Exit(tr.Tr.Get("error: failed to fetch some objects from '%s'", e.Url))
}
}
@ -146,7 +147,7 @@ func pointersToFetchForRef(ref string, filter *filepathfilter.Filter) ([]*lfs.Wr
func fetchRef(ref string, filter *filepathfilter.Filter) bool {
pointers, err := pointersToFetchForRef(ref, filter)
if err != nil {
Panic(err, "Could not scan for Git LFS files")
Panic(err, tr.Tr.Get("Could not scan for Git LFS files"))
}
return fetchAndReportToChan(pointers, filter, nil)
}
@ -176,7 +177,7 @@ func pointersToFetchForRefs(refs []string) ([]*lfs.WrappedPointer, error) {
}
numObjs++
task.Logf("fetch: %d object(s) found", numObjs)
task.Logf(tr.Tr.GetN("fetch: %d object found", "fetch: %d objects found", int(numObjs), numObjs))
pointers = append(pointers, p)
})
@ -191,7 +192,7 @@ func pointersToFetchForRefs(refs []string) ([]*lfs.WrappedPointer, error) {
func fetchRefs(refs []string) bool {
pointers, err := pointersToFetchForRefs(refs)
if err != nil {
Panic(err, "Could not scan for Git LFS files")
Panic(err, tr.Tr.Get("Could not scan for Git LFS files"))
}
return fetchAndReportToChan(pointers, nil, nil)
}
@ -203,7 +204,7 @@ func fetchPreviousVersions(ref string, since time.Time, filter *filepathfilter.F
tempgitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
if err != nil {
Panic(err, "Could not scan for Git LFS previous versions")
Panic(err, tr.Tr.Get("Could not scan for Git LFS previous versions"))
return
}
@ -234,11 +235,16 @@ func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref,
}
// First find any other recent refs
if fetchconf.FetchRecentRefsDays > 0 {
Print("fetch: Fetching recent branches within %v days", fetchconf.FetchRecentRefsDays)
Print(tr.Tr.GetN(
"fetch: Fetching recent branches within %v day",
"fetch: Fetching recent branches within %v days",
fetchconf.FetchRecentRefsDays,
fetchconf.FetchRecentRefsDays,
))
refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays)
refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, cfg.Remote())
if err != nil {
Panic(err, "Could not scan for recent refs")
Panic(err, tr.Tr.Get("Could not scan for recent refs"))
}
for _, ref := range refs {
// Don't fetch for the same SHA twice
@ -248,7 +254,7 @@ func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref,
}
} else {
uniqueRefShas[ref.Sha] = ref.Name
Print("fetch: Fetching reference %s", ref.Name)
Print(tr.Tr.Get("fetch: Fetching reference %s", ref.Name))
k := fetchRef(ref.Sha, filter)
ok = ok && k
}
@ -260,10 +266,16 @@ func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref,
// We measure from the last commit at the ref
summ, err := git.GetCommitSummary(commit)
if err != nil {
Error("Couldn't scan commits at %v: %v", refName, err)
Error(tr.Tr.Get("Couldn't scan commits at %v: %v", refName, err))
continue
}
Print("fetch: Fetching changes within %v days of %v", fetchconf.FetchRecentCommitsDays, refName)
Print(tr.Tr.GetN(
"fetch: Fetching changes within %v day of %v",
"fetch: Fetching changes within %v days of %v",
fetchconf.FetchRecentCommitsDays,
fetchconf.FetchRecentCommitsDays,
refName,
))
commitsSince := summ.CommitDate.AddDate(0, 0, -fetchconf.FetchRecentCommitsDays)
k := fetchPreviousVersions(commit, commitsSince, filter)
ok = ok && k
@ -275,7 +287,7 @@ func fetchRecent(fetchconf lfs.FetchPruneConfig, alreadyFetchedRefs []*git.Ref,
func fetchAll() bool {
pointers := scanAll()
Print("fetch: Fetching all references...")
Print(tr.Tr.Get("fetch: Fetching all references..."))
return fetchAndReportToChan(pointers, nil, nil)
}
@ -304,18 +316,18 @@ func scanAll() []*lfs.WrappedPointer {
}
numObjs++
task.Logf("fetch: %d object(s) found", numObjs)
task.Logf(tr.Tr.GetN("fetch: %d object found", "fetch: %d objects found", int(numObjs), numObjs))
pointers = append(pointers, p)
})
if err := tempgitscanner.ScanAll(nil); err != nil {
Panic(err, "Could not scan for Git LFS files")
Panic(err, tr.Tr.Get("Could not scan for Git LFS files"))
}
tempgitscanner.Close()
if multiErr != nil {
Panic(multiErr, "Could not scan for Git LFS files")
Panic(multiErr, tr.Tr.Get("Could not scan for Git LFS files"))
}
return pointers

@ -172,7 +172,7 @@ func filterCommand(cmd *cobra.Command, args []string) {
}
err = s.WriteList(paths)
default:
ExitWithError(fmt.Errorf("unknown command %q", req.Header["command"]))
ExitWithError(errors.New(tr.Tr.Get("unknown command %q", req.Header["command"])))
}
if errors.IsNotAPointerError(err) {
@ -206,20 +206,30 @@ func filterCommand(cmd *cobra.Command, args []string) {
}
if len(malformed) > 0 {
fmt.Fprintf(os.Stderr, "Encountered %d file(s) that should have been pointers, but weren't:\n", len(malformed))
fmt.Fprintf(os.Stderr, tr.Tr.GetN(
"Encountered %d file that should have been pointers, but wasn't:\n",
"Encountered %d files that should have been pointers, but weren't:\n",
len(malformed),
len(malformed),
))
for _, m := range malformed {
fmt.Fprintf(os.Stderr, "\t%s\n", m)
}
}
if len(malformedOnWindows) > 0 && cfg.Git.Bool("lfs.largefilewarning", !git.IsGitVersionAtLeast("2.34.0")) {
fmt.Fprintf(os.Stderr, "Encountered %d file(s) that may not have been copied correctly on Windows:\n", len(malformedOnWindows))
fmt.Fprintf(os.Stderr, tr.Tr.GetN(
"Encountered %d file that may not have been copied correctly on Windows:\n",
"Encountered %d files that may not have been copied correctly on Windows:\n",
len(malformedOnWindows),
len(malformedOnWindows),
))
for _, m := range malformedOnWindows {
fmt.Fprintf(os.Stderr, "\t%s\n", m)
}
fmt.Fprintf(os.Stderr, "\nSee: `git lfs help smudge` for more details.\n")
fmt.Fprintf(os.Stderr, tr.Tr.Get("\nSee: `git lfs help smudge` for more details.\n"))
}
if err := s.Err(); err != nil && err != io.EOF {

@ -14,6 +14,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -90,7 +91,7 @@ func fsckCommand(cmd *cobra.Command, args []string) {
}
if ok {
Print("Git LFS fsck OK")
Print(tr.Tr.Get("Git LFS fsck OK"))
return
}
@ -99,7 +100,7 @@ func fsckCommand(cmd *cobra.Command, args []string) {
}
badDir := filepath.Join(cfg.LFSStorageDir(), "bad")
Print("objects: repair: moving corrupt objects to %s", badDir)
Print("objects: repair: %s", tr.Tr.Get("moving corrupt objects to %s", badDir))
if err := tools.MkdirAll(badDir, cfg); err != nil {
ExitWithError(err)
@ -131,7 +132,7 @@ func doFsckObjects(start, end string, useIndex bool) []string {
}
if err != nil {
Panic(err, "Error checking Git LFS files")
Panic(err, tr.Tr.Get("Error checking Git LFS files"))
}
})
@ -167,12 +168,12 @@ func doFsckPointers(start, end string) []corruptPointer {
var corruptPointers []corruptPointer
gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
if p != nil {
Debug("Examining %v (%v)", p.Oid, p.Name)
Debug(tr.Tr.Get("Examining %v (%v)", p.Oid, p.Name))
if !p.Canonical {
cp := corruptPointer{
blobOid: p.Sha1,
lfsOid: p.Oid,
message: fmt.Sprintf("Pointer for %s (blob %s) was not canonical", p.Oid, p.Sha1),
message: fmt.Sprintf(tr.Tr.Get("Pointer for %s (blob %s) was not canonical", p.Oid, p.Sha1)),
kind: "nonCanonicalPointer",
}
Print("pointer: %s", cp.String())
@ -184,7 +185,7 @@ func doFsckPointers(start, end string) []corruptPointer {
cp := corruptPointer{
treeOid: psErr.OID(),
path: psErr.Path(),
message: fmt.Sprintf("%q (treeish %s) should have been a pointer but was not", psErr.Path(), psErr.OID()),
message: fmt.Sprintf(tr.Tr.Get("%q (treeish %s) should have been a pointer but was not", psErr.Path(), psErr.OID())),
kind: "unexpectedGitObject",
}
Print("pointer: %s", cp.String())
@ -212,7 +213,7 @@ func doFsckPointers(start, end string) []corruptPointer {
func fsckPointer(name, oid string, size int64) (bool, error) {
path := cfg.Filesystem().ObjectPathname(oid)
Debug("Examining %v (%v)", name, path)
Debug(tr.Tr.Get("Examining %v (%v)", name, path))
f, err := os.Open(path)
if pErr, pOk := err.(*os.PathError); pOk {
@ -220,7 +221,7 @@ func fsckPointer(name, oid string, size int64) (bool, error) {
if size == 0 {
return true, nil
}
Print("objects: openError: %s (%s) could not be checked: %s", name, oid, pErr.Err)
Print("objects: openError: %s", tr.Tr.Get("%s (%s) could not be checked: %s", name, oid, pErr.Err))
return false, nil
}
@ -240,7 +241,7 @@ func fsckPointer(name, oid string, size int64) (bool, error) {
return true, nil
}
Print("objects: corruptObject: %s (%s) is corrupt", name, oid)
Print(fmt.Sprintf("objects: corruptObject: %s", tr.Tr.Get("%s (%s) is corrupt", name, oid)))
return false, nil
}

@ -5,6 +5,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -20,8 +21,7 @@ var (
func installCommand(cmd *cobra.Command, args []string) {
if err := cmdInstallOptions().Install(); err != nil {
Print("WARNING: %s", err.Error())
Print("Run `git lfs install --force` to reset git config.")
Print(tr.Tr.Get("warning: %s\nRun `git lfs install --force` to reset git config.", err.Error()))
os.Exit(2)
}
@ -29,7 +29,7 @@ func installCommand(cmd *cobra.Command, args []string) {
installHooksCommand(cmd, args)
}
Print("Git LFS initialized.")
Print(tr.Tr.Get("Git LFS initialized."))
}
func cmdInstallOptions() *lfs.FilterOptions {
@ -41,18 +41,18 @@ func cmdInstallOptions() *lfs.FilterOptions {
switch {
case localInstall && worktreeInstall:
Exit("Only one of --local and --worktree options can be specified.")
Exit(tr.Tr.Get("Only one of --local and --worktree options can be specified."))
case localInstall && systemInstall:
Exit("Only one of --local and --system options can be specified.")
Exit(tr.Tr.Get("Only one of --local and --system options can be specified."))
case worktreeInstall && systemInstall:
Exit("Only one of --worktree and --system options can be specified.")
Exit(tr.Tr.Get("Only one of --worktree and --system options can be specified."))
}
// This call will return -1 on Windows; don't warn about this there,
// since we can't detect it correctly.
uid := os.Geteuid()
if systemInstall && uid != 0 && uid != -1 {
Print("WARNING: current user is not root/admin, system install is likely to fail.")
Print("warning: current user is not root/admin, system install is likely to fail.")
}
return &lfs.FilterOptions{

@ -2,7 +2,6 @@ package commands
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
@ -11,12 +10,12 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/locking"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
var (
lockRemote string
lockRemoteHelp = "specify which remote to use when interacting with locks"
lockRemote string
)
func lockCommand(cmd *cobra.Command, args []string) {
@ -41,7 +40,7 @@ func lockCommand(cmd *cobra.Command, args []string) {
lock, err := lockClient.LockFile(path)
if err != nil {
Error("Locking %s failed: %v", path, errors.Cause(err))
Error(tr.Tr.Get("Locking %s failed: %v", path, errors.Cause(err)))
success = false
continue
}
@ -52,7 +51,7 @@ func lockCommand(cmd *cobra.Command, args []string) {
continue
}
Print("Locked %s", path)
Print(tr.Tr.Get("Locked %s", path))
}
if locksCmdFlags.JSON {
@ -97,14 +96,14 @@ func lockPath(file string) (string, error) {
wd, err = tools.CanonicalizeSystemPath(wd)
if err != nil {
return "", errors.Wrapf(err,
"could not follow symlinks for %s", wd)
tr.Tr.Get("could not follow symlinks for %s", wd))
}
var abs string
if filepath.IsAbs(file) {
abs, err = tools.CanonicalizeSystemPath(file)
if err != nil {
return "", fmt.Errorf("lfs: unable to canonicalize path %q: %v", file, err)
return "", errors.New(tr.Tr.Get("lfs: unable to canonicalize path %q: %v", file, err))
}
} else {
abs = filepath.Join(wd, file)
@ -116,11 +115,11 @@ func lockPath(file string) (string, error) {
path = filepath.ToSlash(path)
if strings.HasPrefix(path, "../") {
return "", fmt.Errorf("lfs: unable to canonicalize path %q", path)
return "", errors.New(tr.Tr.Get("lfs: unable to canonicalize path %q", path))
}
if stat, err := os.Stat(abs); err == nil && stat.IsDir() {
return path, fmt.Errorf("lfs: cannot lock directory: %s", file)
return path, errors.New(tr.Tr.Get("lfs: cannot lock directory: %s", file))
}
return filepath.ToSlash(path), nil
@ -128,7 +127,7 @@ func lockPath(file string) (string, error) {
func init() {
RegisterCommand("lock", lockCommand, func(cmd *cobra.Command) {
cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", lockRemoteHelp)
cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", "specify which remote to use when interacting with locks")
cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json")
})
}

@ -10,6 +10,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/locking"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -20,7 +21,7 @@ var (
func locksCommand(cmd *cobra.Command, args []string) {
filters, err := locksCmdFlags.Filters()
if err != nil {
Exit("Error building filters: %v", err)
Exit(tr.Tr.Get("Error building filters: %v", err))
}
if len(lockRemote) > 0 {
@ -34,22 +35,22 @@ func locksCommand(cmd *cobra.Command, args []string) {
if locksCmdFlags.Cached {
if locksCmdFlags.Limit > 0 {
Exit("--cached option can't be combined with --limit")
Exit(tr.Tr.Get("--cached option can't be combined with --limit"))
}
if len(filters) > 0 {
Exit("--cached option can't be combined with filters")
Exit(tr.Tr.Get("--cached option can't be combined with filters"))
}
if locksCmdFlags.Local {
Exit("--cached option can't be combined with --local")
Exit(tr.Tr.Get("--cached option can't be combined with --local"))
}
}
if locksCmdFlags.Verify {
if len(filters) > 0 {
Exit("--verify option can't be combined with filters")
Exit(tr.Tr.Get("--verify option can't be combined with filters"))
}
if locksCmdFlags.Local {
Exit("--verify option can't be combined with --local")
Exit(tr.Tr.Get("--verify option can't be combined with --local"))
}
}
@ -123,7 +124,7 @@ func locksCommand(cmd *cobra.Command, args []string) {
}
if err != nil {
Exit("Error while retrieving locks: %v", errors.Cause(err))
Exit(tr.Tr.Get("Error while retrieving locks: %v", errors.Cause(err)))
}
}
@ -173,7 +174,7 @@ func (l *locksFlags) Filters() (map[string]string, error) {
func init() {
RegisterCommand("locks", locksCommand, func(cmd *cobra.Command) {
cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", lockRemoteHelp)
cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", "specify which remote to use when interacting with locks")
cmd.Flags().StringVarP(&locksCmdFlags.Path, "path", "p", "", "filter locks results matching a particular path")
cmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID")
cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return")

@ -6,6 +6,7 @@ import (
"path/filepath"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -18,7 +19,7 @@ func logsCommand(cmd *cobra.Command, args []string) {
func logsLastCommand(cmd *cobra.Command, args []string) {
logs := sortedLogs()
if len(logs) < 1 {
Print("No logs to show")
Print(tr.Tr.Get("No logs to show"))
return
}
@ -27,34 +28,33 @@ func logsLastCommand(cmd *cobra.Command, args []string) {
func logsShowCommand(cmd *cobra.Command, args []string) {
if len(args) == 0 {
Print("Supply a log name.")
Print(tr.Tr.Get("Supply a log name."))
return
}
name := args[0]
by, err := ioutil.ReadFile(filepath.Join(cfg.LocalLogDir(), name))
if err != nil {
Exit("Error reading log: %s", name)
Exit(tr.Tr.Get("Error reading log: %s", name))
}
Debug("Reading log: %s", name)
Debug(tr.Tr.Get("Reading log: %s", name))
os.Stdout.Write(by)
}
func logsClearCommand(cmd *cobra.Command, args []string) {
err := os.RemoveAll(cfg.LocalLogDir())
if err != nil {
Panic(err, "Error clearing %s", cfg.LocalLogDir())
Panic(err, tr.Tr.Get("Error clearing %s", cfg.LocalLogDir()))
}
Print("Cleared %s", cfg.LocalLogDir())
Print(tr.Tr.Get("Cleared %s", cfg.LocalLogDir()))
}
func logsBoomtownCommand(cmd *cobra.Command, args []string) {
Debug("Debug message")
err := errors.Wrapf(errors.New("Inner error message!"), "Error")
Panic(err, "Welcome to Boomtown")
Debug("Never seen")
Debug(tr.Tr.Get("Sample debug message"))
err := errors.Wrapf(errors.New(tr.Tr.Get("Sample wrapped error message")), tr.Tr.Get("Sample error message"))
Panic(err, tr.Tr.Get("Sample panic message"))
}
func sortedLogs() []string {

@ -7,6 +7,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tools/humanize"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -27,7 +28,7 @@ func lsFilesCommand(cmd *cobra.Command, args []string) {
var scanRange = false
if len(args) > 0 {
if lsFilesScanAll {
Exit("fatal: cannot use --all with explicit reference")
Exit(tr.Tr.Get("Cannot use --all with explicit reference"))
} else if args[0] == "--all" {
// Since --all is a valid argument to "git rev-parse",
// if we try to give it to git.ResolveRef below, we'll
@ -35,13 +36,13 @@ func lsFilesCommand(cmd *cobra.Command, args []string) {
//
// So, let's check early that the caller invoked the
// command correctly.
Exit("fatal: did you mean \"git lfs ls-files --all --\" ?")
Exit(tr.Tr.Get("Did you mean \"git lfs ls-files --all --\" ?"))
}
ref = args[0]
if len(args) > 1 {
if lsFilesScanDeleted {
Exit("fatal: cannot use --deleted with reference range")
Exit(tr.Tr.Get("Cannot use --deleted with reference range"))
}
otherRef = args[1]
scanRange = true
@ -64,7 +65,7 @@ func lsFilesCommand(cmd *cobra.Command, args []string) {
gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
if err != nil {
Exit("Could not scan for Git LFS tree: %s", err)
Exit(tr.Tr.Get("Could not scan for Git LFS tree: %s", err))
return
}
@ -79,20 +80,17 @@ func lsFilesCommand(cmd *cobra.Command, args []string) {
}
if debug {
// TRANSLATORS: these strings should have the colons
// aligned in a column.
Print(
"filepath: %s\n"+
" size: %d\n"+
"checkout: %v\n"+
"download: %v\n"+
" oid: %s %s\n"+
" version: %s\n",
p.Name,
p.Size,
fileExistsOfSize(p),
cfg.LFSObjectExists(p.Oid, p.Size),
p.OidType,
p.Oid,
p.Version)
tr.Tr.Get("filepath: %s\n size: %d\ncheckout: %v\ndownload: %v\n oid: %s %s\n version: %s\n",
p.Name,
p.Size,
fileExistsOfSize(p),
cfg.LFSObjectExists(p.Oid, p.Size),
p.OidType,
p.Oid,
p.Version))
} else {
msg := []string{p.Oid[:showOidLen], lsFilesMarker(p), p.Name}
if lsFilesShowNameOnly {
@ -120,12 +118,12 @@ func lsFilesCommand(cmd *cobra.Command, args []string) {
// Do so to avoid showing "mixed" results, e.g., ls-files output
// from a specific historical revision, and the index.
if err := gitscanner.ScanIndex(ref, nil); err != nil {
Exit("Could not scan for Git LFS index: %s", err)
Exit(tr.Tr.Get("Could not scan for Git LFS index: %s", err))
}
}
if lsFilesScanAll {
if err := gitscanner.ScanAll(nil); err != nil {
Exit("Could not scan for Git LFS history: %s", err)
Exit(tr.Tr.Get("Could not scan for Git LFS history: %s", err))
}
} else {
var err error
@ -138,7 +136,7 @@ func lsFilesCommand(cmd *cobra.Command, args []string) {
}
if err != nil {
Exit("Could not scan for Git LFS tree: %s", err)
Exit(tr.Tr.Get("Could not scan for Git LFS tree: %s", err))
}
}
}

@ -11,6 +11,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/git/githistory"
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/git-lfs/gitobj/v2"
"github.com/spf13/cobra"
)
@ -150,7 +151,7 @@ func includeExcludeRefs(l *tasklog.Logger, args []string) (include, exclude []st
}
if migrateEverything && len(args) > 0 {
return nil, nil, errors.New("fatal: cannot use --everything with explicit reference arguments")
return nil, nil, errors.New(tr.Tr.Get("Cannot use --everything with explicit reference arguments"))
}
for _, name := range args {
@ -176,7 +177,7 @@ func includeExcludeRefs(l *tasklog.Logger, args []string) (include, exclude []st
if hardcore {
if migrateEverything {
return nil, nil, errors.New("fatal: cannot use --everything with --include-ref or --exclude-ref")
return nil, nil, errors.New(tr.Tr.Get("Cannot use --everything with --include-ref or --exclude-ref"))
}
// If either --include-ref=<ref> or --exclude-ref=<ref> were
@ -214,7 +215,7 @@ func includeExcludeRefs(l *tasklog.Logger, args []string) (include, exclude []st
} else {
bare, err := git.IsBare()
if err != nil {
return nil, nil, errors.Wrap(err, "fatal: unable to determine bareness")
return nil, nil, errors.Wrap(err, tr.Tr.Get("Unable to determine bareness"))
}
if !bare {
@ -251,7 +252,7 @@ func getRemoteRefs(l *tasklog.Logger) (map[string][]*git.Ref, error) {
}
if !migrateSkipFetch {
w := l.Waiter("migrate: Fetching remote refs")
w := l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("Fetching remote refs")))
if err := git.Fetch(remotes...); err != nil {
return nil, err
}
@ -299,7 +300,7 @@ func currentRefToMigrate() (*git.Ref, error) {
if current.Type == git.RefTypeOther ||
current.Type == git.RefTypeRemoteBranch {
return nil, errors.Errorf("fatal: cannot migrate non-local ref: %s", current.Name)
return nil, errors.Errorf(tr.Tr.Get("Cannot migrate non-local ref: %s", current.Name))
}
return current, nil
}
@ -318,7 +319,7 @@ func ensureWorkingCopyClean(in io.Reader, out io.Writer) {
dirty, err := git.IsWorkingCopyDirty()
if err != nil {
ExitWithError(errors.Wrap(err,
"fatal: could not determine if working copy is dirty"))
tr.Tr.Get("Could not determine if working copy is dirty")))
}
if !dirty {
@ -332,21 +333,23 @@ func ensureWorkingCopyClean(in io.Reader, out io.Writer) {
answer := bufio.NewReader(in)
L:
for {
fmt.Fprintf(out, "migrate: override changes in your working copy? All uncommitted changes will be lost! [y/N] ")
fmt.Fprintf(out, "migrate: %s", tr.Tr.Get("override changes in your working copy? All uncommitted changes will be lost! [y/N] "))
s, err := answer.ReadString('\n')
if err != nil {
if err == io.EOF {
break L
}
ExitWithError(errors.Wrap(err,
"fatal: could not read answer"))
tr.Tr.Get("Could not read answer")))
}
switch strings.TrimSpace(s) {
case "n", "N", "":
// TRANSLATORS: these are negative (no) responses.
case tr.Tr.Get("n"), tr.Tr.Get("N"), "":
proceed = false
break L
case "y", "Y":
// TRANSLATORS: these are positive (yes) responses.
case tr.Tr.Get("y"), tr.Tr.Get("Y"):
proceed = true
break L
}
@ -358,9 +361,9 @@ func ensureWorkingCopyClean(in io.Reader, out io.Writer) {
}
if proceed {
fmt.Fprintf(out, "migrate: changes in your working copy will be overridden ...\n")
fmt.Fprintf(out, "migrate: %s", tr.Tr.Get("changes in your working copy will be overridden ...\n"))
} else {
Exit("migrate: working copy must not be dirty")
Exit(tr.Tr.Get("migrate: working copy must not be dirty"))
}
}

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/git-lfs/gitobj/v2"
"github.com/spf13/cobra"
)
@ -34,7 +35,7 @@ func migrateExportCommand(cmd *cobra.Command, args []string) {
filter := rewriter.Filter()
if len(filter.Include()) <= 0 {
ExitWithError(errors.Errorf("fatal: one or more files must be specified with --include"))
ExitWithError(errors.Errorf(tr.Tr.Get("One or more files must be specified with --include")))
}
tracked := trackedFromExportFilter(filter)
@ -115,7 +116,7 @@ func migrateExportCommand(cmd *cobra.Command, args []string) {
}
remoteURL := getAPIClient().Endpoints.RemoteEndpoint("download", remote).Url
if remoteURL == "" && cmd.Flag("remote").Changed {
ExitWithError(errors.Errorf("fatal: invalid remote %s provided", remote))
ExitWithError(errors.Errorf(tr.Tr.Get("Invalid remote %s provided", remote)))
}
// If we have a valid remote, pre-download all objects using the Transfer Queue
@ -157,7 +158,7 @@ func migrateExportCommand(cmd *cobra.Command, args []string) {
// Only perform `git-checkout(1) -f` if the repository is non-bare.
if bare, _ := git.IsBare(); !bare {
t := l.Waiter("migrate: checkout")
t := l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("checkout")))
err := git.Checkout("", nil, true)
t.Complete()

@ -44,43 +44,43 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
if migrateNoRewrite {
if migrateFixup {
ExitWithError(errors.Errorf("fatal: --no-rewrite and --fixup cannot be combined"))
ExitWithError(errors.Errorf(tr.Tr.Get("--no-rewrite and --fixup cannot be combined")))
}
if len(args) == 0 {
ExitWithError(errors.Errorf("fatal: expected one or more files with --no-rewrite"))
ExitWithError(errors.Errorf(tr.Tr.Get("Expected one or more files with --no-rewrite")))
}
ref, err := git.CurrentRef()
if err != nil {
ExitWithError(errors.Wrap(err, "fatal: unable to find current reference"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to find current reference")))
}
sha, _ := hex.DecodeString(ref.Sha)
commit, err := db.Commit(sha)
if err != nil {
ExitWithError(errors.Wrap(err, "fatal: unable to load commit"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to load commit")))
}
root := commit.TreeID
filter := git.GetAttributeFilter(cfg.LocalWorkingDir(), cfg.LocalGitDir())
if len(filter.Include()) == 0 {
ExitWithError(errors.Errorf("fatal: no Git LFS filters found in .gitattributes"))
ExitWithError(errors.Errorf(tr.Tr.Get("No Git LFS filters found in .gitattributes")))
}
gf := lfs.NewGitFilter(cfg)
for _, file := range args {
if !filter.Allows(file) {
ExitWithError(errors.Errorf("fatal: file %s did not match any Git LFS filters in .gitattributes", file))
ExitWithError(errors.Errorf(tr.Tr.Get("File %s did not match any Git LFS filters in .gitattributes", file)))
}
}
for _, file := range args {
root, err = rewriteTree(gf, db, root, file)
if err != nil {
ExitWithError(errors.Wrapf(err, "fatal: could not rewrite %q", file))
ExitWithError(errors.Wrapf(err, tr.Tr.Get("Could not rewrite %q", file)))
}
}
@ -107,15 +107,15 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
})
if err != nil {
ExitWithError(errors.Wrap(err, "fatal: unable to write commit"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to write commit")))
}
if err := git.UpdateRef(ref, oid, "git lfs migrate import --no-rewrite"); err != nil {
ExitWithError(errors.Wrap(err, "fatal: unable to update ref"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to update ref")))
}
if err := checkoutNonBare(l); err != nil {
ExitWithError(errors.Wrap(err, "fatal: could not checkout"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Could not checkout")))
}
return
@ -124,7 +124,7 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
if migrateFixup {
include, exclude := getIncludeExcludeArgs(cmd)
if include != nil || exclude != nil {
ExitWithError(errors.Errorf("fatal: cannot use --fixup with --include, --exclude"))
ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --fixup with --include, --exclude")))
}
}
@ -137,12 +137,12 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
var fixups *gitattr.Tree
above, err := humanize.ParseBytes(migrateImportAboveFmt)
if err != nil {
ExitWithError(errors.Wrap(err, "fatal: cannot parse --above=<n>"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Cannot parse --above=<n>")))
}
if above > 0 {
include, exclude := getIncludeExcludeArgs(cmd)
if include != nil || exclude != nil || migrateFixup {
ExitWithError(errors.Errorf(tr.Tr.Get("fatal: cannot use --above with --include, --exclude, --fixup")))
ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --above with --include, --exclude, --fixup")))
}
}
@ -261,7 +261,7 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
})
if err := checkoutNonBare(l); err != nil {
ExitWithError(errors.Wrap(err, "fatal: could not checkout"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Could not checkout")))
}
}
@ -271,7 +271,7 @@ func generateMigrateCommitMessage(cmd *cobra.Command, patterns string) string {
if cmd.Flag("message").Changed {
return migrateCommitMessage
}
return fmt.Sprintf("%s: convert to Git LFS", patterns)
return fmt.Sprintf(tr.Tr.Get("%s: convert to Git LFS", patterns))
}
// checkoutNonBare forces a checkout of the current reference, so long as the
@ -283,7 +283,7 @@ func checkoutNonBare(l *tasklog.Logger) error {
return nil
}
t := l.Waiter("migrate: checkout")
t := l.Waiter(fmt.Sprintf("migrate: %s", tr.Tr.Get("checkout")))
defer t.Complete()
return git.Checkout("", nil, true)
@ -392,7 +392,7 @@ func rewriteTree(gf *lfs.GitFilter, db *gitobj.ObjectDatabase, root []byte, path
// Try to replace this blob with a Git LFS pointer.
index := findEntry(tree, splits[0])
if index < 0 {
return nil, errors.Errorf("unable to find entry %s in tree", splits[0])
return nil, errors.Errorf(tr.Tr.Get("unable to find entry %s in tree", splits[0]))
}
blobEntry := tree.Entries[index]
@ -430,12 +430,12 @@ func rewriteTree(gf *lfs.GitFilter, db *gitobj.ObjectDatabase, root []byte, path
index := findEntry(tree, head)
if index < 0 {
return nil, errors.Errorf("unable to find entry %s in tree", head)
return nil, errors.Errorf(tr.Tr.Get("unable to find entry %s in tree", head))
}
subtreeEntry := tree.Entries[index]
if subtreeEntry.Type() != gitobj.TreeObjectType {
return nil, errors.Errorf("migrate: expected %s to be a tree, got %s", head, subtreeEntry.Type())
return nil, errors.Errorf(tr.Tr.Get("migrate: expected %s to be a tree, got %s", head, subtreeEntry.Type()))
}
rewrittenSubtree, err := rewriteTree(gf, db, subtreeEntry.Oid, tail)
@ -452,7 +452,7 @@ func rewriteTree(gf *lfs.GitFilter, db *gitobj.ObjectDatabase, root []byte, path
return db.WriteTree(tree)
default:
return nil, errors.Errorf("error parsing path %s", path)
return nil, errors.Errorf(tr.Tr.Get("error parsing path %s", path))
}
}

@ -15,6 +15,7 @@ import (
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tools/humanize"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/git-lfs/gitobj/v2"
"github.com/spf13/cobra"
)
@ -73,13 +74,13 @@ func migrateInfoCommand(cmd *cobra.Command, args []string) {
above, err := humanize.ParseBytes(migrateInfoAboveFmt)
if err != nil {
ExitWithError(errors.Wrap(err, "cannot parse --above=<n>"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("cannot parse --above=<n>")))
}
if u := cmd.Flag("unit"); u.Changed {
unit, err := humanize.ParseByteUnit(u.Value.String())
if err != nil {
ExitWithError(errors.Wrap(err, "cannot parse --unit=<unit>"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("cannot parse --unit=<unit>")))
}
migrateInfoUnit = unit
@ -95,17 +96,17 @@ func migrateInfoCommand(cmd *cobra.Command, args []string) {
case "ignore":
migrateInfoPointersMode = migrateInfoPointersIgnore
default:
ExitWithError(errors.Errorf("fatal: unsupported --pointers option value"))
ExitWithError(errors.Errorf(tr.Tr.Get("Unsupported --pointers option value")))
}
}
if migrateFixup {
include, exclude := getIncludeExcludeArgs(cmd)
if include != nil || exclude != nil {
ExitWithError(errors.Errorf("fatal: cannot use --fixup with --include, --exclude"))
ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --fixup with --include, --exclude")))
}
if pointers.Changed && migrateInfoPointersMode != migrateInfoPointersIgnore {
ExitWithError(errors.Errorf("fatal: cannot use --fixup with --pointers=%s", pointers.Value.String()))
ExitWithError(errors.Errorf(tr.Tr.Get("Cannot use --fixup with --pointers=%s", pointers.Value.String())))
}
migrateInfoPointersMode = migrateInfoPointersIgnore
}
@ -302,8 +303,16 @@ func (e EntriesBySize) Print(to io.Writer) (int, error) {
size = humanize.FormatBytes(bytesAbove)
}
stat := fmt.Sprintf("%d/%d files(s)",
above, total)
// TRANSLATORS: The strings here are intended to have the same
// display width including spaces, so please insert trailing
// spaces as necessary for your language.
stat := fmt.Sprintf(tr.Tr.GetN(
"%d/%d file ",
"%d/%d files",
int(total),
above,
total,
))
percentage := fmt.Sprintf("%.0f%%", percentAbove)

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -35,16 +36,16 @@ func pointerCommand(cmd *cobra.Command, args []string) {
var err error
if pointerStrict && pointerNoStrict {
ExitWithError(fmt.Errorf("fatal: cannot combine --strict with --no-strict"))
ExitWithError(errors.New(tr.Tr.Get("Cannot combine --strict with --no-strict")))
}
if len(pointerCompare) > 0 {
ExitWithError(fmt.Errorf("fatal: cannot combine --check with --compare"))
ExitWithError(errors.New(tr.Tr.Get("Cannot combine --check with --compare")))
}
if len(pointerFile) > 0 {
if pointerStdin {
ExitWithError(fmt.Errorf("fatal: with --check, --file cannot be combined with --stdin"))
ExitWithError(errors.New(tr.Tr.Get("With --check, --file cannot be combined with --stdin")))
}
r, err = os.Open(pointerFile)
if err != nil {
@ -53,7 +54,7 @@ func pointerCommand(cmd *cobra.Command, args []string) {
} else if pointerStdin {
r = ioutil.NopCloser(os.Stdin)
} else {
ExitWithError(fmt.Errorf("fatal: must specify either --file or --stdin with --compare"))
ExitWithError(errors.New(tr.Tr.Get("Must specify either --file or --stdin with --compare")))
}
p, err := lfs.DecodePointer(r)
@ -89,7 +90,7 @@ func pointerCommand(cmd *cobra.Command, args []string) {
}
ptr := lfs.NewPointer(hex.EncodeToString(oidHash.Sum(nil)), size, nil)
fmt.Fprintf(os.Stderr, "Git LFS pointer for %s\n\n", pointerFile)
fmt.Fprintf(os.Stderr, tr.Tr.Get("Git LFS pointer for %s\n\n", pointerFile))
buf := &bytes.Buffer{}
lfs.EncodePointer(io.MultiWriter(os.Stdout, buf), ptr)
@ -99,7 +100,7 @@ func pointerCommand(cmd *cobra.Command, args []string) {
Error(err.Error())
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "\nGit blob OID: %s\n\n", buildOid)
fmt.Fprintf(os.Stderr, tr.Tr.Get("\nGit blob OID: %s\n\n", buildOid))
}
} else {
comparing = false
@ -122,7 +123,7 @@ func pointerCommand(cmd *cobra.Command, args []string) {
if !pointerStdin {
pointerName = pointerCompare
}
fmt.Fprintf(os.Stderr, "Pointer from %s\n\n", pointerName)
fmt.Fprintf(os.Stderr, tr.Tr.Get("Pointer from %s\n\n", pointerName))
if err != nil {
Error(err.Error())
@ -136,17 +137,17 @@ func pointerCommand(cmd *cobra.Command, args []string) {
Error(err.Error())
os.Exit(1)
}
fmt.Fprintf(os.Stderr, "\nGit blob OID: %s\n", compareOid)
fmt.Fprintf(os.Stderr, tr.Tr.Get("\nGit blob OID: %s\n", compareOid))
}
}
if comparing && buildOid != compareOid {
fmt.Fprintf(os.Stderr, "\nPointers do not match\n")
fmt.Fprintf(os.Stderr, tr.Tr.Get("\nPointers do not match\n"))
os.Exit(1)
}
if !something {
Error("Nothing to do!")
Error(tr.Tr.Get("Nothing to do!"))
os.Exit(1)
}
}
@ -154,13 +155,13 @@ func pointerCommand(cmd *cobra.Command, args []string) {
func pointerReader() (io.ReadCloser, error) {
if len(pointerCompare) > 0 {
if pointerStdin {
return nil, errors.New("cannot read from STDIN and --pointer")
return nil, errors.New(tr.Tr.Get("cannot read from STDIN and --pointer"))
}
return os.Open(pointerCompare)
}
requireStdin("The --stdin flag expects a pointer file from STDIN.")
requireStdin(tr.Tr.Get("The --stdin flag expects a pointer file from STDIN."))
return os.Stdin, nil
}

@ -5,6 +5,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/locking"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -21,7 +22,7 @@ import (
// optimising that as best it can based on the available information.
func postCheckoutCommand(cmd *cobra.Command, args []string) {
if len(args) != 3 {
Print("This should be run through Git's post-checkout hook. Run `git lfs update` to install it.")
Print(tr.Tr.Get("This should be run through Git's post-checkout hook. Run `git lfs update` to install it."))
os.Exit(1)
}
@ -54,13 +55,13 @@ func postCheckoutRevChange(client *locking.Client, pre, post string) {
files, err := git.GetFilesChanged(pre, post)
if err != nil {
LoggedError(err, "Warning: post-checkout rev diff %v:%v failed: %v\nFalling back on full scan.", pre, post, err)
LoggedError(err, tr.Tr.Get("Warning: post-checkout rev diff %v:%v failed: %v\nFalling back on full scan.", pre, post, err))
postCheckoutFileChange(client)
}
tracerx.Printf("post-checkout: checking write flags on %v", files)
err = client.FixLockableFileWriteFlags(files)
if err != nil {
LoggedError(err, "Warning: post-checkout locked file check failed: %v", err)
LoggedError(err, tr.Tr.Get("Warning: post-checkout locked file check failed: %v", err))
}
}
@ -71,7 +72,7 @@ func postCheckoutFileChange(client *locking.Client) {
// so we have to check the entire repo
err := client.FixAllLockableFileWriteFlags()
if err != nil {
LoggedError(err, "Warning: post-checkout locked file check failed: %v", err)
LoggedError(err, tr.Tr.Get("Warning: post-checkout locked file check failed: %v", err))
}
}

@ -4,6 +4,7 @@ import (
"os"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -37,13 +38,13 @@ func postCommitCommand(cmd *cobra.Command, args []string) {
files, err := git.GetFilesChanged("HEAD", "")
if err != nil {
LoggedError(err, "Warning: post-commit failed: %v", err)
LoggedError(err, tr.Tr.Get("Warning: post-commit failed: %v", err))
os.Exit(1)
}
tracerx.Printf("post-commit: checking write flags on %v", files)
err = lockClient.FixLockableFileWriteFlags(files)
if err != nil {
LoggedError(err, "Warning: post-commit locked file check failed: %v", err)
LoggedError(err, tr.Tr.Get("Warning: post-commit locked file check failed: %v", err))
}
}

@ -3,6 +3,7 @@ package commands
import (
"os"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -13,7 +14,7 @@ import (
// optimising that as best it can based on the available information.
func postMergeCommand(cmd *cobra.Command, args []string) {
if len(args) != 1 {
Print("This should be run through Git's post-merge hook. Run `git lfs update` to install it.")
Print(tr.Tr.Get("This should be run through Git's post-merge hook. Run `git lfs update` to install it."))
os.Exit(1)
}
@ -41,7 +42,7 @@ func postMergeCommand(cmd *cobra.Command, args []string) {
// so we have to check the entire repo
err := lockClient.FixAllLockableFileWriteFlags()
if err != nil {
LoggedError(err, "Warning: post-merge locked file check failed: %v", err)
LoggedError(err, tr.Tr.Get("Warning: post-merge locked file check failed: %v", err))
}
}

@ -7,6 +7,7 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -39,7 +40,7 @@ var (
// made.
func prePushCommand(cmd *cobra.Command, args []string) {
if len(args) == 0 {
Print("This should be run through Git's pre-push hook. Run `git lfs update` to install it.")
Print(tr.Tr.Get("This should be run through Git's pre-push hook. Run `git lfs update` to install it."))
os.Exit(1)
}
@ -52,7 +53,7 @@ func prePushCommand(cmd *cobra.Command, args []string) {
// Remote is first arg
remote, _ := git.MapRemoteURL(args[0], true)
if err := cfg.SetValidPushRemote(remote); err != nil {
Exit("Invalid remote name %q: %s", args[0], err)
Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
}
ctx := newUploadContext(prePushDryRun)

@ -9,6 +9,7 @@ import (
"sync"
"time"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/filepathfilter"
"github.com/git-lfs/git-lfs/v3/fs"
"github.com/git-lfs/git-lfs/v3/git"
@ -17,6 +18,7 @@ import (
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tools/humanize"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
"golang.org/x/sync/semaphore"
@ -34,7 +36,7 @@ var (
func pruneCommand(cmd *cobra.Command, args []string) {
// Guts of this must be re-usable from fetch --prune so just parse & dispatch
if pruneVerifyArg && pruneDoNotVerifyArg {
Exit("Cannot specify both --verify-remote and --no-verify-remote")
Exit(tr.Tr.Get("Cannot specify both --verify-remote and --no-verify-remote"))
}
fetchPruneConfig := lfs.NewFetchPruneConfig(cfg.Git)
@ -198,7 +200,12 @@ func prune(fetchPruneConfig lfs.FetchPruneConfig, verifyRemote, dryRun, verbose
info := tasklog.NewSimpleTask()
logger.Enqueue(info)
if dryRun {
info.Logf("prune: %d file(s) would be pruned (%s)", len(prunableObjects), humanize.FormatBytes(uint64(totalSize)))
info.Logf("prune: %s", tr.Tr.GetN(
"%d file would be pruned (%s)",
"%d files would be pruned (%s)",
len(prunableObjects),
len(prunableObjects),
humanize.FormatBytes(uint64(totalSize))))
for _, item := range verboseOutput {
info.Logf("\n * %s", item)
}
@ -232,16 +239,16 @@ func pruneCheckVerified(prunableObjects []string, reachableObjects, verifiedObje
// deleted but that's incorrect; bad state has occurred somehow, might need
// push --all to resolve
if problems.Len() > 0 {
Exit("Abort: these objects to be pruned are missing on remote:\n%v", problems.String())
Exit(tr.Tr.Get("These objects to be pruned are missing on remote:\n%v", problems.String()))
}
}
func pruneCheckErrors(taskErrors []error) {
if len(taskErrors) > 0 {
for _, err := range taskErrors {
LoggedError(err, "Prune error: %v", err)
LoggedError(err, tr.Tr.Get("Prune error: %v", err))
}
Exit("Prune sub-tasks failed, cannot continue")
Exit(tr.Tr.Get("Prune sub-tasks failed, cannot continue"))
}
}
@ -266,9 +273,11 @@ func pruneTaskDisplayProgress(progressChan PruneProgressChan, waitg *sync.WaitGr
case PruneProgressTypeVerify:
verifyCount++
}
msg = fmt.Sprintf("prune: %d local object(s), %d retained", localCount, retainCount)
msg = fmt.Sprintf("prune: %s, %s",
tr.Tr.GetN("%d local object", "%d local objects", localCount, localCount),
tr.Tr.GetN("%d retained", "%d retained", retainCount, retainCount))
if verifyCount > 0 {
msg += fmt.Sprintf(", %d verified with remote", verifyCount)
msg += tr.Tr.GetN(", %d verified with remote", ", %d verified with remote", verifyCount, verifyCount)
}
task.Log(msg)
}
@ -296,7 +305,7 @@ func pruneTaskCollectErrors(outtaskErrors *[]error, errorChan chan error, errorw
}
func pruneDeleteFiles(prunableObjects []string, logger *tasklog.Logger) {
task := logger.Percentage("prune: Deleting objects", uint64(len(prunableObjects)))
task := logger.Percentage(fmt.Sprintf("prune: %s", tr.Tr.Get("Deleting objects")), uint64(len(prunableObjects)))
var problems bytes.Buffer
// In case we fail to delete some
@ -304,7 +313,7 @@ func pruneDeleteFiles(prunableObjects []string, logger *tasklog.Logger) {
for _, oid := range prunableObjects {
mediaFile, err := cfg.Filesystem().ObjectPath(oid)
if err != nil {
problems.WriteString(fmt.Sprintf("Unable to find media path for %v: %v\n", oid, err))
problems.WriteString(tr.Tr.Get("Unable to find media path for %v: %v\n", oid, err))
continue
}
if mediaFile == os.DevNull {
@ -312,15 +321,15 @@ func pruneDeleteFiles(prunableObjects []string, logger *tasklog.Logger) {
}
err = os.Remove(mediaFile)
if err != nil {
problems.WriteString(fmt.Sprintf("Failed to remove file %v: %v\n", mediaFile, err))
problems.WriteString(tr.Tr.Get("Failed to remove file %v: %v\n", mediaFile, err))
continue
}
deletedFiles++
task.Count(1)
}
if problems.Len() > 0 {
LoggedError(fmt.Errorf("failed to delete some files"), problems.String())
Exit("Prune failed, see errors above")
LoggedError(errors.New(tr.Tr.Get("failed to delete some files")), problems.String())
Exit(tr.Tr.Get("Prune failed, see errors above"))
}
}
@ -407,7 +416,7 @@ func pruneTaskGetRetainedCurrentAndRecentRefs(gitscanner *lfs.GitScanner, fetchc
// Keep all recent refs including any recent remote branches
refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, "")
if err != nil {
Panic(err, "Could not scan for recent refs")
Panic(err, tr.Tr.Get("Could not scan for recent refs"))
}
for _, ref := range refs {
if commits.Add(ref.Sha) {
@ -426,7 +435,7 @@ func pruneTaskGetRetainedCurrentAndRecentRefs(gitscanner *lfs.GitScanner, fetchc
// We measure from the last commit at the ref
summ, err := git.GetCommitSummary(commit)
if err != nil {
errorChan <- fmt.Errorf("couldn't scan commits at %v: %v", commit, err)
errorChan <- errors.New(tr.Tr.Get("couldn't scan commits at %v: %v", commit, err))
continue
}
commitsSince := summ.CommitDate.AddDate(0, 0, -pruneCommitDays)

@ -11,6 +11,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -22,7 +23,7 @@ func pullCommand(cmd *cobra.Command, args []string) {
if len(args) > 0 {
// Remote is first arg
if err := cfg.SetValidRemote(args[0]); err != nil {
Exit("Invalid remote name %q: %s", args[0], err)
Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
}
}
@ -34,7 +35,7 @@ func pullCommand(cmd *cobra.Command, args []string) {
func pull(filter *filepathfilter.Filter) {
ref, err := git.CurrentRef()
if err != nil {
Panic(err, "Could not pull")
Panic(err, tr.Tr.Get("Could not pull"))
}
pointers := newPointerMap()
@ -49,7 +50,7 @@ func pull(filter *filepathfilter.Filter) {
q := newDownloadQueue(singleCheckout.Manifest(), remote, tq.WithProgress(meter))
gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
if err != nil {
LoggedError(err, "Scanner error: %s", err)
LoggedError(err, tr.Tr.Get("Scanner error: %s", err))
return
}
@ -108,11 +109,11 @@ func pull(filter *filepathfilter.Filter) {
if !success {
c := getAPIClient()
e := c.Endpoints.Endpoint("download", remote)
Exit("error: failed to fetch some objects from '%s'", e.Url)
Exit(tr.Tr.Get("Failed to fetch some objects from '%s'", e.Url))
}
if singleCheckout.Skip() {
fmt.Println("Skipping object checkout, Git LFS is not installed.")
fmt.Println(tr.Tr.Get("Skipping object checkout, Git LFS is not installed."))
}
}

@ -7,6 +7,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
@ -31,7 +32,7 @@ var (
// of commits between the local and remote git servers.
func pushCommand(cmd *cobra.Command, args []string) {
if len(args) == 0 {
Print("Specify a remote and a remote branch name (`git lfs push origin main`)")
Print(tr.Tr.Get("Specify a remote and a remote branch name (`git lfs push origin main`)"))
os.Exit(1)
}
@ -39,23 +40,18 @@ func pushCommand(cmd *cobra.Command, args []string) {
// Remote is first arg
if err := cfg.SetValidPushRemote(args[0]); err != nil {
Exit("Invalid remote name %q: %s", args[0], err)
Exit(tr.Tr.Get("Invalid remote name %q: %s", args[0], err))
}
ctx := newUploadContext(pushDryRun)
if pushObjectIDs {
if len(args) < 2 {
Print("Usage: git lfs push --object-id <remote> <lfs-object-id> [lfs-object-id] ...")
Print(tr.Tr.Get("At least one object ID must be supplied with --object-id"))
return
}
uploadsWithObjectIDs(ctx, args[1:])
} else {
if len(args) < 1 {
Print("Usage: git lfs push --dry-run <remote> [ref]")
return
}
uploadsBetweenRefAndRemote(ctx, args[1:])
}
}
@ -66,7 +62,7 @@ func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) {
updates, err := lfsPushRefs(refnames, pushAll)
if err != nil {
Error(err.Error())
Exit("Error getting local refs.")
Exit(tr.Tr.Get("Error getting local refs."))
}
if err := uploadForRefUpdates(ctx, updates, pushAll); err != nil {
@ -79,12 +75,12 @@ func uploadsWithObjectIDs(ctx *uploadContext, oids []string) {
for i, oid := range oids {
mp, err := ctx.gitfilter.ObjectPath(oid)
if err != nil {
ExitWithError(errors.Wrap(err, "Unable to find local media path:"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to find local media path:")))
}
stat, err := os.Stat(mp)
if err != nil {
ExitWithError(errors.Wrap(err, "Unable to stat local media path"))
ExitWithError(errors.Wrap(err, tr.Tr.Get("Unable to stat local media path")))
}
pointers[i] = &lfs.WrappedPointer{

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tools/humanize"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -44,7 +45,7 @@ func delayedSmudge(gf *lfs.GitFilter, s *git.FilterProcessScanner, to io.Writer,
if n != 0 {
return 0, false, nil, errors.NewNotAPointerError(errors.Errorf(
"Unable to parse pointer at: %q", filename,
tr.Tr.Get("Unable to parse pointer at: %q", filename),
))
}
return 0, false, nil, nil
@ -107,7 +108,7 @@ func smudge(gf *lfs.GitFilter, to io.Writer, from io.Reader, filename string, sk
if n != 0 {
return 0, errors.NewNotAPointerError(errors.Errorf(
"Unable to parse pointer at: %q", filename,
tr.Tr.Get("Unable to parse pointer at: %q", filename),
))
}
return 0, nil
@ -138,7 +139,7 @@ func smudge(gf *lfs.GitFilter, to io.Writer, from io.Reader, filename string, sk
oid = oid[:7]
}
LoggedError(err, "Error downloading object: %s (%s): %s", filename, oid, err)
LoggedError(err, tr.Tr.Get("Error downloading object: %s (%s): %s", filename, oid, err))
if !cfg.SkipDownloadErrors() {
os.Exit(2)
}
@ -149,7 +150,7 @@ func smudge(gf *lfs.GitFilter, to io.Writer, from io.Reader, filename string, sk
}
func smudgeCommand(cmd *cobra.Command, args []string) {
requireStdin("This command should be run by the Git 'smudge' filter")
requireStdin(tr.Tr.Get("This command should be run by the Git 'smudge' filter"))
setupRepository()
installHooks(false)
@ -166,7 +167,7 @@ func smudgeCommand(cmd *cobra.Command, args []string) {
Error(err.Error())
}
} else if possiblyMalformedObjectSize(n) {
fmt.Fprintln(os.Stderr, "Possibly malformed smudge on Windows: see `git lfs help smudge` for more info.")
fmt.Fprintln(os.Stderr, tr.Tr.Get("Possibly malformed smudge on Windows: see `git lfs help smudge` for more info."))
}
}
@ -174,7 +175,7 @@ func smudgeFilename(args []string) string {
if len(args) > 0 {
return args[0]
}
return "<unknown file>"
return tr.Tr.Get("<unknown file>")
}
func possiblyMalformedObjectSize(n int64) bool {

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -56,7 +57,7 @@ func statusCommand(cmd *cobra.Command, args []string) {
wd = tools.ResolveSymlinks(wd)
Print("\nObjects to be committed:\n")
Print(tr.Tr.Get("\nObjects to be committed:\n"))
for _, entry := range staged {
// Find a path from the current working directory to the
// absolute path of each side of the entry.
@ -71,7 +72,7 @@ func statusCommand(cmd *cobra.Command, args []string) {
}
}
Print("\nObjects not staged for commit:\n")
Print(tr.Tr.Get("\nObjects not staged for commit:\n"))
for _, entry := range unstaged {
src := relativize(wd, filepath.Join(repo, entry.SrcName))
@ -128,7 +129,7 @@ func blobInfo(s *lfs.PointerScanner, blobSha, name string) (sha, from string, er
s.Scan(blobSha)
if err := s.Err(); err != nil {
if git.IsMissingObject(err) {
return "<missing>", "?", nil
return tr.Tr.Get("<missing>"), "?", nil
}
return "", "", err
}
@ -145,7 +146,7 @@ func blobInfo(s *lfs.PointerScanner, blobSha, name string) (sha, from string, er
f, err := os.Open(filepath.Join(cfg.LocalWorkingDir(), name))
if os.IsNotExist(err) {
return "deleted", "File", nil
return tr.Tr.Get("deleted"), tr.Tr.Get("File"), nil
}
if err != nil {
return "", "", err
@ -154,7 +155,7 @@ func blobInfo(s *lfs.PointerScanner, blobSha, name string) (sha, from string, er
// We've replaced a file with a directory.
if fi, err := f.Stat(); err == nil && fi.Mode().IsDir() {
return "deleted", "File", nil
return tr.Tr.Get("deleted"), tr.Tr.Get("File"), nil
}
shasum := sha256.New()
@ -162,7 +163,7 @@ func blobInfo(s *lfs.PointerScanner, blobSha, name string) (sha, from string, er
return "", "", err
}
return fmt.Sprintf("%x", shasum.Sum(nil))[:7], "File", nil
return fmt.Sprintf("%x", shasum.Sum(nil))[:7], tr.Tr.Get("File"), nil
}
func scanIndex(ref string) (staged, unstaged []*lfs.DiffIndexEntry, err error) {
@ -225,7 +226,7 @@ func statusScanRefRange(ref *git.Ref) {
return
}
Print("On branch %s", ref.Name)
Print(tr.Tr.Get("On branch %s", ref.Name))
remoteRef, err := cfg.GitConfig().CurrentRemoteRef()
if err != nil {
@ -234,7 +235,7 @@ func statusScanRefRange(ref *git.Ref) {
gitscanner := lfs.NewGitScanner(cfg, func(p *lfs.WrappedPointer, err error) {
if err != nil {
Panic(err, "Could not scan for Git LFS objects")
Panic(err, tr.Tr.Get("Could not scan for Git LFS objects"))
return
}
@ -242,9 +243,9 @@ func statusScanRefRange(ref *git.Ref) {
})
defer gitscanner.Close()
Print("Objects to be pushed to %s:\n", remoteRef.Name)
Print(tr.Tr.Get("Objects to be pushed to %s:\n", remoteRef.Name))
if err := gitscanner.ScanRefRange(ref.Sha, remoteRef.Sha, nil); err != nil {
Panic(err, "Could not scan for Git LFS objects")
Panic(err, tr.Tr.Get("Could not scan for Git LFS objects"))
}
}

@ -13,6 +13,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/git/gitattr"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -59,7 +60,7 @@ func trackCommand(cmd *cobra.Command, args []string) {
wd = tools.ResolveSymlinks(wd)
relpath, err := filepath.Rel(cfg.LocalWorkingDir(), wd)
if err != nil {
Exit("Current directory %q outside of git working directory %q.", wd, cfg.LocalWorkingDir())
Exit(tr.Tr.Get("Current directory %q outside of git working directory %q.", wd, cfg.LocalWorkingDir()))
}
changedAttribLines := make(map[string]string)
@ -84,7 +85,7 @@ ArgsLoop:
((trackLockableFlag && known.Lockable) || // enabling lockable & already lockable (no change)
(trackNotLockableFlag && !known.Lockable) || // disabling lockable & not lockable (no change)
(!trackLockableFlag && !trackNotLockableFlag)) { // leave lockable as-is in all cases
Print("%q already supported", pattern)
Print(tr.Tr.Get("%q already supported", pattern))
continue ArgsLoop
}
}
@ -103,7 +104,7 @@ ArgsLoop:
writeablePatterns = append(writeablePatterns, pattern)
}
Print("Tracking %q", unescapeAttrPattern(encodedArg))
Print(tr.Tr.Get("Tracking %q", unescapeAttrPattern(encodedArg)))
}
// Now read the whole local attributes file and iterate over the contents,
@ -173,22 +174,22 @@ ArgsLoop:
// the repository, the leading slash is simply removed for its
// implicit counterpart.
if trackVerboseLoggingFlag {
Print("Searching for files matching pattern: %s", pattern)
Print(tr.Tr.Get("Searching for files matching pattern: %s", pattern))
}
gittracked, err := git.GetTrackedFiles(pattern)
if err != nil {
Exit("Error getting tracked files for %q: %s", pattern, err)
Exit(tr.Tr.Get("Error getting tracked files for %q: %s", pattern, err))
}
if trackVerboseLoggingFlag {
Print("Found %d files previously added to Git matching pattern: %s", len(gittracked), pattern)
Print(tr.Tr.Get("Found %d files previously added to Git matching pattern: %s", len(gittracked), pattern))
}
var matchedBlocklist bool
for _, f := range gittracked {
if forbidden := blocklistItem(f); forbidden != "" {
Print("Pattern %s matches forbidden file %s. If you would like to track %s, modify .gitattributes manually.", pattern, f, f)
Print(tr.Tr.Get("Pattern %s matches forbidden file %s. If you would like to track %s, modify .gitattributes manually.", pattern, f, f))
matchedBlocklist = true
}
}
@ -198,14 +199,14 @@ ArgsLoop:
for _, f := range gittracked {
if trackVerboseLoggingFlag || trackDryRunFlag {
Print("Git LFS: touching %q", f)
Print(tr.Tr.Get("Git LFS: touching %q", f))
}
if !trackDryRunFlag {
now := time.Now()
err := os.Chtimes(f, now, now)
if err != nil {
LoggedError(err, "Error marking %q modified: %s", f, err)
LoggedError(err, tr.Tr.Get("Error marking %q modified: %s", f, err))
continue
}
}
@ -216,7 +217,7 @@ ArgsLoop:
lockClient := newLockClient()
err = lockClient.FixFileWriteFlagsInDir(relpath, readOnlyPatterns, writeablePatterns)
if err != nil {
LoggedError(err, "Error changing lockable file permissions: %s", err)
LoggedError(err, tr.Tr.Get("Error changing lockable file permissions: %s", err))
}
}
@ -226,10 +227,11 @@ func listPatterns() {
return
}
Print("Listing tracked patterns")
Print(tr.Tr.Get("Listing tracked patterns"))
for _, t := range knownPatterns {
if t.Lockable {
Print(" %s [lockable] (%s)", t.Path, t.Source)
// TRANSLATORS: Leading spaces here should be preserved.
Print(tr.Tr.Get(" %s [lockable] (%s)", t.Path, t.Source))
} else if t.Tracked {
Print(" %s (%s)", t.Path, t.Source)
}
@ -239,7 +241,7 @@ func listPatterns() {
return
}
Print("Listing excluded patterns")
Print(tr.Tr.Get("Listing excluded patterns"))
for _, t := range knownPatterns {
if !t.Tracked && !t.Lockable {
Print(" %s (%s)", t.Path, t.Source)

@ -2,13 +2,14 @@ package commands
import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
// uninstallCmd removes any configuration and hooks set by Git LFS.
func uninstallCommand(cmd *cobra.Command, args []string) {
if err := cmdInstallOptions().Uninstall(); err != nil {
Print("WARNING: %s", err.Error())
Print(tr.Tr.Get("warning: %s", err.Error()))
}
if !skipRepoInstall && (localInstall || worktreeInstall || cfg.InRepo()) {
@ -16,9 +17,9 @@ func uninstallCommand(cmd *cobra.Command, args []string) {
}
if systemInstall {
Print("System Git LFS configuration has been removed.")
Print(tr.Tr.Get("System Git LFS configuration has been removed."))
} else if !(localInstall || worktreeInstall) {
Print("Global Git LFS configuration has been removed.")
Print(tr.Tr.Get("Global Git LFS configuration has been removed."))
}
}
@ -28,7 +29,7 @@ func uninstallHooksCommand(cmd *cobra.Command, args []string) {
Error(err.Error())
}
Print("Hooks for this repository have been removed.")
Print(tr.Tr.Get("Hooks for this repository have been removed."))
}
func init() {

@ -2,12 +2,12 @@ package commands
import (
"encoding/json"
"fmt"
"os"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/locking"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -25,8 +25,6 @@ type unlockFlags struct {
Force bool
}
var unlockUsage = "Usage: git lfs unlock (--id my-lock-id | <path>)"
type unlockResponse struct {
Id string `json:"id,omitempty"`
Path string `json:"path,omitempty"`
@ -53,7 +51,7 @@ func unlockCommand(cmd *cobra.Command, args []string) {
if hasPath == hasId {
// If there is both an `--id` AND a `<path>`, or there is
// neither, print the usage and quit.
Exit(unlockUsage)
Exit(tr.Tr.Get("Exactly one of --id or a set of paths must be provided"))
}
if len(lockRemote) > 0 {
@ -72,7 +70,7 @@ func unlockCommand(cmd *cobra.Command, args []string) {
path, err := lockPath(pathspec)
if err != nil {
if !unlockCmdFlags.Force {
locks = handleUnlockError(locks, "", path, fmt.Errorf("Unable to determine path: %v", err.Error()))
locks = handleUnlockError(locks, "", path, errors.New(tr.Tr.Get("Unable to determine path: %v", err.Error())))
success = false
continue
}
@ -107,13 +105,13 @@ func unlockCommand(cmd *cobra.Command, args []string) {
err := lockClient.UnlockFileById(unlockCmdFlags.Id, unlockCmdFlags.Force)
if err != nil {
locks = handleUnlockError(locks, unlockCmdFlags.Id, "", fmt.Errorf("Unable to unlock %v: %v", unlockCmdFlags.Id, errors.Cause(err)))
locks = handleUnlockError(locks, unlockCmdFlags.Id, "", errors.New(tr.Tr.Get("Unable to unlock %v: %v", unlockCmdFlags.Id, errors.Cause(err))))
success = false
} else if !locksCmdFlags.JSON {
Print("Unlocked Lock %s", unlockCmdFlags.Id)
Print(tr.Tr.Get("Unlocked Lock %s", unlockCmdFlags.Id))
}
} else {
Error(unlockUsage)
Exit(tr.Tr.Get("Exactly one of --id or a set of paths must be provided"))
}
if locksCmdFlags.JSON {
@ -145,9 +143,9 @@ func unlockAbortIfFileModified(path string) error {
if modified {
if unlockCmdFlags.Force {
// Only a warning
Error("Warning: unlocking with uncommitted changes because --force")
Error(tr.Tr.Get("warning: unlocking with uncommitted changes because --force"))
} else {
return fmt.Errorf("Cannot unlock file with uncommitted changes")
return errors.New(tr.Tr.Get("Cannot unlock file with uncommitted changes"))
}
}
@ -174,7 +172,7 @@ func unlockAbortIfFileModifiedById(id string, lockClient *locking.Client) error
func init() {
RegisterCommand("unlock", unlockCommand, func(cmd *cobra.Command) {
cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", lockRemoteHelp)
cmd.Flags().StringVarP(&lockRemote, "remote", "r", "", "specify which remote to use when interacting with locks")
cmd.Flags().StringVarP(&unlockCmdFlags.Id, "id", "i", "", "unlock a lock by its ID")
cmd.Flags().BoolVarP(&unlockCmdFlags.Force, "force", "f", false, "forcibly break another user's lock(s)")
cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json")

@ -6,6 +6,7 @@ import (
"os"
"strings"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -17,7 +18,7 @@ func untrackCommand(cmd *cobra.Command, args []string) {
installHooks(false)
if len(args) < 1 {
Print("git lfs untrack <path> [path]*")
Print(tr.Tr.Get("git lfs untrack <path> [path]*"))
return
}
@ -30,7 +31,7 @@ func untrackCommand(cmd *cobra.Command, args []string) {
attributesFile, err := os.Create(".gitattributes")
if err != nil {
Print("Error opening .gitattributes for writing")
Print(tr.Tr.Get("Error opening .gitattributes for writing"))
return
}
defer attributesFile.Close()
@ -48,7 +49,7 @@ func untrackCommand(cmd *cobra.Command, args []string) {
path := strings.Fields(line)[0]
if removePath(path, args) {
Print("Untracking %q", unescapeAttrPattern(path))
Print(tr.Tr.Get("Untracking %q", unescapeAttrPattern(path)))
} else {
attributesFile.WriteString(line + "\n")
}

@ -3,6 +3,7 @@ package commands
import (
"regexp"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/spf13/cobra"
)
@ -30,10 +31,10 @@ func updateCommand(cmd *cobra.Command, args []string) {
case "basic":
case "private":
cfg.SetGitLocalKey(key, "basic")
Print("Updated %s access from %s to %s.", matches[1], value, "basic")
Print(tr.Tr.Get("Updated %s access from %s to %s.", matches[1], value, "basic"))
default:
cfg.UnsetGitLocalKey(key)
Print("Removed invalid %s access of %s.", matches[1], value)
Print(tr.Tr.Get("Removed invalid %s access of %s.", matches[1], value))
}
}
@ -46,9 +47,9 @@ func updateCommand(cmd *cobra.Command, args []string) {
} else {
if err := installHooks(updateForce); err != nil {
Error(err.Error())
Exit("To resolve this, either:\n 1: run `git lfs update --manual` for instructions on how to merge hooks.\n 2: run `git lfs update --force` to overwrite your hook.")
Exit(tr.Tr.Get("To resolve this, either:\n 1: run `git lfs update --manual` for instructions on how to merge hooks.\n 2: run `git lfs update --force` to overwrite your hook."))
} else {
Print("Updated git hooks.")
Print(tr.Tr.Get("Updated git hooks."))
}
}

@ -330,7 +330,7 @@ func setupRepository() {
bare, err := git.IsBare()
if err != nil {
ExitWithError(errors.Wrap(
err, "fatal: could not determine bareness"))
err, "Could not determine bareness"))
}
verifyRepositoryVersion()
@ -362,12 +362,12 @@ func changeToWorkingCopy() {
cwd, err := tools.Getwd()
if err != nil {
ExitWithError(errors.Wrap(
err, "fatal: could not determine current working directory"))
err, "Could not determine current working directory"))
}
cwd, err = tools.CanonicalizeSystemPath(cwd)
if err != nil {
ExitWithError(errors.Wrap(
err, "fatal: could not canonicalize current working directory"))
err, "Could not canonicalize current working directory"))
}
// If the current working directory is not within the repository's

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/locking"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
)
type verifyState byte
@ -63,12 +64,12 @@ func (lv *lockVerifier) Verify(ref *git.Ref) {
} else if lv.verifyState == verifyStateUnknown || lv.verifyState == verifyStateEnabled {
if errors.IsAuthError(err) {
if lv.verifyState == verifyStateUnknown {
Error("WARNING: Authentication error: %s", err)
Error(tr.Tr.Get("warning: Authentication error: %s", err))
} else if lv.verifyState == verifyStateEnabled {
Exit("ERROR: Authentication error: %s", err)
Exit(tr.Tr.Get("error: Authentication error: %s", err))
}
} else {
Print("Remote %q does not support the LFS locking API. Consider disabling it with:", cfg.PushRemote())
Print(tr.Tr.Get("Remote %q does not support the LFS locking API. Consider disabling it with:", cfg.PushRemote()))
Print(" $ git config lfs.%s.locksverify false", lv.endpoint.Url)
if lv.verifyState == verifyStateEnabled {
ExitWithError(err)
@ -76,8 +77,8 @@ func (lv *lockVerifier) Verify(ref *git.Ref) {
}
}
} else if lv.verifyState == verifyStateUnknown {
Print("Locking support detected on remote %q. Consider enabling it with:", cfg.PushRemote())
Print(" $ git config lfs.%s.locksverify true", lv.endpoint.Url)
Print(tr.Tr.Get("Locking support detected on remote %q. Consider enabling it with:", cfg.PushRemote()))
Print("$ git config lfs.%s.locksverify true", lv.endpoint.Url)
}
lv.addLocks(ref, ours, lv.ourLocks)
@ -89,7 +90,7 @@ func (lv *lockVerifier) addLocks(ref *git.Ref, locks []locking.Lock, set map[str
for _, l := range locks {
if rl, ok := set[l.Path]; ok {
if err := rl.Add(ref, l); err != nil {
Error("WARNING: error adding %q lock for ref %q: %+v", l.Path, ref, err)
Error(tr.Tr.Get("warning: error adding %q lock for ref %q: %+v", l.Path, ref, err))
}
} else {
set[l.Path] = lv.newRefLocks(ref, l)

@ -2,7 +2,6 @@ package commands
import (
"bytes"
"fmt"
"io"
"os"
"sync"
@ -13,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfs"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
)
// Handles the process of checking out a single file, and updating the git
@ -29,7 +29,7 @@ func newSingleCheckout(gitEnv config.Environment, remote string) abstractCheckou
// Since writing data & calling git update-index must be relative to cwd
pathConverter, err := lfs.NewRepoToCurrentPathConverter(cfg)
if err != nil {
Panic(err, "Could not convert file paths")
Panic(err, tr.Tr.Get("Could not convert file paths"))
}
return &singleCheckout{
@ -72,7 +72,7 @@ func (c *singleCheckout) Run(p *lfs.WrappedPointer) {
return
}
LoggedError(err, "Checkout error: %s", err)
LoggedError(err, tr.Tr.Get("Checkout error: %s", err))
return
}
@ -85,16 +85,16 @@ func (c *singleCheckout) Run(p *lfs.WrappedPointer) {
if err := c.RunToPath(p, cwdfilepath); err != nil {
if errors.IsDownloadDeclinedError(err) {
// acceptable error, data not local (fetch not run or include/exclude)
Error("Skipped checkout for %q, content not local. Use fetch to download.", p.Name)
Error(tr.Tr.Get("Skipped checkout for %q, content not local. Use fetch to download.", p.Name))
} else {
FullError(fmt.Errorf("could not check out %q", p.Name))
FullError(errors.New(tr.Tr.Get("could not check out %q", p.Name)))
}
return
}
// errors are only returned when the gitIndexer is starting a new cmd
if err := c.gitIndexer.Add(cwdfilepath); err != nil {
Panic(err, "Could not update the index")
Panic(err, tr.Tr.Get("Could not update the index"))
}
}
@ -107,7 +107,7 @@ func (c *singleCheckout) RunToPath(p *lfs.WrappedPointer, path string) error {
func (c *singleCheckout) Close() {
if err := c.gitIndexer.Close(); err != nil {
LoggedError(err, "Error updating the git index:\n%s", c.gitIndexer.Output())
LoggedError(err, tr.Tr.Get("Error updating the git index:\n%s", c.gitIndexer.Output()))
}
}

@ -78,7 +78,7 @@ Simply type ` + root.Name() + ` help [path to command] for full details.`,
cmd, _, e = c.Root().Find([]string{"help"})
}
if cmd == nil || e != nil {
c.Printf("Unknown help topic %#q\n", args)
c.Print(tr.Tr.Get("Unknown help topic %#q\n", args))
c.Root().Usage()
} else {
c.HelpFunc()(cmd, args)
@ -140,7 +140,7 @@ func printHelp(commandName string) {
if txt, ok := ManPages[commandName]; ok {
fmt.Fprintf(os.Stdout, "%s\n", strings.TrimSpace(txt))
} else {
fmt.Fprintf(os.Stdout, "Sorry, no usage text found for %q\n", commandName)
fmt.Fprint(os.Stdout, tr.Tr.Get("Sorry, no usage text found for %q\n", commandName))
}
}
@ -151,14 +151,14 @@ func setupHTTPLogger(cmd *cobra.Command, args []string) {
logBase := filepath.Join(cfg.LocalLogDir(), "http")
if err := tools.MkdirAll(logBase, cfg); err != nil {
fmt.Fprintf(os.Stderr, "Error logging http stats: %s\n", err)
fmt.Fprint(os.Stderr, tr.Tr.Get("Error logging http stats: %s\n", err))
return
}
logFile := fmt.Sprintf("http-%d.log", time.Now().Unix())
file, err := os.Create(filepath.Join(logBase, logFile))
if err != nil {
fmt.Fprintf(os.Stderr, "Error logging http stats: %s\n", err)
fmt.Fprint(os.Stderr, tr.Tr.Get("Error logging http stats: %s\n", err))
} else {
getAPIClient().LogHTTPStats(file)
}

@ -16,6 +16,7 @@ import (
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -244,7 +245,7 @@ func (c *uploadContext) UploadPointers(q *tq.TransferQueue, unfiltered ...*lfs.W
continue
}
Print("push %s => %s", p.Oid, p.Name)
Print(tr.Tr.Get("push %s => %s", p.Oid, p.Name))
c.SetUploaded(p.Oid)
}
@ -289,23 +290,25 @@ func (c *uploadContext) ReportErrors() {
if len(c.missing) > 0 || len(c.corrupt) > 0 {
var action string
if c.allowMissing {
action = "missing objects"
action = tr.Tr.Get("missing objects")
} else {
action = "failed"
action = tr.Tr.Get("failed")
}
Print("LFS upload %s:", action)
Print(tr.Tr.Get("LFS upload %s:", action))
for name, oid := range c.missing {
Print(" (missing) %s (%s)", name, oid)
// TRANSLATORS: Leading spaces should be preserved.
Print(tr.Tr.Get(" (missing) %s (%s)", name, oid))
}
for name, oid := range c.corrupt {
Print(" (corrupt) %s (%s)", name, oid)
// TRANSLATORS: Leading spaces should be preserved.
Print(tr.Tr.Get(" (corrupt) %s (%s)", name, oid))
}
if !c.allowMissing {
pushMissingHint := []string{
"hint: Your push was rejected due to missing or corrupt local objects.",
"hint: You can disable this check with: 'git config lfs.allowincompletepush true'",
tr.Tr.Get("hint: Your push was rejected due to missing or corrupt local objects."),
tr.Tr.Get("hint: You can disable this check with: 'git config lfs.allowincompletepush true'"),
}
Print(strings.Join(pushMissingHint, "\n"))
os.Exit(2)
@ -317,18 +320,18 @@ func (c *uploadContext) ReportErrors() {
}
if c.lockVerifier.HasUnownedLocks() {
Print("Unable to push locked files:")
Print(tr.Tr.Get("Unable to push locked files:"))
for _, unowned := range c.lockVerifier.UnownedLocks() {
Print("* %s - %s", unowned.Path(), unowned.Owners())
}
if c.lockVerifier.Enabled() {
Exit("ERROR: Cannot update locked files.")
Exit(tr.Tr.Get("Cannot update locked files."))
} else {
Error("WARNING: The above files would have halted this push.")
Error(tr.Tr.Get("warning: The above files would have halted this push."))
}
} else if c.lockVerifier.HasOwnedLocks() {
Print("Consider unlocking your own locked files: (`git lfs unlock <path>`)")
Print(tr.Tr.Get("Consider unlocking your own locked files: (`git lfs unlock <path>`)"))
for _, owned := range c.lockVerifier.OwnedLocks() {
Print("* %s", owned.Path())
}
@ -357,7 +360,7 @@ func (c *uploadContext) uploadTransfer(p *lfs.WrappedPointer) (*tq.Transfer, err
localMediaPath, err := c.gitfilter.ObjectPath(oid)
if err != nil {
return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid)
return nil, errors.Wrap(err, tr.Tr.Get("Error uploading file %s (%s)", filename, oid))
}
if len(filename) > 0 {

@ -17,6 +17,7 @@ import (
"github.com/git-lfs/git-lfs/v3/fs"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -79,7 +80,7 @@ func NewIn(workdir, gitdir string) *Configuration {
callback: func() Environment {
sources, err := gitConf.Sources(c.LocalWorkingDir(), ".lfsconfig")
if err != nil {
fmt.Fprintf(os.Stderr, "Error reading git config: %s\n", err)
fmt.Fprintf(os.Stderr, tr.Tr.Get("Error reading git config: %s\n", err))
}
return c.readGitConfig(sources...)
},

@ -1,8 +1,10 @@
package config
import (
"fmt"
"errors"
"sort"
"github.com/git-lfs/git-lfs/v3/tr"
)
// An Extension describes how to manipulate files during smudge and clean.
@ -21,7 +23,7 @@ func SortExtensions(m map[string]Extension) ([]Extension, error) {
for n, ext := range m {
p := ext.Priority
if _, exist := pMap[p]; exist {
err := fmt.Errorf("duplicate priority %d on %s", p, n)
err := errors.New(tr.Tr.Get("duplicate priority %d on %s", p, n))
return nil, err
}
pMap[p] = ext

@ -8,6 +8,7 @@ import (
"sync"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
)
type GitFetcher struct {
@ -39,7 +40,7 @@ func readGitConfig(configs ...*git.ConfigurationSource) (gf *GitFetcher, extensi
if origKey, ok := uniqKeys[key]; ok {
if ShowConfigWarnings && len(vals[key]) > 0 && vals[key][len(vals[key])-1] != val && strings.HasPrefix(key, gitConfigWarningPrefix) {
fmt.Fprintf(os.Stderr, "WARNING: These git config values clash:\n")
fmt.Fprint(os.Stderr, tr.Tr.Get("warning: These git config values clash:\n"))
fmt.Fprintf(os.Stderr, " git config %q = %q\n", origKey, vals[key])
fmt.Fprintf(os.Stderr, " git config %q = %q\n", pieces[0], val)
}
@ -101,7 +102,7 @@ func readGitConfig(configs ...*git.ConfigurationSource) (gf *GitFetcher, extensi
}
if len(ignored) > 0 {
fmt.Fprintf(os.Stderr, "WARNING: These unsafe lfsconfig keys were ignored:\n\n")
fmt.Fprintf(os.Stderr, tr.Tr.Get("warning: These unsafe lfsconfig keys were ignored:\n\n"))
for _, key := range ignored {
fmt.Fprintf(os.Stderr, " %s\n", key)
}

@ -13,6 +13,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -36,7 +37,7 @@ type CredentialHelper interface {
func (credWrapper *CredentialHelperWrapper) FillCreds() error {
creds, err := credWrapper.CredentialHelper.Fill(credWrapper.Input)
if creds == nil || len(creds) < 1 {
errmsg := fmt.Sprintf("Git credentials for %s not found", credWrapper.Url)
errmsg := fmt.Sprintf(tr.Tr.Get("Git credentials for %s not found", credWrapper.Url))
if err != nil {
errmsg = fmt.Sprintf("%s:\n%s", errmsg, err.Error())
} else {
@ -210,7 +211,7 @@ func (a *AskPassCredentialHelper) getValue(what Creds, valueType credValueType,
case credValueTypePassword:
valueString = "password"
default:
return "", errors.Errorf("Invalid Credential type queried from AskPass")
return "", errors.Errorf(tr.Tr.Get("Invalid Credential type queried from AskPass"))
}
// Return the existing credential if it was already provided, otherwise
@ -235,7 +236,7 @@ func (a *AskPassCredentialHelper) getFromProgram(valueType credValueType, u *url
case credValueTypePassword:
valueString = "Password"
default:
return "", errors.Errorf("Invalid Credential type queried from AskPass")
return "", errors.Errorf(tr.Tr.Get("Invalid Credential type queried from AskPass"))
}
// 'cmd' will run the GIT_ASKPASS (or core.askpass) command prompting
@ -322,8 +323,8 @@ func (h *commandCredentialHelper) exec(subcommand string, input Creds) (Creds, e
if _, ok := err.(*exec.ExitError); ok {
if h.SkipPrompt {
return nil, fmt.Errorf("change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s",
input["protocol"], input["host"])
return nil, errors.New(tr.Tr.Get("change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s",
input["protocol"], input["host"]))
}
// 'git credential' exits with 128 if the helper doesn't fill the username
@ -462,7 +463,7 @@ func (s *CredentialHelpers) Fill(what Creds) (Creds, error) {
}
if len(errs) > 0 {
return nil, errors.New("credential fill errors:\n" + strings.Join(errs, "\n"))
return nil, errors.New(tr.Tr.Get("credential fill errors:\n%s", strings.Join(errs, "\n")))
}
return nil, nil
@ -481,7 +482,7 @@ func (s *CredentialHelpers) Reject(what Creds) error {
}
}
return errors.New("no valid credential helpers to reject")
return errors.New(tr.Tr.Get("no valid credential helpers to reject"))
}
// Approve implements CredentialHelper.Approve and approves the given Creds
@ -509,7 +510,7 @@ func (s *CredentialHelpers) Approve(what Creds) error {
}
}
return errors.New("no valid credential helpers to approve")
return errors.New(tr.Tr.Get("no valid credential helpers to approve"))
}
func (s *CredentialHelpers) skip(i int) {

82
docs/l10n.md Normal file

@ -0,0 +1,82 @@
# Localization of Git LFS
Git LFS now has support for a localization framework using [Gotext](https://github.com/leonelquinteros/gotext), a Go library based around the popular gettext format.
Localization is important since the majority of people on the planet don't speak English, and people should be able to use software in the language they're most comfortable with.
In addition, having access to localized software is a great way for people learning a language to improve their technical vocabulary.
## Choosing What to Translate
Here are some things that should be translated:
* Status messages
* Error messages
* Help output
* Generally, anything that the user sees in the normal course of operation
Here are some things that should not be translated:
* Trace output (e.g., calls to `tracerx.Printf`)
* Strings which have a functional or protocol use (e.g., `Basic` and other authentication schemes, HTTP verbs)
* Names of programs, commands, command-line options, or subcommands (although you _should_ translate their help output)
* Personal names, names of businesses, email addresses, and other proper nouns (most of which we should not typically have in our codebase)
* The names of Git LFS and Git themselves
## Making Text Translatable
The easiest way to make a string translatable is to wrap the string and any formatting arguments it takes in a call to `tr.Tr.Get`.
For example, you might write this:
```
Print(tr.Tr.Get("fetch: Fetching reference %s", ref.Name))
```
If you have a string which varies based on a number, use `tr.Tr.GetN`, provide the singular string, the plural string, the number upon which it varies, and then the arguments:
```
Print(tr.Tr.GetN(
"fetch: Fetching changes within %v day of %v",
"fetch: Fetching changes within %v days of %v",
fetchconf.FetchRecentCommitsDays,
fetchconf.FetchRecentCommitsDays,
refName,
))
```
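Note that the number is passed twice in this call: the third argument to `tr.Tr.GetN` selects the singular or plural form, and the repeated value then fills the `%v` placeholder in whichever string was chosen.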
Here are some tips for making your code as easy to translate as possible:
* Avoid creating strings out of several independent words.
For example, rather than taking a string like "Upload" or "Download" and appending "ing objects" to it, write the strings out in full as "Uploading objects" and "Downloading objects".
Not all languages compose words and phrases in the same way, and using full sentences or phrases makes the text easier to translate; a short sketch contrasting both styles follows this list.
* If you have a string which varies based on number, use `tr.Tr.GetN`, which handles pluralization correctly.
In some languages, zero is singular instead of plural. Also, unlike English, some languages have more than just two forms of a word (e.g., singular, dual, and plural), or words may not vary in number at all.
This also lets users in English see correctly pluralized strings in all cases.
* Only mark literal strings for translation, and mark them wherever they appear in the codebase.
The `xgotext` tool extracts only literal strings and doesn't handle variables, so a string that isn't marked for translation at the place it appears in the code won't end up in the translation files at all.
* Try to use text in strings that is simple and direct, avoiding the use of slang, idioms, or regional varieties of English.
Slang, idioms, and regional vocabulary are hard to translate, and they aren't as accessible to people who are learning English as a second language.
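To make these points concrete, here is a minimal, hypothetical sketch. It is not taken from the Git LFS code; it calls the underlying Gotext package-level API directly so that it runs as a standalone program, whereas inside Git LFS you would use `tr.Tr.Get` and `tr.Tr.GetN` as shown above. The message strings and the `po` directory are invented for illustration.
```
// Standalone sketch: uses the gotext library directly instead of the
// Git LFS tr wrapper, and falls back to the English strings below
// because no translation catalog is loaded.
package main

import (
	"fmt"

	"github.com/leonelquinteros/gotext"
)

func main() {
	// Hypothetical setup: look for catalogs under "po" (none exist here).
	gotext.Configure("po", "en_US", "default")

	// Good: mark each full phrase on its own, rather than composing
	// "Upload"/"Download" with an "ing objects" suffix, so translators
	// always see a complete sentence.
	fmt.Println(gotext.Get("Uploading objects"))
	fmt.Println(gotext.Get("Downloading objects"))

	// Good: let the plural-aware call choose the form; the count is
	// passed twice, once to pick singular or plural and once for %d.
	for _, n := range []int{1, 3} {
		fmt.Println(gotext.GetN("%d object processed", "%d objects processed", n, n))
	}
}
```
Running this with no `.po` files present simply prints the English source strings, mirroring the usual gettext-style fallback when no translation is available.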
## Guidelines for Translators
If you're interested in submitting a translation, please open an issue, and we'll work with you to get what you need.
We'll let you know a little in advance of our next planned release to give you time to update the translations.
When choosing to create a translation, we ask that you write a generic translation when possible (e.g., `es` rather than `es_MX`).
We realize that is not always achievable (such as with `pt_BR` and `pt_PT`), but when possible, it makes the translations more accessible.
For vocabulary, we recommend using the same rules as Git whenever possible.
If there's a decision to be made, picking the more universally intelligible option (e.g., _quatre-vingts_ instead of _huitante_ in French or _ustedes_ instead of _vosotros_ in Spanish) is preferable.
We'll generally defer to your experience on these matters.
To extract the strings for translation, run the following commands:
```
$ go install -v github.com/leonelquinteros/gotext/cli/xgotext
$ xgotext -in . -out po -v
```
Be aware that at the moment, `xgotext` is extremely slow.
You may wish to get a coffee while you wait.
## Help
If you're unclear on the best way to do things or you find a problem, feel free to open an issue or discussion, as appropriate, and we'll be happy to work with you to find a solution.

@ -9,6 +9,7 @@ import (
"syscall"
"time"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/pkg/errors"
)
@ -219,7 +220,7 @@ type wrappedError struct {
// newWrappedError creates a wrappedError.
func newWrappedError(err error, message string) *wrappedError {
if err == nil {
err = errors.New("Error")
err = errors.New(tr.Tr.Get("Error"))
}
var errWithCause errorWithCause
@ -269,7 +270,7 @@ func (e fatalError) Fatal() bool {
}
func NewFatalError(err error) error {
return fatalError{newWrappedError(err, "Fatal error")}
return fatalError{newWrappedError(err, tr.Tr.Get("Fatal error"))}
}
// Definitions for IsNotImplementedError()
@ -283,7 +284,7 @@ func (e notImplementedError) NotImplemented() bool {
}
func NewNotImplementedError(err error) error {
return notImplementedError{newWrappedError(err, "Not implemented")}
return notImplementedError{newWrappedError(err, tr.Tr.Get("Not implemented"))}
}
// Definitions for IsAuthError()
@ -297,7 +298,7 @@ func (e authError) AuthError() bool {
}
func NewAuthError(err error) error {
return authError{newWrappedError(err, "Authentication required")}
return authError{newWrappedError(err, tr.Tr.Get("Authentication required"))}
}
// Definitions for IsSmudgeError()
@ -311,7 +312,7 @@ func (e smudgeError) SmudgeError() bool {
}
func NewSmudgeError(err error, oid, filename string) error {
e := smudgeError{newWrappedError(err, "Smudge error")}
e := smudgeError{newWrappedError(err, tr.Tr.Get("Smudge error"))}
SetContext(e, "OID", oid)
SetContext(e, "FileName", filename)
return e
@ -328,7 +329,7 @@ func (e cleanPointerError) CleanPointerError() bool {
}
func NewCleanPointerError(pointer interface{}, bytes []byte) error {
err := New("pointer error")
err := New(tr.Tr.Get("pointer error"))
e := cleanPointerError{newWrappedError(err, "clean")}
SetContext(e, "pointer", pointer)
SetContext(e, "bytes", bytes)
@ -346,7 +347,7 @@ func (e notAPointerError) NotAPointerError() bool {
}
func NewNotAPointerError(err error) error {
return notAPointerError{newWrappedError(err, "Pointer file error")}
return notAPointerError{newWrappedError(err, tr.Tr.Get("Pointer file error"))}
}
// Definitions for IsPointerScanError()
@ -370,7 +371,7 @@ func (e PointerScanError) Path() string {
}
func NewPointerScanError(err error, treeishOid, path string) error {
return PointerScanError{treeishOid, path, newWrappedError(err, "Pointer error")}
return PointerScanError{treeishOid, path, newWrappedError(err, tr.Tr.Get("Pointer error"))}
}
type badPointerKeyError struct {
@ -385,8 +386,8 @@ func (e badPointerKeyError) BadPointerKeyError() bool {
}
func NewBadPointerKeyError(expected, actual string) error {
err := Errorf("Expected key %s, got %s", expected, actual)
return badPointerKeyError{expected, actual, newWrappedError(err, "pointer parsing")}
err := Errorf(tr.Tr.Get("Expected key %s, got %s", expected, actual))
return badPointerKeyError{expected, actual, newWrappedError(err, tr.Tr.Get("pointer parsing"))}
}
// Definitions for IsDownloadDeclinedError()

@ -5,7 +5,7 @@ import (
"bytes"
"crypto/sha256"
"encoding/hex"
"fmt"
"errors"
"io/ioutil"
"os"
"path/filepath"
@ -15,6 +15,7 @@ import (
"sync"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -74,14 +75,14 @@ func (f *Filesystem) ObjectExists(oid string, size int64) bool {
func (f *Filesystem) ObjectPath(oid string) (string, error) {
if len(oid) < 4 {
return "", fmt.Errorf("too short object ID: %q", oid)
return "", errors.New(tr.Tr.Get("too short object ID: %q", oid))
}
if oid == EmptyObjectSHA256 {
return os.DevNull, nil
}
dir := f.localObjectDir(oid)
if err := tools.MkdirAll(dir, f); err != nil {
return "", fmt.Errorf("error trying to create local storage directory in %q: %s", dir, err)
return "", errors.New(tr.Tr.Get("error trying to create local storage directory in %q: %s", dir, err))
}
return filepath.Join(dir, oid), nil
}

@ -5,6 +5,7 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
)
// Attribute wraps the structure and some operations of Git's conception of an
@ -158,7 +159,7 @@ func (a *Attribute) set(gitConfig *git.Configuration, key, value string, upgrade
}
return err
} else if currentValue != value {
return fmt.Errorf("the %q attribute should be %q but is %q",
return fmt.Errorf(fmt.Sprintf(tr.Tr.Get("the %%q attribute should be %%q but is %%q")),
key, value, currentValue)
}

@ -8,6 +8,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
)
// Status represents the status of a file that appears in the output of `git
@ -69,7 +70,7 @@ func (s DiffIndexStatus) Format(state fmt.State, c rune) {
state.Write([]byte{byte(rune(s))})
}
default:
panic(fmt.Sprintf("cannot format %v for DiffIndexStatus", c))
panic(fmt.Sprintf(tr.Tr.Get("cannot format %v for DiffIndexStatus", c)))
}
}
@ -180,12 +181,12 @@ func (s *DiffIndexScanner) scan(line string) (*DiffIndexEntry, error) {
parts := strings.Split(line, "\t")
if len(parts) < 2 {
return nil, errors.Errorf("invalid line: %s", line)
return nil, errors.Errorf(tr.Tr.Get("invalid line: %s", line))
}
desc := strings.Fields(parts[0])
if len(desc) < 5 {
return nil, errors.Errorf("invalid description: %s", parts[0])
return nil, errors.Errorf(tr.Tr.Get("invalid description: %s", parts[0]))
}
entry := &DiffIndexEntry{

@ -11,7 +11,9 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tr"
)
type pipeRequest struct {
@ -142,7 +144,7 @@ func pipeExtensions(cfg *config.Configuration, request *pipeRequest) (response p
if err = ec.cmd.Wait(); err != nil {
if ec.err != nil {
errStr := ec.err.String()
err = fmt.Errorf("extension '%s' failed with: %s", ec.result.name, errStr)
err = errors.New(tr.Tr.Get("extension '%s' failed with: %s", ec.result.name, errStr))
}
return
}

@ -11,6 +11,7 @@ import (
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tools/humanize"
"github.com/git-lfs/git-lfs/v3/tq"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -20,7 +21,7 @@ func (f *GitFilter) SmudgeToFile(filename string, ptr *Pointer, download bool, m
if stat, _ := os.Stat(filename); stat != nil && stat.Mode()&0200 == 0 {
if err := os.Chmod(filename, stat.Mode()|0200); err != nil {
return errors.Wrap(err,
"Could not restore write permission")
tr.Tr.Get("Could not restore write permission"))
}
// When we're done, return the file back to its normal
@ -30,12 +31,12 @@ func (f *GitFilter) SmudgeToFile(filename string, ptr *Pointer, download bool, m
abs, err := filepath.Abs(filename)
if err != nil {
return fmt.Errorf("could not produce absolute path for %q", filename)
return errors.New(tr.Tr.Get("could not produce absolute path for %q", filename))
}
file, err := os.Create(abs)
if err != nil {
return fmt.Errorf("could not create working directory file: %v", err)
return errors.New(tr.Tr.Get("could not create working directory file: %v", err))
}
defer file.Close()
if _, err := f.Smudge(file, ptr, filename, download, manifest, cb); err != nil {
@ -45,7 +46,7 @@ func (f *GitFilter) SmudgeToFile(filename string, ptr *Pointer, download bool, m
ptr.Encode(file)
return err
} else {
return fmt.Errorf("could not write working directory file: %v", err)
return errors.New(tr.Tr.Get("could not write working directory file: %v", err))
}
}
return nil
@ -91,7 +92,7 @@ func (f *GitFilter) Smudge(writer io.Writer, ptr *Pointer, workingfile string, d
}
func (f *GitFilter) downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest *tq.Manifest, cb tools.CopyCallback) (int64, error) {
fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, humanize.FormatBytes(uint64(ptr.Size)))
fmt.Fprintf(os.Stderr, tr.Tr.Get("Downloading %s (%s)\n", workingfile, humanize.FormatBytes(uint64(ptr.Size))))
// NOTE: if given, "cb" is a tools.CopyCallback which writes updates
// to the logpath specified by GIT_LFS_PROGRESS.
@ -116,7 +117,7 @@ func (f *GitFilter) downloadFile(writer io.Writer, ptr *Pointer, workingfile, me
}
}
return 0, errors.Wrapf(multiErr, "Error downloading %s (%s)", workingfile, ptr.Oid)
return 0, errors.Wrapf(multiErr, tr.Tr.Get("Error downloading %s (%s)", workingfile, ptr.Oid))
}
return f.readLocalFile(writer, ptr, mediafile, workingfile, nil)
@ -125,7 +126,7 @@ func (f *GitFilter) downloadFile(writer io.Writer, ptr *Pointer, workingfile, me
func (f *GitFilter) readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb tools.CopyCallback) (int64, error) {
reader, err := tools.RobustOpen(mediafile)
if err != nil {
return 0, errors.Wrapf(err, "error opening media file")
return 0, errors.Wrapf(err, tr.Tr.Get("error opening media file"))
}
defer reader.Close()
@ -141,7 +142,7 @@ func (f *GitFilter) readLocalFile(writer io.Writer, ptr *Pointer, mediafile stri
for _, ptrExt := range ptr.Extensions {
ext, ok := registeredExts[ptrExt.Name]
if !ok {
err := fmt.Errorf("extension '%s' is not configured", ptrExt.Name)
err := errors.New(tr.Tr.Get("extension '%s' is not configured", ptrExt.Name))
return 0, errors.Wrap(err, "smudge")
}
ext.Priority = ptrExt.Priority
@ -174,18 +175,18 @@ func (f *GitFilter) readLocalFile(writer io.Writer, ptr *Pointer, mediafile stri
// verify name, order, and oids
oid := response.results[0].oidIn
if ptr.Oid != oid {
err = fmt.Errorf("actual oid %s during smudge does not match expected %s", oid, ptr.Oid)
err = errors.New(tr.Tr.Get("actual oid %s during smudge does not match expected %s", oid, ptr.Oid))
return 0, errors.Wrap(err, "smudge")
}
for _, expected := range ptr.Extensions {
actual := actualExts[expected.Name]
if actual.name != expected.Name {
err = fmt.Errorf("actual extension name '%s' does not match expected '%s'", actual.name, expected.Name)
err = errors.New(tr.Tr.Get("actual extension name '%s' does not match expected '%s'", actual.name, expected.Name))
return 0, errors.Wrap(err, "smudge")
}
if actual.oidOut != expected.Oid {
err = fmt.Errorf("actual oid %s for extension '%s' does not match expected %s", actual.oidOut, expected.Name, expected.Oid)
err = errors.New(tr.Tr.Get("actual oid %s for extension '%s' does not match expected %s", actual.oidOut, expected.Name, expected.Oid))
return 0, errors.Wrap(err, "smudge")
}
}
@ -193,14 +194,14 @@ func (f *GitFilter) readLocalFile(writer io.Writer, ptr *Pointer, mediafile stri
// setup reader
reader, err = os.Open(response.file.Name())
if err != nil {
return 0, errors.Wrapf(err, "Error opening smudged file: %s", err)
return 0, errors.Wrapf(err, tr.Tr.Get("Error opening smudged file: %s", err))
}
defer reader.Close()
}
n, err := tools.CopyWithCallback(writer, reader, ptr.Size, cb)
if err != nil {
return n, errors.Wrapf(err, "Error reading from media file: %s", err)
return n, errors.Wrapf(err, tr.Tr.Get("Error reading from media file: %s", err))
}
return n, nil

@ -2,12 +2,12 @@ package lfs
import (
"errors"
"fmt"
"sync"
"time"
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/filepathfilter"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -68,7 +68,7 @@ func (s *GitScanner) RemoteForPush(r string) error {
defer s.mu.Unlock()
if len(s.remote) > 0 && s.remote != r {
return fmt.Errorf("trying to set remote to %q, already set to %q", r, s.remote)
return errors.New(tr.Tr.Get("trying to set remote to %q, already set to %q", r, s.remote))
}
s.remote = r
@ -88,7 +88,7 @@ func (s *GitScanner) ScanRangeToRemote(left, right string, cb GitScannerFoundPoi
s.mu.Lock()
if len(s.remote) == 0 {
s.mu.Unlock()
return fmt.Errorf("unable to scan starting at %q: no remote set", left)
return errors.New(tr.Tr.Get("unable to scan starting at %q: no remote set", left))
}
s.mu.Unlock()
@ -107,7 +107,7 @@ func (s *GitScanner) ScanMultiRangeToRemote(left string, rights []string, cb Git
s.mu.Lock()
if len(s.remote) == 0 {
s.mu.Unlock()
return fmt.Errorf("unable to scan starting at %q: no remote set", left)
return errors.New(tr.Tr.Get("unable to scan starting at %q: no remote set", left))
}
s.mu.Unlock()

@ -7,7 +7,9 @@ import (
"io"
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
)
// runCatFileBatch uses 'git cat-file --batch' to get the object contents of a
@ -144,7 +146,7 @@ func (s *PointerScanner) next(blob string) (string, string, *WrappedPointer, err
}
if int64(size) != read {
return blobSha, "", nil, fmt.Errorf("expected %d bytes, read %d bytes", size, read)
return blobSha, "", nil, errors.New(tr.Tr.Get("expected %d bytes, read %d bytes", size, read))
}
var pointer *WrappedPointer

@ -2,12 +2,13 @@ package lfs
import (
"bufio"
"fmt"
"io/ioutil"
"strconv"
"strings"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
)
// runCatFileBatchCheck uses 'git cat-file --batch-check' to get the type and
@ -49,7 +50,7 @@ func runCatFileBatchCheck(smallRevCh chan string, lockableCh chan string, lockab
stderr, _ := ioutil.ReadAll(cmd.Stderr)
err := cmd.Wait()
if err != nil {
errCh <- fmt.Errorf("error in git cat-file --batch-check: %v %v", err, string(stderr))
errCh <- errors.New(tr.Tr.Get("error in git cat-file --batch-check: %v %v", err, string(stderr)))
}
close(smallRevCh)
close(errCh)

@ -10,9 +10,11 @@ import (
"strings"
"time"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/filepathfilter"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -100,7 +102,7 @@ func scanStashed(cb GitScannerFoundPointer, s *GitScanner) error {
stashMergeShas = append(stashMergeShas, fmt.Sprintf("%v^..%v", stashMergeSha, stashMergeSha))
}
if err := scanner.Err(); err != nil {
fmt.Errorf("error while scanning git log for stashed refs: %v", err)
errors.New(tr.Tr.Get("error while scanning git log for stashed refs: %v", err))
}
err = cmd.Wait()
if err != nil {
@ -143,12 +145,12 @@ func parseScannerLogOutput(cb GitScannerFoundPointer, direction LogDiffDirection
}
if err := scanner.Err(); err != nil {
ioutil.ReadAll(cmd.Stdout)
ch <- gitscannerResult{Err: fmt.Errorf("error while scanning git log: %v", err)}
ch <- gitscannerResult{Err: errors.New(tr.Tr.Get("error while scanning git log: %v", err))}
}
stderr, _ := ioutil.ReadAll(cmd.Stderr)
err := cmd.Wait()
if err != nil {
ch <- gitscannerResult{Err: fmt.Errorf("error in git log: %v %v", err, string(stderr))}
ch <- gitscannerResult{Err: errors.New(tr.Tr.Get("error in git log: %v %v", err, string(stderr)))}
}
close(ch)
}()

@ -1,7 +1,6 @@
package lfs
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
@ -11,6 +10,7 @@ import (
"github.com/git-lfs/git-lfs/v3/filepathfilter"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/git/gitattr"
"github.com/git-lfs/git-lfs/v3/tr"
)
func runScanTree(cb GitScannerFoundPointer, ref string, filter *filepathfilter.Filter, gitEnv, osEnv config.Environment) error {
@ -116,7 +116,7 @@ func lsTreeBlobs(ref string, predicate func(*git.TreeBlob) bool) (*TreeBlobChann
stderr, _ := ioutil.ReadAll(cmd.Stderr)
err := cmd.Wait()
if err != nil {
errchan <- fmt.Errorf("error in git ls-tree: %v %v", err, string(stderr))
errchan <- errors.New(tr.Tr.Get("error in git ls-tree: %v %v", err, string(stderr)))
}
close(blobs)
close(errchan)

@ -9,7 +9,9 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -157,5 +159,5 @@ func (h *Hook) matchesCurrent() (bool, error) {
}
}
return false, fmt.Errorf("Hook already exists: %s\n\n%s\n", string(h.Type), tools.Indent(contents))
return false, errors.New(tr.Tr.Get("Hook already exists: %s\n\n%s\n", string(h.Type), tools.Indent(contents)))
}

@ -13,6 +13,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/fs"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/git-lfs/gitobj/v2"
)
@ -103,7 +104,7 @@ func DecodePointerFromFile(file string) (*Pointer, error) {
return nil, err
}
if stat.Size() >= blobSizeCutoff {
return nil, errors.NewNotAPointerError(errors.New("file size exceeds lfs pointer size cutoff"))
return nil, errors.NewNotAPointerError(errors.New(tr.Tr.Get("file size exceeds lfs pointer size cutoff")))
}
f, err := os.OpenFile(file, os.O_RDONLY, 0644)
if err != nil {
@ -150,7 +151,7 @@ func DecodeFrom(reader io.Reader) (*Pointer, io.Reader, error) {
func verifyVersion(version string) error {
if len(version) == 0 {
return errors.NewNotAPointerError(errors.New("Missing version"))
return errors.NewNotAPointerError(errors.New(tr.Tr.Get("Missing version")))
}
for _, v := range v1Aliases {
@ -159,7 +160,7 @@ func verifyVersion(version string) error {
}
}
return errors.New("Invalid version: " + version)
return errors.New(tr.Tr.Get("Invalid version: %s", version))
}
func decodeKV(data []byte) (*Pointer, error) {
@ -177,7 +178,7 @@ func decodeKV(data []byte) (*Pointer, error) {
value, ok := kvps["oid"]
if !ok {
return nil, errors.New("Invalid Oid")
return nil, errors.New(tr.Tr.Get("Invalid OID"))
}
oid, err := parseOid(value)
@ -188,7 +189,7 @@ func decodeKV(data []byte) (*Pointer, error) {
value, ok = kvps["size"]
size, err := strconv.ParseInt(value, 10, 64)
if err != nil || size < 0 {
return nil, fmt.Errorf("invalid size: %q", value)
return nil, errors.New(tr.Tr.Get("invalid size: %q", value))
}
var extensions []*PointerExtension
@ -212,14 +213,14 @@ func decodeKV(data []byte) (*Pointer, error) {
func parseOid(value string) (string, error) {
parts := strings.SplitN(value, ":", 2)
if len(parts) != 2 {
return "", errors.New("Invalid Oid value: " + value)
return "", errors.New(tr.Tr.Get("Invalid OID value: %s", value))
}
if parts[0] != oidType {
return "", errors.New("Invalid Oid type: " + parts[0])
return "", errors.New(tr.Tr.Get("Invalid OID type: %s", parts[0]))
}
oid := parts[1]
if !oidRE.Match([]byte(oid)) {
return "", errors.New("Invalid Oid: " + oid)
return "", errors.New(tr.Tr.Get("Invalid OID: %s", oid))
}
return oid, nil
}
@ -227,12 +228,12 @@ func parseOid(value string) (string, error) {
func parsePointerExtension(key string, value string) (*PointerExtension, error) {
keyParts := strings.SplitN(key, "-", 3)
if len(keyParts) != 3 || keyParts[0] != "ext" {
return nil, errors.New("Invalid extension value: " + value)
return nil, errors.New(tr.Tr.Get("Invalid extension value: %s", value))
}
p, err := strconv.Atoi(keyParts[1])
if err != nil || p < 0 {
return nil, errors.New("Invalid priority: " + keyParts[1])
return nil, errors.New(tr.Tr.Get("Invalid priority: %s", keyParts[1]))
}
name := keyParts[2]
@ -249,7 +250,7 @@ func validatePointerExtensions(exts []*PointerExtension) error {
m := make(map[int]struct{})
for _, ext := range exts {
if _, exist := m[ext.Priority]; exist {
return fmt.Errorf("duplicate priority found: %d", ext.Priority)
return errors.New(tr.Tr.Get("duplicate priority found: %d", ext.Priority))
}
m[ext.Priority] = struct{}{}
}
@ -260,7 +261,7 @@ func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string,
kvps = make(map[string]string)
if !matcherRE.Match(data) {
err = errors.NewNotAPointerError(errors.New("invalid header"))
err = errors.NewNotAPointerError(errors.New(tr.Tr.Get("invalid header")))
return
}
@ -275,7 +276,7 @@ func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string,
parts := strings.SplitN(text, " ", 2)
if len(parts) < 2 {
err = errors.NewNotAPointerError(fmt.Errorf("error reading line %d: %s", line, text))
err = errors.NewNotAPointerError(errors.New(tr.Tr.Get("error reading line %d: %s", line, text)))
return
}
@ -283,7 +284,7 @@ func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string,
value := parts[1]
if numKeys <= line {
err = errors.NewNotAPointerError(fmt.Errorf("extra line: %s", text))
err = errors.NewNotAPointerError(errors.New(tr.Tr.Get("extra line: %s", text)))
return
}

@ -9,7 +9,9 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
)
type Platform int
@ -209,7 +211,7 @@ func (p *currentToRepoPatternConverter) Convert(filename string) string {
func pathConverterArgs(cfg *config.Configuration) (string, string, bool, error) {
currDir, err := os.Getwd()
if err != nil {
return "", "", false, fmt.Errorf("unable to get working dir: %v", err)
return "", "", false, errors.New(tr.Tr.Get("unable to get working dir: %v", err))
}
currDir = tools.ResolveSymlinks(currDir)
return cfg.LocalWorkingDir(), currDir, cfg.LocalWorkingDir() == currDir, nil

@ -11,6 +11,7 @@ import (
"github.com/git-lfs/git-lfs/v3/creds"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -104,7 +105,7 @@ func (c *Client) doWithCreds(req *http.Request, credWrapper creds.CredentialHelp
}
if redirectedReq == nil {
return res, errors.New("failed to redirect request")
return res, errors.New(tr.Tr.Get("failed to redirect request"))
}
return c.doWithAuth("", access, redirectedReq, via)
@ -281,7 +282,7 @@ func setRequestAuthFromURL(req *http.Request, u *url.URL) bool {
}
if pass, ok := u.User.Password(); ok {
fmt.Fprintln(os.Stderr, "warning: current Git remote contains credentials")
fmt.Fprintln(os.Stderr, tr.Tr.Get("warning: current Git remote contains credentials"))
setRequestAuth(req, u.User.Username(), pass)
return true
}

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/creds"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -337,7 +338,7 @@ func storeAlias(aliases map[string]string, key string, values []string, suffix s
for _, value := range values {
url := key[len(aliasPrefix) : len(key)-len(suffix)]
if v, ok := aliases[value]; ok && v != url {
fmt.Fprintf(os.Stderr, "WARNING: Multiple 'url.*.%s' keys with the same alias: %q\n", suffix, value)
fmt.Fprintf(os.Stderr, tr.Tr.Get("warning: Multiple 'url.*.%s' keys with the same alias: %q\n", suffix, value))
}
aliases[value] = url
}

@ -1,12 +1,11 @@
package lfsapi
import (
"fmt"
"github.com/git-lfs/git-lfs/v3/creds"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/ssh"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -30,7 +29,7 @@ func NewClient(ctx lfshttp.Context) (*Client, error) {
httpClient, err := lfshttp.NewClient(ctx)
if err != nil {
return nil, errors.Wrap(err, fmt.Sprintf("error creating http client"))
return nil, errors.Wrap(err, tr.Tr.Get("error creating http client"))
}
c := &Client{

@ -21,6 +21,7 @@ import (
"github.com/git-lfs/git-lfs/v3/creds"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
"golang.org/x/net/http2"
)
@ -32,12 +33,6 @@ var (
httpRE = regexp.MustCompile(`\Ahttps?://`)
)
var hintFileUrl = strings.TrimSpace(`
hint: The remote resolves to a file:// URL, which can only work with a
hint: standalone transfer agent. See section "Using a Custom Transfer Type
hint: without the API server" in custom-transfers.md for details.
`)
type hostData struct {
host string
mode creds.AccessMode
@ -118,7 +113,9 @@ func (c *Client) URLConfig() *config.URLConfig {
func (c *Client) NewRequest(method string, e Endpoint, suffix string, body interface{}) (*http.Request, error) {
if strings.HasPrefix(e.Url, "file://") {
// Initial `\n` to avoid overprinting `Downloading LFS...`.
fmt.Fprintf(os.Stderr, "\n%s\n", hintFileUrl)
fmt.Fprintf(os.Stderr, "\n%s\n", tr.Tr.Get(`hint: The remote resolves to a file:// URL, which can only work with a
hint: standalone transfer agent. See section "Using a Custom Transfer Type
hint: without the API server" in custom-transfers.md for details.`))
}
sshRes, err := c.sshResolveWithRetries(e, method)
@ -133,7 +130,7 @@ func (c *Client) NewRequest(method string, e Endpoint, suffix string, body inter
if !httpRE.MatchString(prefix) {
urlfragment := strings.SplitN(prefix, "?", 2)[0]
return nil, fmt.Errorf("missing protocol: %q", urlfragment)
return nil, errors.New(tr.Tr.Get("missing protocol: %q", urlfragment))
}
req, err := http.NewRequest(method, joinURL(prefix, suffix), nil)
@ -339,7 +336,7 @@ func (c *Client) DoWithRedirect(cli *http.Client, req *http.Request, remote stri
via = append(via, req)
if len(via) >= 3 {
return nil, res, errors.New("too many redirects")
return nil, res, errors.New(tr.Tr.Get("too many redirects"))
}
redirectedReq, err := newRequestForRetry(req, redirectTo)
@ -359,28 +356,28 @@ func (c *Client) doWithRedirects(cli *http.Client, req *http.Request, remote str
}
if redirectedReq == nil {
return nil, errors.New("failed to redirect request")
return nil, errors.New(tr.Tr.Get("failed to redirect request"))
}
return c.doWithRedirects(cli, redirectedReq, remote, via)
}
func (c *Client) configureProtocols(u *url.URL, tr *http.Transport) error {
func (c *Client) configureProtocols(u *url.URL, transport *http.Transport) error {
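// Note: the *http.Transport parameter is named "transport" rather than "tr"
// here, since the old name would shadow the "tr" translation package whose
// tr.Tr.Get calls supply the error messages below.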
version, _ := c.uc.Get("http", u.String(), "version")
switch version {
case "HTTP/1.1":
// This disables HTTP/2, according to the documentation.
tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
transport.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
case "HTTP/2":
if u.Scheme != "https" {
return fmt.Errorf("HTTP/2 cannot be used except with TLS")
return errors.New(tr.Tr.Get("HTTP/2 cannot be used except with TLS"))
}
http2.ConfigureTransport(tr)
delete(tr.TLSNextProto, "http/1.1")
http2.ConfigureTransport(transport)
delete(transport.TLSNextProto, "http/1.1")
case "":
http2.ConfigureTransport(tr)
http2.ConfigureTransport(transport)
default:
return fmt.Errorf("Unknown HTTP version %q", version)
return errors.New(tr.Tr.Get("Unknown HTTP version %q", version))
}
return nil
}
@ -542,7 +539,7 @@ func newRequestForRetry(req *http.Request, location string) (*http.Request, erro
}
if req.URL.Scheme == "https" && newReq.URL.Scheme == "http" {
return nil, errors.New("lfsapi/client: refusing insecure redirect, https->http")
return nil, errors.New(tr.Tr.Get("lfsapi/client: refusing insecure redirect, https->http"))
}
sameHost := req.URL.Host == newReq.URL.Host

@ -6,6 +6,7 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
)
type httpError interface {
@ -99,31 +100,28 @@ func (e *statusCodeError) HTTPResponse() *http.Response {
return e.response
}
var (
defaultErrors = map[int]string{
400: "Client error: %s",
401: "Authorization error: %s\nCheck that you have proper access to the repository",
403: "Authorization error: %s\nCheck that you have proper access to the repository",
404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it",
422: "Unprocessable entity: %s",
429: "Rate limit exceeded: %s",
500: "Server error: %s",
501: "Not Implemented: %s",
507: "Insufficient server storage: %s",
509: "Bandwidth limit exceeded: %s",
}
)
func defaultError(res *http.Response) error {
var msgFmt string
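// The "%%s" placeholders below are doubled so that a single literal "%s"
// survives the intermediate formatting and is only expanded with the request
// URL by the final errors.Errorf call.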
defaultErrors := map[int]string{
400: tr.Tr.Get("Client error: %%s"),
401: tr.Tr.Get("Authorization error: %%s\nCheck that you have proper access to the repository"),
403: tr.Tr.Get("Authorization error: %%s\nCheck that you have proper access to the repository"),
404: tr.Tr.Get("Repository or object not found: %%s\nCheck that it exists and that you have proper access to it"),
422: tr.Tr.Get("Unprocessable entity: %%s"),
429: tr.Tr.Get("Rate limit exceeded: %%s"),
500: tr.Tr.Get("Server error: %%s"),
501: tr.Tr.Get("Not Implemented: %%s"),
507: tr.Tr.Get("Insufficient server storage: %%s"),
509: tr.Tr.Get("Bandwidth limit exceeded: %%s"),
}
if f, ok := defaultErrors[res.StatusCode]; ok {
msgFmt = f
} else if res.StatusCode < 500 {
msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode)
msgFmt = fmt.Sprintf("Client error %%s from HTTP %d", res.StatusCode)
} else {
msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP %d", res.StatusCode)
msgFmt = fmt.Sprintf("Server error %%s from HTTP %d", res.StatusCode)
}
return errors.Errorf(msgFmt, res.Request.URL)
return errors.Errorf(fmt.Sprintf(msgFmt), res.Request.URL)
}

@ -9,6 +9,7 @@ import (
"github.com/git-lfs/git-lfs/v3/config"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/tr"
)
var (
@ -71,7 +72,7 @@ type decodeTypeError struct {
func (e *decodeTypeError) TypeError() {}
func (e *decodeTypeError) Error() string {
return fmt.Sprintf("Expected json type, got: %q", e.Type)
return fmt.Sprintf(tr.Tr.Get("Expected json type, got: %q", e.Type))
}
func DecodeJSON(res *http.Response, obj interface{}) error {
@ -84,7 +85,7 @@ func DecodeJSON(res *http.Response, obj interface{}) error {
res.Body.Close()
if err != nil {
return errors.Wrapf(err, "Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL)
return errors.Wrapf(err, tr.Tr.Get("Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL))
}
return nil

@ -18,6 +18,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -122,17 +123,17 @@ func gitDirAtPath(path string) (string, error) {
cmd.Cmd.Env = env
out, err := cmd.Output()
if err != nil {
return "", errors.Wrap(err, "failed to call git rev-parse --git-dir")
return "", errors.Wrap(err, tr.Tr.Get("failed to call git rev-parse --git-dir"))
}
gitdir, err := tools.TranslateCygwinPath(strings.TrimRight(string(out), "\n"))
if err != nil {
return "", errors.Wrap(err, "unable to translate path")
return "", errors.Wrap(err, tr.Tr.Get("unable to translate path"))
}
gitdir, err = filepath.Abs(gitdir)
if err != nil {
return "", errors.Wrap(err, "unable to canonicalize path")
return "", errors.Wrap(err, tr.Tr.Get("unable to canonicalize path"))
}
err = os.Chdir(curdir)
@ -165,7 +166,7 @@ func newHandler(cfg *config.Configuration, output *os.File, msg *inputMessage) (
return nil, err
}
if url == nil {
return nil, errors.New("no valid file:// URLs found")
return nil, errors.New(tr.Tr.Get("no valid file:// URLs found"))
}
path, err := tools.TranslateCygwinPath(fixUrlPath(url.Path))
@ -206,7 +207,7 @@ func (h *fileHandler) dispatch(msg *inputMessage) bool {
case "terminate":
return false
default:
standaloneFailure(fmt.Sprintf("unknown event %q", msg.Event), nil)
standaloneFailure(tr.Tr.Get("unknown event %q", msg.Event), nil)
}
return true
}
@ -244,7 +245,7 @@ func (h *fileHandler) upload(oid string, size int64, path string) (string, strin
func (h *fileHandler) download(oid string, size int64) (string, string, error) {
if !h.remoteConfig.LFSObjectExists(oid, size) {
tracerx.Printf("missing object in %q (%s)", h.remotePath, oid)
return oid, "", errors.Errorf("remote missing object %s", oid)
return oid, "", errors.Errorf(tr.Tr.Get("remote missing object %s", oid))
}
src, err := h.remoteConfig.Filesystem().ObjectPath(oid)
@ -278,13 +279,13 @@ func ProcessStandaloneData(cfg *config.Configuration, input *os.File, output *os
for scanner.Scan() {
var msg inputMessage
if err := json.NewDecoder(strings.NewReader(scanner.Text())).Decode(&msg); err != nil {
return errors.Wrapf(err, "error decoding json")
return errors.Wrapf(err, tr.Tr.Get("error decoding JSON"))
}
if handler == nil {
var err error
handler, err = newHandler(cfg, output, &msg)
if err != nil {
return errors.Wrapf(err, "error creating handler")
return errors.Wrapf(err, tr.Tr.Get("error creating handler"))
}
}
if !handler.dispatch(&msg) {
@ -295,7 +296,7 @@ func ProcessStandaloneData(cfg *config.Configuration, input *os.File, output *os
os.RemoveAll(handler.tempdir)
}
if err := scanner.Err(); err != nil {
return errors.Wrapf(err, "error reading input")
return errors.Wrapf(err, tr.Tr.Get("error reading input"))
}
return nil
}

@ -9,6 +9,8 @@ import (
"net/http/httputil"
"strings"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -23,7 +25,7 @@ func (c *Client) traceRequest(req *http.Request) (*tracedRequest, error) {
body, ok := req.Body.(ReadSeekCloser)
if body != nil && !ok {
return nil, fmt.Errorf("Request body must implement io.ReadCloser and io.Seeker. Got: %T", body)
return nil, errors.New(tr.Tr.Get("Request body must implement io.ReadCloser and io.Seeker. Got: %T", body))
}
if body != nil && ok {

@ -5,9 +5,11 @@ import (
"net/http"
"strconv"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/tr"
)
type lockClient interface {
@ -79,7 +81,7 @@ func (c *httpLockClient) Lock(remote string, lockReq *lockRequest) (*lockRespons
return nil, res.StatusCode, err
}
if lockRes.Lock == nil && len(lockRes.Message) == 0 {
return nil, res.StatusCode, fmt.Errorf("invalid server response")
return nil, res.StatusCode, errors.New(tr.Tr.Get("invalid server response"))
}
return lockRes, res.StatusCode, nil
}
@ -134,7 +136,7 @@ func (c *httpLockClient) Unlock(ref *git.Ref, remote, id string, force bool) (*u
return nil, res.StatusCode, err
}
if unlockRes.Lock == nil && len(unlockRes.Message) == 0 {
return nil, res.StatusCode, fmt.Errorf("invalid server response")
return nil, res.StatusCode, errors.New(tr.Tr.Get("invalid server response"))
}
return unlockRes, res.StatusCode, nil
}

@ -2,7 +2,6 @@ package locking
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
@ -17,6 +16,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tools/kv"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -112,23 +112,23 @@ func (c *Client) LockFile(path string) (Lock, error) {
if len(lockRes.RequestID) > 0 {
tracerx.Printf("Server Request ID: %s", lockRes.RequestID)
}
return Lock{}, fmt.Errorf("server unable to create lock: %s", lockRes.Message)
return Lock{}, errors.New(tr.Tr.Get("server unable to create lock: %s", lockRes.Message))
}
lock := *lockRes.Lock
if err := c.cache.Add(lock); err != nil {
return Lock{}, errors.Wrap(err, "lock cache")
return Lock{}, errors.Wrap(err, tr.Tr.Get("lock cache"))
}
abs, err := getAbsolutePath(path)
if err != nil {
return Lock{}, errors.Wrap(err, "make lockpath absolute")
return Lock{}, errors.Wrap(err, tr.Tr.Get("make lockpath absolute"))
}
// If the file exists, ensure that it's writeable on return
if tools.FileExists(abs) {
if err := tools.SetFileWriteFlag(abs, true); err != nil {
return Lock{}, errors.Wrap(err, "set file write flag")
return Lock{}, errors.Wrap(err, tr.Tr.Get("set file write flag"))
}
}
@ -156,7 +156,7 @@ func getAbsolutePath(p string) (string, error) {
func (c *Client) UnlockFile(path string, force bool) error {
id, err := c.lockIdFromPath(path)
if err != nil {
return fmt.Errorf("unable to get lock id: %v", err)
return errors.New(tr.Tr.Get("unable to get lock id: %v", err))
}
return c.UnlockFileById(id, force)
@ -174,17 +174,17 @@ func (c *Client) UnlockFileById(id string, force bool) error {
if len(unlockRes.RequestID) > 0 {
tracerx.Printf("Server Request ID: %s", unlockRes.RequestID)
}
return fmt.Errorf("server unable to unlock: %s", unlockRes.Message)
return errors.New(tr.Tr.Get("server unable to unlock: %s", unlockRes.Message))
}
if err := c.cache.RemoveById(id); err != nil {
return fmt.Errorf("error caching unlock information: %v", err)
return errors.New(tr.Tr.Get("error caching unlock information: %v", err))
}
if unlockRes.Lock != nil {
abs, err := getAbsolutePath(unlockRes.Lock.Path)
if err != nil {
return errors.Wrap(err, "make lockpath absolute")
return errors.Wrap(err, tr.Tr.Get("make lockpath absolute"))
}
// Make non-writeable if required
@ -218,7 +218,7 @@ func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool
return c.searchLocalLocks(filter, limit)
} else if cached {
if len(filter) > 0 || limit != 0 {
return []Lock{}, errors.New("can't search cached locks when filter or limit is set")
return []Lock{}, errors.New(tr.Tr.Get("can't search cached locks when filter or limit is set"))
}
locks := []Lock{}
@ -248,7 +248,7 @@ func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ourLocks, theirL
if cached {
if limit != 0 {
return []Lock{}, []Lock{}, errors.New("can't search cached locks when limit is set")
return []Lock{}, []Lock{}, errors.New(tr.Tr.Get("can't search cached locks when limit is set"))
}
locks := &lockVerifiableList{}
@ -286,7 +286,7 @@ func (c *Client) SearchLocksVerifiable(limit int, cached bool) (ourLocks, theirL
if len(list.RequestID) > 0 {
tracerx.Printf("Server Request ID: %s", list.RequestID)
}
return ourLocks, theirLocks, fmt.Errorf("server error searching locks: %s", list.Message)
return ourLocks, theirLocks, errors.New(tr.Tr.Get("server error searching locks: %s", list.Message))
}
for _, l := range list.Ours {
@ -367,7 +367,7 @@ func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock,
if len(list.RequestID) > 0 {
tracerx.Printf("Server Request ID: %s", list.RequestID)
}
return locks, fmt.Errorf("server error searching for locks: %s", list.Message)
return locks, errors.New(tr.Tr.Get("server error searching for locks: %s", list.Message))
}
for _, l := range list.Locks {
@ -443,15 +443,15 @@ func (c *Client) prepareCacheDirectory(kind string) (string, error) {
stat, err := os.Stat(cacheDir)
if err == nil {
if !stat.IsDir() {
return cacheDir, errors.New("init cache directory " + cacheDir + " failed: already exists, but is no directory")
return cacheDir, errors.New(tr.Tr.Get("inititalization of cache directory %s failed: already exists, but is no directory", cacheDir))
}
} else if os.IsNotExist(err) {
err = tools.MkdirAll(cacheDir, c.cfg)
if err != nil {
return cacheDir, errors.Wrap(err, "init cache directory "+cacheDir+" failed: directory creation failed")
return cacheDir, errors.Wrap(err, tr.Tr.Get("initiailization of cache directory %s failed: directory creation failed", cacheDir))
}
} else {
return cacheDir, errors.Wrap(err, "init cache directory "+cacheDir+" failed")
return cacheDir, errors.Wrap(err, tr.Tr.Get("initialization of cache directory %s failed", cacheDir))
}
return filepath.Join(cacheDir, kind), nil
@ -466,7 +466,7 @@ func (c *Client) readLocksFromCacheFile(kind string, decoder func(*json.Decoder)
_, err = os.Stat(cacheFile)
if err != nil {
if os.IsNotExist(err) {
return errors.New("no cached locks present")
return errors.New(tr.Tr.Get("no cached locks present"))
}
return err

@ -5,9 +5,11 @@ import (
"strings"
"time"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/ssh"
"github.com/git-lfs/git-lfs/v3/tr"
)
type sshLockClient struct {
@ -40,13 +42,13 @@ func (c *sshLockClient) parseLockResponse(status int, args []string, lines []str
} else if strings.HasPrefix(entry, "locked-at=") {
lock.LockedAt, err = time.Parse(time.RFC3339, entry[10:])
if err != nil {
return lock, "", fmt.Errorf("lock response: invalid locked-at: %s", entry)
return lock, "", errors.New(tr.Tr.Get("lock response: invalid locked-at: %s", entry))
}
seen["locked-at"] = struct{}{}
}
}
if len(seen) != 4 {
return nil, "", fmt.Errorf("incomplete fields for lock")
return nil, "", errors.New(tr.Tr.Get("incomplete fields for lock"))
}
}
if status > 299 && len(lines) > 0 {
@ -79,7 +81,7 @@ func (c *sshLockClient) parseListLockResponse(status int, args []string, lines [
for _, entry := range args {
if strings.HasPrefix(entry, "next-cursor=") {
if len(nextCursor) > 0 {
return nil, nil, nil, "", "", fmt.Errorf("lock response: multiple next-cursor responses")
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: multiple next-cursor responses"))
}
nextCursor = entry[12:]
}
@ -92,18 +94,18 @@ func (c *sshLockClient) parseListLockResponse(status int, args []string, lines [
}
if cmd == "lock" {
if len(values) != 2 {
return nil, nil, nil, "", "", fmt.Errorf("lock response: invalid response: %q", entry)
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: invalid response: %q", entry))
} else if last != nil && c.lockDataIsIncomplete(last) {
return nil, nil, nil, "", "", fmt.Errorf("lock response: incomplete lock data")
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: incomplete lock data"))
}
id := values[1]
last = &lockData{who: ownerUnknown}
last.lock.Id = id
locks[id] = last
} else if len(values) != 3 {
return nil, nil, nil, "", "", fmt.Errorf("lock response: invalid response: %q", entry)
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: invalid response: %q", entry))
} else if last == nil || last.lock.Id != values[1] {
return nil, nil, nil, "", "", fmt.Errorf("lock response: interspersed response: %q", entry)
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: interspersed response: %q", entry))
} else {
switch cmd {
case "path":
@ -116,13 +118,13 @@ func (c *sshLockClient) parseListLockResponse(status int, args []string, lines [
case "locked-at":
last.lock.LockedAt, err = time.Parse(time.RFC3339, values[2])
if err != nil {
return nil, nil, nil, "", "", fmt.Errorf("lock response: invalid locked-at: %s", entry)
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: invalid locked-at: %s", entry))
}
}
}
}
if last != nil && c.lockDataIsIncomplete(last) {
return nil, nil, nil, "", "", fmt.Errorf("lock response: incomplete lock data")
return nil, nil, nil, "", "", errors.New(tr.Tr.Get("lock response: incomplete lock data"))
}
for _, lock := range locks {
all = append(all, lock.lock)

97
script/gen-i-reverse Executable file

@ -0,0 +1,97 @@
#!/usr/bin/env ruby
if ARGV.length < 2
$stderr.puts "Usage: gen-i-reverse INPUT-FILE OUTPUT-FILE"
exit 1
end
input = File.open(ARGV[0])
output = File.open(ARGV[1], "w")
$state = :idle
$singular = nil
$plural = nil
def reset_state
$state = :idle
$singular = nil
$plural = nil
end
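# "Translate" a msgid into the reverse-text pseudo-locale: each word is
# reversed, while %-style format directives and escape sequences such as \n
# are passed through unchanged.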
def translate(s)
items = s.split(/ /)
items = items.map do |chunk|
case chunk
when /^%/
chunk
else
chunk.split(/(\\n|\W+)/).map do |c|
c =~ /^\w/ ? c.reverse : c
end.join
end
end
items.join(" ").gsub("\n", "\\n")
end
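# Line-oriented state machine over the PO file: msgid and msgid_plural entries
# are captured (including xgotext's backtick-delimited multi-line form), and
# empty msgstr lines are filled in with the reversed pseudo-translation.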
while line = input.gets
line.chomp!
case $state
when :idle
case line
when /^msgid ""$/
$state = :copy
output.puts line
when /^msgid "(.*)"$/
$state = :msgid
$singular = $1
output.puts line
when /^msgid `(.*)$/
$state = :msgid_multi
$singular = $1.gsub('"', "\\\"") + "\n"
end
when :copy
if line == ""
reset_state
end
output.puts line
when :msgid_multi
case line
# Note that PO files are not supposed to contain backtick-delimited strings,
# but xgotext emits them anyway, so we fix them up here until xgotext itself is fixed.
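# For example, a multi-line entry such as:
#   msgid `first line
#   second line`
# is rewritten into the standard PO form:
#   msgid "first line\nsecond line"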
when /^(.*)`$/
$state = :msgid
$singular += $1.gsub('"', "\\\"")
output.puts "msgid \"#{$singular.gsub("\n", "\\n")}\""
else
$singular += line.gsub('"', "\\\"") + "\n"
end
when :msgid_plural_multi
case line
when /^(.*)`$/
$state = :msgid
$plural += $1.gsub('"', "\\\"")
output.puts "msgid_plural \"#{$plural.gsub("\n", "\\n")}\""
else
$plural += line.gsub('"', "\\\"") + "\n"
end
when :msgid
case line
when /^msgid_plural ""$/
output.puts line
when /^msgid_plural "(.*)"$/
$plural = $1
output.puts line
when /^msgid_plural `(.*)$/
$state = :msgid_plural_multi
$plural = $1.gsub('"', "\\\"") + "\n"
output.puts line
when /^msgstr(\[0\])? ""$/
output.puts "msgstr#{$1} \"#{translate($singular)}\""
when /^msgstr\[1\] ""$/
output.puts "msgstr[1] \"#{translate($plural)}\""
when ""
reset_state
output.puts line
end
end
end

@ -1,7 +1,6 @@
package ssh
import (
"fmt"
"io"
"strconv"
"strings"
@ -9,6 +8,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tr"
)
type PktlineConnection struct {
@ -46,7 +46,7 @@ func (conn *PktlineConnection) End() error {
func (conn *PktlineConnection) negotiateVersion() error {
pkts, err := conn.pl.ReadPacketList()
if err != nil {
return errors.NewProtocolError("Unable to negotiate version with remote side (unable to read capabilities)", err)
return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unable to read capabilities)"), err)
}
ok := false
for _, line := range pkts {
@ -55,22 +55,22 @@ func (conn *PktlineConnection) negotiateVersion() error {
}
}
if !ok {
return errors.NewProtocolError("Unable to negotiate version with remote side (missing version=1)", nil)
return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (missing version=1)"), nil)
}
err = conn.SendMessage("version 1", nil)
if err != nil {
return errors.NewProtocolError("Unable to negotiate version with remote side (unable to send version)", err)
return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unable to send version)"), err)
}
status, args, _, err := conn.ReadStatusWithLines()
if err != nil {
return errors.NewProtocolError("Unable to negotiate version with remote side (unable to read status)", err)
return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unable to read status)"), err)
}
if status != 200 {
text := "no error provided"
text := tr.Tr.Get("no error provided")
if len(args) > 0 {
text = fmt.Sprintf("server said: %q", args[0])
text = tr.Tr.Get("server said: %q", args[0])
}
return errors.NewProtocolError(fmt.Sprintf("Unable to negotiate version with remote side (unexpected status %d; %s)", status, text), nil)
return errors.NewProtocolError(tr.Tr.Get("Unable to negotiate version with remote side (unexpected status %d; %s)", status, text), nil)
}
return nil
}
@ -150,12 +150,12 @@ func (conn *PktlineConnection) ReadStatus() (int, error) {
for {
s, pktLen, err := conn.pl.ReadPacketTextWithLength()
if err != nil {
return 0, errors.NewProtocolError("error reading packet", err)
return 0, errors.NewProtocolError(tr.Tr.Get("error reading packet"), err)
}
switch {
case pktLen == 0:
if !seenStatus {
return 0, errors.NewProtocolError("no status seen", nil)
return 0, errors.NewProtocolError(tr.Tr.Get("no status seen"), nil)
}
return status, nil
case !seenStatus:
@ -165,11 +165,11 @@ func (conn *PktlineConnection) ReadStatus() (int, error) {
ok = err == nil
}
if !ok {
return 0, errors.NewProtocolError(fmt.Sprintf("expected status line, got %q", s), err)
return 0, errors.NewProtocolError(tr.Tr.Get("expected status line, got %q", s), err)
}
seenStatus = true
default:
return 0, errors.NewProtocolError(fmt.Sprintf("unexpected data, got %q", s), err)
return 0, errors.NewProtocolError(tr.Tr.Get("unexpected data, got %q", s), err)
}
}
}
@ -183,13 +183,13 @@ func (conn *PktlineConnection) ReadStatusWithData() (int, []string, io.Reader, e
for {
s, pktLen, err := conn.pl.ReadPacketTextWithLength()
if err != nil {
return 0, nil, nil, errors.NewProtocolError("error reading packet", err)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("error reading packet"), err)
}
if pktLen == 0 {
if !seenStatus {
return 0, nil, nil, errors.NewProtocolError("no status seen", nil)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("no status seen"), nil)
}
return 0, nil, nil, errors.NewProtocolError("unexpected flush packet", nil)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("unexpected flush packet"), nil)
} else if !seenStatus {
ok := false
if strings.HasPrefix(s, "status ") {
@ -197,7 +197,7 @@ func (conn *PktlineConnection) ReadStatusWithData() (int, []string, io.Reader, e
ok = err == nil
}
if !ok {
return 0, nil, nil, errors.NewProtocolError(fmt.Sprintf("expected status line, got %q", s), err)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("expected status line, got %q", s), err)
}
seenStatus = true
} else if pktLen == 1 {
@ -220,12 +220,12 @@ func (conn *PktlineConnection) ReadStatusWithLines() (int, []string, []string, e
for {
s, pktLen, err := conn.pl.ReadPacketTextWithLength()
if err != nil {
return 0, nil, nil, errors.NewProtocolError("error reading packet", err)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("error reading packet"), err)
}
switch {
case pktLen == 0:
if !seenStatus {
return 0, nil, nil, errors.NewProtocolError("no status seen", nil)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("no status seen"), nil)
}
return status, args, lines, nil
case seenDelim:
@ -237,12 +237,12 @@ func (conn *PktlineConnection) ReadStatusWithLines() (int, []string, []string, e
ok = err == nil
}
if !ok {
return 0, nil, nil, errors.NewProtocolError(fmt.Sprintf("expected status line, got %q", s), err)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("expected status line, got %q", s), err)
}
seenStatus = true
case pktLen == 1:
if seenDelim {
return 0, nil, nil, errors.NewProtocolError("unexpected delimiter packet", nil)
return 0, nil, nil, errors.NewProtocolError(tr.Tr.Get("unexpected delimiter packet"), nil)
}
seenDelim = true
default:

@ -4,6 +4,7 @@ package subprocess
import (
"bufio"
"errors"
"fmt"
"os"
"os/exec"
@ -11,6 +12,7 @@ import (
"strings"
"sync"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -65,7 +67,7 @@ func Output(cmd *Cmd) (string, error) {
if len(cmd.Args) > 1 {
ran = fmt.Sprintf("%s %s", cmd.Path, quotedArgs(cmd.Args[1:]))
}
formattedErr := fmt.Errorf("error running %s: '%s' '%s'", ran, errorOutput, strings.TrimSpace(exitError.Error()))
formattedErr := errors.New(tr.Tr.Get("error running %s: '%s' '%s'", ran, errorOutput, strings.TrimSpace(exitError.Error())))
// return "" as output in error case, for callers that don't care about errors but rely on "" returned, in-case stdout != ""
return "", formattedErr

@ -195,7 +195,7 @@ begin_test "ambiguous url alias"
git config -l | grep url
git lfs env 2>&1 | tee env2.log
grep "WARNING: Multiple 'url.*.insteadof'" env2.log
grep "warning: Multiple 'url.*.insteadof'" env2.log
)
end_test
@ -252,7 +252,7 @@ begin_test "config: ignoring unsafe lfsconfig keys"
git lfs env 2>&1 | tee status.log
grep "WARNING: These unsafe lfsconfig keys were ignored:" status.log
grep "warning: These unsafe lfsconfig keys were ignored:" status.log
grep " core.askpass" status.log
)
end_test

@ -1112,7 +1112,7 @@ begin_test "env with duplicate endpoints"
EOF
git lfs env 2>&1 | tee test.log
if grep 'WARNING.*same alias' test.log
if grep 'warning.*same alias' test.log
then
exit 1
fi
@ -1123,6 +1123,6 @@ EOF
EOF
git lfs env 2>&1 | tee test.log
grep 'WARNING.*same alias' test.log
grep 'warning.*same alias' test.log
)
end_test

@ -240,7 +240,7 @@ begin_test "ls-files: before first commit"
cd "$reponame"
if [ 0 -ne $(git lfs ls-files | wc -l) ]; then
echo >&2 "fatal: expected \`git lfs ls-files\` to produce no output"
echo >&2 "Expected \`git lfs ls-files\` to produce no output"
exit 1
fi
)
@ -334,7 +334,7 @@ begin_test "ls-files: --all with argument(s)"
exit 1
fi
[ "fatal: cannot use --all with explicit reference" = "$(cat ls-files.log)" ]
[ "Cannot use --all with explicit reference" = "$(cat ls-files.log)" ]
)
end_test
@ -379,10 +379,10 @@ begin_test "ls-files: invalid --all ordering"
git lfs ls-files -- --all 2>&1 | tee ls-files.out
if [ ${PIPESTATUS[0]} = "0" ]; then
echo >&2 "fatal: expected \`git lfs ls-files -- --all\' to fail"
echo >&2 "Expected \`git lfs ls-files -- --all\' to fail"
exit 1
fi
grep "fatal: did you mean \"git lfs ls-files --all --\" ?" ls-files.out
grep "Did you mean \"git lfs ls-files --all --\" ?" ls-files.out
)
end_test

@ -204,7 +204,7 @@ begin_test "migrate export (no filter)"
exit 1
fi
grep "fatal: one or more files must be specified with --include" migrate.log
grep "One or more files must be specified with --include" migrate.log
)
end_test
@ -439,7 +439,7 @@ begin_test "migrate export (invalid --remote)"
exit 1
fi
grep "fatal: invalid remote zz provided" migrate.log
grep "Invalid remote zz provided" migrate.log
)
end_test

@ -57,11 +57,11 @@ begin_test "migrate import (--fixup, --include)"
| tee migrate.log
if [ "${PIPESTATUS[0]}" -eq 0 ]; then
echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..."
echo >&2 "Expected 'git lfs migrate ...' to fail, didn't ..."
exit 1
fi
grep -q "fatal: cannot use --fixup with --include, --exclude" migrate.log
grep -q "Cannot use --fixup with --include, --exclude" migrate.log
)
end_test
@ -75,11 +75,11 @@ begin_test "migrate import (--fixup, --exclude)"
| tee migrate.log
if [ "${PIPESTATUS[0]}" -eq 0 ]; then
echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..."
echo >&2 "Expected 'git lfs migrate ...' to fail, didn't ..."
exit 1
fi
grep -q "fatal: cannot use --fixup with --include, --exclude" migrate.log
grep -q "Cannot use --fixup with --include, --exclude" migrate.log
)
end_test
@ -93,11 +93,11 @@ begin_test "migrate import (--fixup, --no-rewrite)"
| tee migrate.log
if [ "${PIPESTATUS[0]}" -eq 0 ]; then
echo >&2 "fatal: expected 'git lfs migrate ...' to fail, didn't ..."
echo >&2 "Expected 'git lfs migrate ...' to fail, didn't ..."
exit 1
fi
grep -q "fatal: --no-rewrite and --fixup cannot be combined" migrate.log
grep -qe "--no-rewrite and --fixup cannot be combined" migrate.log
)
end_test

@ -116,11 +116,11 @@ begin_test "migrate import --no-rewrite (no .gitattributes)"
# Ensure command fails if no .gitattributes files are present
git lfs migrate import --no-rewrite --yes *.txt *.md 2>&1 | tee migrate.log
if [ ${PIPESTATUS[0]} -eq 0 ]; then
echo >&2 "fatal: expected git lfs migrate import --no-rewrite to fail, didn't"
echo >&2 "Expected git lfs migrate import --no-rewrite to fail, didn't"
exit 1
fi
grep "no Git LFS filters found in .gitattributes" migrate.log
grep "No Git LFS filters found in .gitattributes" migrate.log
)
end_test
@ -157,7 +157,7 @@ begin_test "migrate import --no-rewrite (nested .gitattributes)"
# top-level .gitattributes file
git lfs migrate import --no-rewrite --yes a.md 2>&1 | tee migrate.log
if [ ${PIPESTATUS[0]} -eq 0 ]; then
echo >&2 "fatal: expected git lfs migrate import --no-rewrite to fail, didn't"
echo >&2 "Expected git lfs migrate import --no-rewrite to fail, didn't"
exit 1
fi

@ -705,7 +705,7 @@ begin_test "migrate import (--everything with args)"
setup_multiple_local_branches
[ "$(git lfs migrate import --everything main 2>&1)" = \
"fatal: cannot use --everything with explicit reference arguments" ]
"Cannot use --everything with explicit reference arguments" ]
)
end_test
@ -716,7 +716,7 @@ begin_test "migrate import (--everything with --include-ref)"
setup_multiple_local_branches
[ "$(git lfs migrate import --everything --include-ref=refs/heads/main 2>&1)" = \
"fatal: cannot use --everything with --include-ref or --exclude-ref" ]
"Cannot use --everything with --include-ref or --exclude-ref" ]
)
end_test
@ -727,7 +727,7 @@ begin_test "migrate import (--everything with --exclude-ref)"
setup_multiple_local_branches
[ "$(git lfs migrate import --everything --exclude-ref=refs/heads/main 2>&1)" = \
"fatal: cannot use --everything with --include-ref or --exclude-ref" ]
"Cannot use --everything with --include-ref or --exclude-ref" ]
)
end_test

@ -12,8 +12,8 @@ begin_test "migrate info (default branch)"
original_head="$(git rev-parse HEAD)"
diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF
*.md 140 B 1/1 files(s) 100%
*.txt 120 B 1/1 files(s) 100%
*.md 140 B 1/1 file 100%
*.txt 120 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -42,8 +42,8 @@ begin_test "migrate info (given branch)"
original_feature="$(git rev-parse refs/heads/my-feature)"
diff -u <(git lfs migrate info my-feature 2>&1 | tail -n 2) <(cat <<-EOF
*.md 170 B 2/2 files(s) 100%
*.txt 120 B 1/1 files(s) 100%
*.md 170 B 2/2 files 100%
*.txt 120 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -63,7 +63,7 @@ begin_test "migrate info (default branch with filter)"
original_head="$(git rev-parse HEAD)"
diff -u <(git lfs migrate info --include "*.md" 2>&1 | tail -n 1) <(cat <<-EOF
*.md 140 B 1/1 files(s) 100%
*.md 140 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -82,7 +82,7 @@ begin_test "migrate info (given branch with filter)"
original_feature="$(git rev-parse refs/heads/my-feature)"
diff -u <(git lfs migrate info --include "*.md" my-feature 2>&1 | tail -n 1) <(cat <<-EOF
*.md 170 B 2/2 files(s) 100%
*.md 170 B 2/2 files 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -105,8 +105,8 @@ begin_test "migrate info (default branch, exclude remote refs)"
original_main="$(git rev-parse refs/heads/main)"
diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF
*.md 50 B 1/1 files(s) 100%
*.txt 30 B 1/1 files(s) 100%
*.md 50 B 1/1 file 100%
*.txt 30 B 1/1 file 100%
EOF)
migrated_remote="$(git rev-parse refs/remotes/origin/main)"
@ -128,8 +128,8 @@ begin_test "migrate info (given branch, exclude remote refs)"
original_feature="$(git rev-parse refs/heads/my-feature)"
diff -u <(git lfs migrate info my-feature 2>&1 | tail -n 2) <(cat <<-EOF
*.md 52 B 2/2 files(s) 100%
*.txt 50 B 2/2 files(s) 100%
*.md 52 B 2/2 files 100%
*.txt 50 B 2/2 files 100%
EOF)
migrated_remote="$(git rev-parse refs/remotes/origin/main)"
@ -157,8 +157,8 @@ begin_test "migrate info (given ref, --skip-fetch)"
git update-ref -d refs/remotes/origin/main
diff -u <(git lfs migrate info --skip-fetch 2>&1 | tail -n 2) <(cat <<-EOF
*.md 190 B 2/2 files(s) 100%
*.txt 150 B 2/2 files(s) 100%
*.md 190 B 2/2 files 100%
*.txt 150 B 2/2 files 100%
EOF)
migrated_remote="$(git rev-parse pseudo-remote)"
@ -181,8 +181,8 @@ begin_test "migrate info (include/exclude ref)"
diff -u <(git lfs migrate info \
--include-ref=refs/heads/my-feature \
--exclude-ref=refs/heads/main 2>&1 | tail -n 2) <(cat <<-EOF
*.md 31 B 1/1 files(s) 100%
*.txt 30 B 1/1 files(s) 100%
*.md 31 B 1/1 file 100%
*.txt 30 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -204,8 +204,8 @@ begin_test "migrate info (include/exclude ref args)"
diff -u <(git lfs migrate info \
my-feature ^main 2>&1 | tail -n 2) <(cat <<-EOF
*.md 31 B 1/1 files(s) 100%
*.txt 30 B 1/1 files(s) 100%
*.md 31 B 1/1 file 100%
*.txt 30 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -229,7 +229,7 @@ begin_test "migrate info (include/exclude ref with filter)"
--include="*.txt" \
--include-ref=refs/heads/my-feature \
--exclude-ref=refs/heads/main 2>&1 | tail -n 1) <(cat <<-EOF
*.txt 30 B 1/1 files(s) 100%
*.txt 30 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -249,7 +249,7 @@ begin_test "migrate info (nested sub-trees, no filter)"
original_main="$(git rev-parse refs/heads/main)"
diff -u <(git lfs migrate info 2>/dev/null) <(cat <<-EOF
*.txt 120 B 1/1 files(s) 100%
*.txt 120 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -267,7 +267,7 @@ begin_test "migrate info (above threshold)"
original_head="$(git rev-parse HEAD)"
diff -u <(git lfs migrate info --above=130B 2>&1 | tail -n 1) <(cat <<-EOF
*.md 140 B 1/1 files(s) 100%
*.md 140 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -290,7 +290,7 @@ begin_test "migrate info (above threshold, top)"
# Ensure command reports only single highest entry due to --top=1 argument.
diff -u <(git lfs migrate info --above=130B --top=1 2>&1 | tail -n 1) <(cat <<-EOF
*.bin 160 B 1/1 files(s) 100%
*.bin 160 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -319,15 +319,15 @@ begin_test "migrate info (top)"
# Ensure command reports no more entries than specified by --top argument.
diff -u <(git lfs migrate info --everything --top=2 2>&1 | tail -n 2) <(cat <<-EOF
*.md 170 B 2/2 files(s) 100%
*.bin 160 B 1/1 files(s) 100%
*.md 170 B 2/2 files 100%
*.bin 160 B 1/1 file 100%
EOF)
# Ensure command succeeds if --top argument is greater than total number of entries.
diff -u <(git lfs migrate info --everything --top=10 2>&1 | tail -n 3) <(cat <<-EOF
*.md 170 B 2/2 files(s) 100%
*.bin 160 B 1/1 files(s) 100%
*.txt 120 B 1/1 files(s) 100%
*.md 170 B 2/2 files 100%
*.bin 160 B 1/1 file 100%
*.txt 120 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -345,8 +345,8 @@ begin_test "migrate info (given unit)"
original_head="$(git rev-parse HEAD)"
diff -u <(git lfs migrate info --unit=kb 2>&1 | tail -n 2) <(cat <<-EOF
*.md 0.1 1/1 files(s) 100%
*.txt 0.1 1/1 files(s) 100%
*.md 0.1 1/1 file 100%
*.txt 0.1 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -397,8 +397,8 @@ begin_test "migrate info (no-extension files)"
original_feature="$(git rev-parse refs/heads/my-feature)"
diff -u <(git lfs migrate info --everything 2>&1 | tail -n 2) <(cat <<-EOF
no_extension 220 B 2/2 files(s) 100%
*.txt 170 B 2/2 files(s) 100%
no_extension 220 B 2/2 files 100%
*.txt 170 B 2/2 files 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -420,8 +420,8 @@ begin_test "migrate info (--everything)"
original_feature="$(git rev-parse refs/heads/my-feature)"
diff -u <(git lfs migrate info --everything 2>&1 | tail -n 2) <(cat <<-EOF
*.md 170 B 2/2 files(s) 100%
*.txt 120 B 1/1 files(s) 100%
*.md 170 B 2/2 files 100%
*.txt 120 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -459,9 +459,9 @@ begin_test "migrate info (all files tracked)"
# Ensure default command reports objects if all files are tracked by LFS.
diff -u <(git lfs migrate info 2>&1 | tail -n 3) <(cat <<-EOF
*.gitattributes 83 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 file 100%
LFS Objects 260 B 2/2 files(s) 100%
LFS Objects 260 B 2/2 files 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -480,9 +480,9 @@ begin_test "migrate info (all files tracked, --pointers=follow)"
# Ensure "follow" command reports objects if all files are tracked by LFS.
diff -u <(git lfs migrate info --pointers=follow 2>&1 | tail -n 3) <(cat <<-EOF
*.gitattributes 83 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 file 100%
LFS Objects 260 B 2/2 files(s) 100%
LFS Objects 260 B 2/2 files 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -501,9 +501,9 @@ begin_test "migrate info (all files tracked, --pointers=no-follow)"
# Ensure "no-follow" command reports pointers if all files are tracked by LFS.
diff -u <(git lfs migrate info --pointers=no-follow 2>&1 | tail -n 3) <(cat <<-EOF
*.md 128 B 1/1 files(s) 100%
*.txt 128 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 files(s) 100%
*.md 128 B 1/1 file 100%
*.txt 128 B 1/1 file 100%
*.gitattributes 83 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -522,7 +522,7 @@ begin_test "migrate info (all files tracked, --pointers=ignore)"
# Ensure "ignore" command reports no objects if all files are tracked by LFS.
diff -u <(git lfs migrate info --pointers=ignore 2>&1 | tail -n 1) <(cat <<-EOF
*.gitattributes 83 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -559,9 +559,9 @@ begin_test "migrate info (all files tracked, --everything)"
# Ensure default command reports objects if all files are tracked by LFS.
diff -u <(git lfs migrate info --everything 2>&1 | tail -n 3) <(cat <<-EOF
*.gitattributes 83 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 file 100%
LFS Objects 290 B 3/3 files(s) 100%
LFS Objects 290 B 3/3 files 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -583,9 +583,9 @@ begin_test "migrate info (all files tracked, --everything and --pointers=follow)
# Ensure "follow" command reports objects if all files are tracked by LFS.
diff -u <(git lfs migrate info --everything --pointers=follow 2>&1 | tail -n 3) <(cat <<-EOF
*.gitattributes 83 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 file 100%
LFS Objects 290 B 3/3 files(s) 100%
LFS Objects 290 B 3/3 files 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -607,9 +607,9 @@ begin_test "migrate info (all files tracked, --everything and --pointers=no-foll
# Ensure "no-follow" command reports pointers if all files are tracked by LFS.
diff -u <(git lfs migrate info --everything --pointers=no-follow 2>&1 | tail -n 3) <(cat <<-EOF
*.md 255 B 2/2 files(s) 100%
*.txt 128 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 files(s) 100%
*.md 255 B 2/2 files 100%
*.txt 128 B 1/1 file 100%
*.gitattributes 83 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -631,7 +631,7 @@ begin_test "migrate info (all files tracked, --everything and --pointers=ignore)
# Ensure "ignore" command reports no objects if all files are tracked by LFS.
diff -u <(git lfs migrate info --everything --pointers=ignore 2>&1 | tail -n 1) <(cat <<-EOF
*.gitattributes 83 B 1/1 files(s) 100%
*.gitattributes 83 B 1/1 file 100%
EOF)
migrated_main="$(git rev-parse refs/heads/main)"
@ -673,8 +673,8 @@ begin_test "migrate info (potential fixup)"
# Ensure command reports files which should be tracked but have not been
# stored properly as LFS pointers.
diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF
*.txt 120 B 1/1 files(s) 100%
*.gitattributes 42 B 1/1 files(s) 100%
*.txt 120 B 1/1 file 100%
*.gitattributes 42 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -694,7 +694,7 @@ begin_test "migrate info (potential fixup, --fixup)"
# Ensure "fixup" command reports files which should be tracked but have not
# been stored properly as LFS pointers, and ignores .gitattributes files.
diff -u <(git lfs migrate info --fixup 2>&1 | tail -n 1) <(cat <<-EOF
*.txt 120 B 1/1 files(s) 100%
*.txt 120 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -714,8 +714,8 @@ begin_test "migrate info (potential fixup, complex nested)"
# Ensure command reports the file which should be tracked but has not been
# stored properly (a.txt) and the file which is not tracked (dir/b.txt).
diff -u <(git lfs migrate info 2>&1 | tail -n 2) <(cat <<-EOF
*.gitattributes 69 B 2/2 files(s) 100%
*.txt 2 B 2/2 files(s) 100%
*.gitattributes 69 B 2/2 files 100%
*.txt 2 B 2/2 files 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -736,7 +736,7 @@ begin_test "migrate info (potential fixup, complex nested, --fixup)"
# been stored properly (a.txt), and ignores .gitattributes files and
# the file which is not tracked (dir/b.txt).
diff -u <(git lfs migrate info --fixup 2>&1 | tail -n 1) <(cat <<-EOF
*.txt 1 B 1/1 files(s) 100%
*.txt 1 B 1/1 file 100%
EOF)
migrated_head="$(git rev-parse HEAD)"
@ -773,7 +773,7 @@ begin_test "migrate info (--everything with args)"
exit 1
fi
grep -q "fatal: cannot use --everything with explicit reference arguments" \
grep -q "Cannot use --everything with explicit reference arguments" \
migrate.log
)
end_test
@ -792,7 +792,7 @@ begin_test "migrate info (--everything with --include-ref)"
exit 1
fi
grep -q "fatal: cannot use --everything with --include-ref or --exclude-ref" \
grep -q "Cannot use --everything with --include-ref or --exclude-ref" \
migrate.log
)
end_test
@ -811,7 +811,7 @@ begin_test "migrate info (--everything with --exclude-ref)"
exit 1
fi
grep -q "fatal: cannot use --everything with --include-ref or --exclude-ref" \
grep -q "Cannot use --everything with --include-ref or --exclude-ref" \
migrate.log
)
end_test
@ -829,7 +829,7 @@ begin_test "migrate info (--pointers invalid)"
exit 1
fi
grep -q "fatal: unsupported --pointers option value" migrate.log
grep -q "Unsupported --pointers option value" migrate.log
)
end_test
@ -847,7 +847,7 @@ begin_test "migrate info (--fixup, --pointers=follow)"
exit 1
fi
grep -q "fatal: cannot use --fixup with --pointers=follow" migrate.log
grep -q "Cannot use --fixup with --pointers=follow" migrate.log
)
end_test
@ -865,7 +865,7 @@ begin_test "migrate info (--fixup, --pointers=no-follow)"
exit 1
fi
grep -q "fatal: cannot use --fixup with --pointers=no-follow" migrate.log
grep -q "Cannot use --fixup with --pointers=no-follow" migrate.log
)
end_test
@ -883,7 +883,7 @@ begin_test "migrate info (--fixup, --include)"
exit 1
fi
grep -q "fatal: cannot use --fixup with --include, --exclude" migrate.log
grep -q "Cannot use --fixup with --include, --exclude" migrate.log
)
end_test
@ -901,6 +901,6 @@ begin_test "migrate info (--fixup, --exclude)"
exit 1
fi
grep -q "fatal: cannot use --fixup with --include, --exclude" migrate.log
grep -q "Cannot use --fixup with --include, --exclude" migrate.log
)
end_test

@ -750,7 +750,7 @@ begin_test "pre-push with their lock on lfs file"
grep "Unable to push locked files" push.log
grep "* locked_theirs.dat - Git LFS Tests" push.log
grep "ERROR: Cannot update locked files." push.log
grep "Cannot update locked files." push.log
refute_server_object "$reponame" "$(calc_oid_file locked_theirs.dat)"
popd >/dev/null
)
@ -806,7 +806,7 @@ begin_test "pre-push with their lock on non-lfs lockable file"
grep "Unable to push locked files" push.log
grep "* large_locked_theirs.dat - Git LFS Tests" push.log
grep "* tiny_locked_theirs.dat - Git LFS Tests" push.log
grep "ERROR: Cannot update locked files." push.log
grep "Cannot update locked files." push.log
refute_server_object "$reponame" "$(calc_oid_file large_locked_theirs.dat)"
refute_server_object "$reponame" "$(calc_oid_file tiny_locked_theirs.dat)"
@ -1135,7 +1135,7 @@ begin_test "pre-push locks verify 403 with verification enabled"
git config "lfs.$endpoint.locksverify" true
git push origin main 2>&1 | tee push.log
grep "ERROR: Authentication error" push.log
grep "error: Authentication error" push.log
refute_server_object "$reponame" "$contents_oid"
[ "true" = "$(git config "lfs.$endpoint.locksverify")" ]
@ -1188,7 +1188,7 @@ begin_test "pre-push locks verify 403 with verification unset"
[ -z "$(git config "lfs.$endpoint.locksverify")" ]
git push origin main 2>&1 | tee push.log
grep "WARNING: Authentication error" push.log
grep "warning: Authentication error" push.log
assert_server_object "$reponame" "$contents_oid"
[ -z "$(git config "lfs.$endpoint.locksverify")" ]

@ -78,30 +78,30 @@ begin_test "prune worktree"
# before worktree, everything except current checkout would be pruned
git lfs prune --dry-run 2>&1 | tee prune.log
grep "prune: 6 local object(s), 1 retained, done." prune.log
grep "prune: 5 file(s) would be pruned" prune.log
grep "prune: 6 local objects, 1 retained, done." prune.log
grep "prune: 5 files would be pruned" prune.log
# now add worktrees on the other branches
git worktree add "../w1_$reponame" "branch1"
git worktree add "../w2_$reponame" "branch2"
# now should retain all 3 heads
git lfs prune --dry-run 2>&1 | tee prune.log
grep "prune: 6 local object(s), 3 retained, done." prune.log
grep "prune: 3 file(s) would be pruned" prune.log
grep "prune: 6 local objects, 3 retained, done." prune.log
grep "prune: 3 files would be pruned" prune.log
# also check that the same result is obtained when inside worktree rather than main
cd "../w1_$reponame"
git lfs prune --dry-run 2>&1 | tee prune.log
grep "prune: 6 local object(s), 3 retained, done." prune.log
grep "prune: 3 file(s) would be pruned" prune.log
grep "prune: 6 local objects, 3 retained, done." prune.log
grep "prune: 3 files would be pruned" prune.log
# now remove a worktree & prove that frees up 1 head while keeping the other
cd "../$reponame"
rm -rf "../w1_$reponame"
git worktree prune # required to get git to tidy worktree metadata
git lfs prune --dry-run 2>&1 | tee prune.log
grep "prune: 6 local object(s), 2 retained, done." prune.log
grep "prune: 4 file(s) would be pruned" prune.log
grep "prune: 6 local objects, 2 retained, done." prune.log
grep "prune: 4 files would be pruned" prune.log
)

@ -71,8 +71,8 @@ begin_test "prune unreferenced and old"
git lfs prune --dry-run --verbose 2>&1 | tee prune.log
grep "prune: 5 local object(s), 3 retained" prune.log
grep "prune: 2 file(s) would be pruned" prune.log
grep "prune: 5 local objects, 3 retained" prune.log
grep "prune: 2 files would be pruned" prune.log
grep "$oid_oldandpushed" prune.log
grep "$oid_unreferenced" prune.log
@ -88,7 +88,7 @@ begin_test "prune unreferenced and old"
git config lfs.fetchrecentcommitsdays 0
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 3 local object(s), 2 retained" prune.log
grep "prune: 3 local objects, 2 retained" prune.log
grep "prune: Deleting objects: 100% (1/1), done." prune.log
grep "$oid_retain1" prune.log
refute_local_object "$oid_retain1"
@ -176,7 +176,7 @@ begin_test "prune keep unpushed"
git push origin main
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 6 local object(s), 4 retained" prune.log
grep "prune: 6 local objects, 4 retained" prune.log
grep "prune: Deleting objects: 100% (2/2), done." prune.log
grep "$oid_keepunpushedhead1" prune.log
grep "$oid_keepunpushedhead2" prune.log
@ -192,7 +192,7 @@ begin_test "prune keep unpushed"
git push origin main
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 4 local object(s), 1 retained" prune.log
grep "prune: 4 local objects, 1 retained" prune.log
grep "prune: Deleting objects: 100% (3/3), done." prune.log
grep "$oid_keepunpushedbranch1" prune.log
grep "$oid_keepunpushedbranch2" prune.log
@ -327,7 +327,7 @@ begin_test "prune keep recent"
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 11 local object(s), 6 retained, done." prune.log
grep "prune: 11 local objects, 6 retained, done." prune.log
grep "prune: Deleting objects: 100% (5/5), done." prune.log
grep "$oid_prunecommitoldbranch" prune.log
grep "$oid_prunecommitoldbranch2" prune.log
@ -351,7 +351,7 @@ begin_test "prune keep recent"
# still retain tips of branches
git config lfs.fetchrecentcommitsdays 0
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 6 local object(s), 3 retained, done." prune.log
grep "prune: 6 local objects, 3 retained, done." prune.log
grep "prune: Deleting objects: 100% (3/3), done." prune.log
assert_local_object "$oid_keephead" "${#content_keephead}"
assert_local_object "$oid_keeprecentbranch1tip" "${#content_keeprecentbranch1tip}"
@ -363,7 +363,7 @@ begin_test "prune keep recent"
# now don't include any recent refs at all, only keep HEAD
git config lfs.fetchrecentrefsdays 0
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 3 local object(s), 1 retained, done." prune.log
grep "prune: 3 local objects, 1 retained, done." prune.log
grep "prune: Deleting objects: 100% (2/2), done." prune.log
assert_local_object "$oid_keephead" "${#content_keephead}"
refute_local_object "$oid_keeprecentbranch1tip"
@ -414,7 +414,7 @@ begin_test "prune remote tests"
# can never prune with no remote
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 4 local object(s), 4 retained, done." prune.log
grep "prune: 4 local objects, 4 retained, done." prune.log
# also make sure nothing is pruned when remote is not origin
@ -426,15 +426,15 @@ begin_test "prune remote tests"
git push not_origin main
git lfs prune --verbose 2>&1 | tee prune.log
grep "prune: 4 local object(s), 4 retained, done." prune.log
grep "prune: 4 local objects, 4 retained, done." prune.log
# now set the prune remote to be not_origin, should now prune
# do a dry run so we can also verify
git config lfs.pruneremotetocheck not_origin
git lfs prune --verbose --dry-run 2>&1 | tee prune.log
grep "prune: 4 local object(s), 1 retained, done." prune.log
grep "prune: 3 file(s) would be pruned" prune.log
grep "prune: 4 local objects, 1 retained, done." prune.log
grep "prune: 3 files would be pruned" prune.log
@ -496,8 +496,8 @@ begin_test "prune verify"
# confirm that it would prune with verify when no issues
git lfs prune --dry-run --verify-remote --verbose 2>&1 | tee prune.log
grep "prune: 4 local object(s), 1 retained, 3 verified with remote, done." prune.log
grep "prune: 3 file(s) would be pruned" prune.log
grep "prune: 4 local objects, 1 retained, 3 verified with remote, done." prune.log
grep "prune: 3 files would be pruned" prune.log
grep "$oid_commit3" prune.log
grep "$oid_commit2_failverify" prune.log
grep "$oid_commit1" prune.log
@ -506,7 +506,7 @@ begin_test "prune verify"
delete_server_object "remote_$reponame" "$oid_commit2_failverify"
# this should now fail
git lfs prune --verify-remote 2>&1 | tee prune.log
grep "prune: 4 local object(s), 1 retained, 2 verified with remote, done." prune.log
grep "prune: 4 local objects, 1 retained, 2 verified with remote, done." prune.log
grep "missing on remote:" prune.log
grep "$oid_commit2_failverify" prune.log
# Nothing should have been deleted
@ -518,7 +518,7 @@ begin_test "prune verify"
git config lfs.pruneverifyremotealways true
# no verify arg but should be pulled from global
git lfs prune 2>&1 | tee prune.log
grep "prune: 4 local object(s), 1 retained, 2 verified with remote, done." prune.log
grep "prune: 4 local objects, 1 retained, 2 verified with remote, done." prune.log
grep "missing on remote:" prune.log
grep "$oid_commit2_failverify" prune.log
# Nothing should have been deleted
@ -528,7 +528,7 @@ begin_test "prune verify"
# now try overriding the global option
git lfs prune --no-verify-remote 2>&1 | tee prune.log
grep "prune: 4 local object(s), 1 retained, done." prune.log
grep "prune: 4 local objects, 1 retained, done." prune.log
grep "prune: Deleting objects: 100% (3/3), done." prune.log
# should now have been deleted
refute_local_object "$oid_commit1"

@ -137,7 +137,7 @@ begin_test "unlock multiple files"
git lfs lock a.dat
git lfs lock b.dat
git lfs unlock *.dat >log 2>&1
grep "Usage:" log && exit 1
grep "Exactly one of --id or a set of paths must be provided" log && exit 1
true
)
end_test
@ -321,7 +321,7 @@ begin_test "unlocking a lock without sufficient info"
assert_server_lock "$reponame" "$id"
git lfs unlock 2>&1 | tee unlock.log
grep "Usage: git lfs unlock" unlock.log
grep "Exactly one of --id or a set of paths must be provided" unlock.log
assert_server_lock "$reponame" "$id"
)
end_test
@ -372,7 +372,7 @@ begin_test "unlocking a lock with ambiguous arguments"
exit 1
fi
grep "Usage:" unlock.log
grep "Exactly one of --id or a set of paths must be provided" unlock.log
assert_server_lock "$reponame" "$id"
)
end_test
@ -393,7 +393,7 @@ begin_test "unlocking a lock while uncommitted with --force"
# should allow with --force
git lfs unlock --force "modforce.dat" 2>&1 | tee unlock.log
grep "Warning: unlocking with uncommitted changes" unlock.log
grep "warning: unlocking with uncommitted changes" unlock.log
refute_server_lock "$reponame" "$id"
)
end_test
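The test expectations above change because the prune summaries now use real plural forms ("4 local objects") rather than the "object(s)" suffix, and the unlock tests now match the specific error message instead of the usage text, with the uncommitted-changes warning lowercased. A minimal sketch of plural-aware message construction, assuming the git-lfs module context; the package name, the helper, and the GetN call are illustrative assumptions following gettext's ngettext convention and are not shown in the hunks above:

package sketch

import (
	"fmt"

	"github.com/git-lfs/git-lfs/v3/tr"
)

// pruneSummary shows why the expected output loses the "(s)" suffix once
// plural handling moves into the translation layer. GetN is an assumed
// ngettext-style call, not one shown in the hunks above.
func pruneSummary(local, retained int) string {
	// A plural-aware catalogue carries "%d local object" and
	// "%d local objects" as distinct forms, so the old "object(s)"
	// compromise, and the greps that matched it, can go away.
	return fmt.Sprintf("prune: %s, %d retained",
		tr.Tr.GetN("%d local object", "%d local objects", local, local),
		retained)
}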

@ -18,6 +18,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/filepathfilter"
"github.com/git-lfs/git-lfs/v3/tr"
)
// FileOrDirExists determines if a file/dir exists, returns IsDir() results too.
@ -76,12 +77,12 @@ func RenameFileCopyPermissions(srcfile, destfile string) error {
return err
} else {
if err := os.Chmod(srcfile, info.Mode()); err != nil {
return fmt.Errorf("can't set filemode on file %q: %v", srcfile, err)
return errors.New(tr.Tr.Get("can't set filemode on file %q: %v", srcfile, err))
}
}
if err := RobustRename(srcfile, destfile); err != nil {
return fmt.Errorf("cannot replace %q with %q: %v", destfile, srcfile, err)
return errors.New(tr.Tr.Get("cannot replace %q with %q: %v", destfile, srcfile, err))
}
return nil
}
@ -184,14 +185,14 @@ func ExpandPath(path string, expand bool) (string, error) {
}
if err != nil {
return "", errors.Wrapf(err, "could not find user %s", username)
return "", errors.Wrapf(err, tr.Tr.Get("could not find user %s", username))
}
homedir := who.HomeDir
if expand {
homedir, err = filepath.EvalSymlinks(homedir)
if err != nil {
return "", errors.Wrapf(err, "cannot eval symlinks for %s", homedir)
return "", errors.Wrapf(err, tr.Tr.Get("cannot eval symlinks for %s", homedir))
}
}
return filepath.Join(homedir, path[len(username)+1:]), nil
@ -230,7 +231,7 @@ func VerifyFileHash(oid, path string) error {
calcOid := hex.EncodeToString(h.Sum(nil))
if calcOid != oid {
return fmt.Errorf("file %q has an invalid hash %s, expected %s", path, calcOid, oid)
return errors.New(tr.Tr.Get("file %q has an invalid hash %s, expected %s", path, calcOid, oid))
}
return nil
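The Go hunks here and in the files below follow one mechanical pattern: untranslated fmt.Errorf calls become errors.New(tr.Tr.Get(...)), and messages given to errors.Wrap and errors.Wrapf pass through tr.Tr.Get, so every user-facing string is translated and formatted before it is wrapped. A minimal sketch of that pattern, assuming the git-lfs module's errors and tr packages; the package name and the verifySize helper are illustrative, not taken from the diff:

package sketch

import (
	"os"

	"github.com/git-lfs/git-lfs/v3/errors"
	"github.com/git-lfs/git-lfs/v3/tr"
)

// verifySize rejects a file whose size differs from the expected value,
// building its error message the same way the hunks above do.
func verifySize(path string, want int64) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if info.Size() != want {
		// tr.Tr.Get translates and formats in one step; errors.New then
		// wraps the finished string, replacing the old fmt.Errorf call.
		return errors.New(tr.Tr.Get("file %q has size %d, expected %d",
			path, info.Size(), want))
	}
	return nil
}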

@ -10,6 +10,7 @@ import (
"os"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
)
const (
@ -123,16 +124,16 @@ func Spool(to io.Writer, from io.Reader, dir string) (n int64, err error) {
// file, and spool the remaining contents there.
tmp, err := ioutil.TempFile(dir, "")
if err != nil {
return 0, errors.Wrap(err, "spool tmp")
return 0, errors.Wrap(err, tr.Tr.Get("Unable to create temporary file for spooling"))
}
defer os.Remove(tmp.Name())
if n, err = io.Copy(tmp, from); err != nil {
return n, errors.Wrap(err, "unable to spool")
return n, errors.Wrap(err, tr.Tr.Get("unable to spool"))
}
if _, err = tmp.Seek(0, io.SeekStart); err != nil {
return 0, errors.Wrap(err, "unable to seek")
return 0, errors.Wrap(err, tr.Tr.Get("unable to seek"))
}
// The spooled contents will now be the concatenation of the

@ -2,12 +2,12 @@ package tools
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/pkg/errors"
)
@ -20,7 +20,7 @@ func Getwd() (dir string, err error) {
if isCygwin() {
dir, err = translateCygwinPath(dir)
if err != nil {
return "", errors.Wrap(err, "convert wd to cygwin")
return "", errors.Wrap(err, tr.Tr.Get("error converting working directory to cygwin"))
}
}
@ -48,7 +48,7 @@ func translateCygwinPath(path string) (string, error) {
if _, ok := err.(*exec.Error); ok {
return path, nil
}
return path, fmt.Errorf("failed to translate path from cygwin to windows: %s", buf.String())
return path, errors.New(tr.Tr.Get("failed to translate path from cygwin to windows: %s", buf.String()))
}
return output, nil
}

@ -4,7 +4,6 @@
package tools
import (
"fmt"
"io"
"io/ioutil"
"os"
@ -12,6 +11,7 @@ import (
"strings"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
"golang.org/x/sys/unix"
)
@ -55,7 +55,7 @@ func checkCloneFileSupported() bool {
// If check failed (e.g. directory is read-only), returns err.
func CheckCloneFileSupported(dir string) (supported bool, err error) {
if !cloneFileSupported {
return false, errors.New("unsupported OS version. >= 10.12.x Sierra required")
return false, errors.New(tr.Tr.Get("unsupported OS version. >= 10.12.x Sierra required"))
}
src, err := ioutil.TempFile(dir, "src")
@ -88,7 +88,7 @@ func CloneFile(_ io.Writer, _ io.Reader) (bool, error) {
func CloneFileByPath(dst, src string) (bool, error) {
if !cloneFileSupported {
return false, &CloneFileError{Unsupported: true, errorString: "clonefile is not supported"}
return false, &CloneFileError{Unsupported: true, errorString: tr.Tr.Get("clonefile is not supported")}
}
if FileExists(dst) {
@ -109,7 +109,7 @@ func cloneFileSyscall(dst, src string) *CloneFileError {
if err != nil {
return &CloneFileError{
Unsupported: err == unix.ENOTSUP,
errorString: fmt.Sprintf("%s. from %v to %v", err, src, dst),
errorString: tr.Tr.Get("error cloning from %v to %v: %s", src, dst, err),
}
}

@ -7,10 +7,11 @@ import (
"io"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tr"
)
func CheckCloneFileSupported(dir string) (supported bool, err error) {
return false, errors.New("unsupported platform")
return false, errors.New(tr.Tr.Get("unsupported platform"))
}
func CloneFile(writer io.Writer, reader io.Reader) (bool, error) {

@ -7,8 +7,10 @@ import (
"strings"
"sync"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/fs"
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -208,7 +210,7 @@ func (a *adapterBase) newHTTPRequest(method string, rel *Action) (*http.Request,
if !httpRE.MatchString(href) {
urlfragment := strings.SplitN(href, "?", 2)[0]
return nil, fmt.Errorf("missing protocol: %q", urlfragment)
return nil, errors.New(tr.Tr.Get("missing protocol: %q", urlfragment))
}
req, err := http.NewRequest(method, href, nil)

@ -1,13 +1,13 @@
package tq
import (
"fmt"
"time"
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -83,7 +83,7 @@ func (c *tqClient) Batch(remote string, bReq *batchRequest) (*BatchResponse, err
req, err := c.NewRequest("POST", bRes.endpoint, "objects/batch", bReq)
if err != nil {
return nil, errors.Wrap(err, "batch request")
return nil, errors.Wrap(err, tr.Tr.Get("batch request"))
}
tracerx.Printf("api: batch %d files", len(bReq.Objects))
@ -92,15 +92,15 @@ func (c *tqClient) Batch(remote string, bReq *batchRequest) (*BatchResponse, err
res, err := c.DoAPIRequestWithAuth(remote, lfshttp.WithRetries(req, c.MaxRetries()))
if err != nil {
tracerx.Printf("api error: %s", err)
return nil, errors.Wrap(err, "batch response")
return nil, errors.Wrap(err, tr.Tr.Get("batch response"))
}
if err := lfshttp.DecodeJSON(res, bRes); err != nil {
return bRes, errors.Wrap(err, "batch response")
return bRes, errors.Wrap(err, tr.Tr.Get("batch response"))
}
if bRes.HashAlgorithm != "" && bRes.HashAlgorithm != "sha256" {
return bRes, errors.Wrap(fmt.Errorf("unsupported hash algorithm"), "batch response")
return bRes, errors.Wrap(errors.New(tr.Tr.Get("unsupported hash algorithm")), tr.Tr.Get("batch response"))
}
if res.StatusCode != 200 {

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/xeipuuv/gojsonschema"
@ -167,7 +168,7 @@ func getSchema(wd, relpath string) *sourcedSchema {
abspath := filepath.ToSlash(filepath.Join(wd, relpath))
s, err := gojsonschema.NewSchema(gojsonschema.NewReferenceLoader(fmt.Sprintf("file:///%s", abspath)))
if err != nil {
fmt.Printf("schema load error for %q: %+v\n", relpath, err)
tr.Tr.Get("schema load error for %q: %+v\n", relpath, err)
}
return &sourcedSchema{Source: relpath, Schema: s}
}
@ -184,6 +185,6 @@ func assertSchema(t *testing.T, schema *sourcedSchema, dataLoader gojsonschema.J
for _, resErr := range resErrors {
valErrors = append(valErrors, resErr.String())
}
t.Errorf("Schema: %s\n%s", schema.Source, strings.Join(valErrors, "\n"))
t.Errorf(tr.Tr.Get("Schema: %s\n%s", schema.Source, strings.Join(valErrors, "\n")))
}
}

@ -12,6 +12,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -116,7 +117,7 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk
return err
}
if rel == nil {
return errors.Errorf("Object %s not found on the server.", t.Oid)
return errors.Errorf(tr.Tr.Get("Object %s not found on the server.", t.Oid))
}
req, err := a.newHTTPRequest("GET", rel)
@ -241,15 +242,15 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk
}
written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb)
if err != nil {
return errors.Wrapf(err, "cannot write data to tempfile %q", dlfilename)
return errors.Wrapf(err, tr.Tr.Get("cannot write data to temporary file %q", dlfilename))
}
if actual := hasher.Hash(); actual != t.Oid {
return fmt.Errorf("expected OID %s, got %s after %d bytes written", t.Oid, actual, written)
return errors.New(tr.Tr.Get("expected OID %s, got %s after %d bytes written", t.Oid, actual, written))
}
if err := dlFile.Close(); err != nil {
return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err)
return errors.New(tr.Tr.Get("can't close temporary file %q: %v", dlfilename, err))
}
err = tools.RenameFileCopyPermissions(dlfilename, t.Path)

@ -13,6 +13,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
)
const (
@ -46,7 +47,7 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres
return err
}
if rel == nil {
return errors.Errorf("No upload action for object: %s", t.Oid)
return errors.Errorf(tr.Tr.Get("No upload action for object: %s", t.Oid))
}
req, err := a.newHTTPRequest("PUT", rel)
@ -136,16 +137,16 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres
// A status code of 403 likely means that an authentication token for the
// upload has expired. This can be safely retried.
if res.StatusCode == 403 {
err = errors.New("http: received status 403")
err = errors.New(tr.Tr.Get("http: received status 403"))
return errors.NewRetriableError(err)
}
if res.StatusCode > 299 {
return errors.Wrapf(nil, "Invalid status for %s %s: %d",
return errors.Wrapf(nil, tr.Tr.Get("Invalid status for %s %s: %d",
req.Method,
strings.SplitN(req.URL.String(), "?", 2)[0],
res.StatusCode,
)
))
}
io.Copy(ioutil.Discard, res.Body)
@ -167,12 +168,12 @@ func (a *adapterBase) setContentTypeFor(req *http.Request, r io.ReadSeeker) erro
buffer := make([]byte, 512)
n, err := r.Read(buffer)
if err != nil && err != io.EOF {
return errors.Wrap(err, "content type detect")
return errors.Wrap(err, tr.Tr.Get("content type detection error"))
}
contentType = http.DetectContentType(buffer[:n])
if _, err := r.Seek(0, io.SeekStart); err != nil {
return errors.Wrap(err, "content type rewind")
return errors.Wrap(err, tr.Tr.Get("content type rewind failure"))
}
}

@ -14,6 +14,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/fs"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/git-lfs/git-lfs/v3/subprocess"
"github.com/rubyist/tracerx"
@ -127,11 +128,11 @@ func (a *customAdapter) WorkerStarting(workerNum int) (interface{}, error) {
cmd := subprocess.ExecCommand(cmdName, cmdArgs...)
outp, err := cmd.StdoutPipe()
if err != nil {
return nil, fmt.Errorf("failed to get stdout for custom transfer command %q remote: %v", a.path, err)
return nil, errors.New(tr.Tr.Get("failed to get stdout for custom transfer command %q remote: %v", a.path, err))
}
inp, err := cmd.StdinPipe()
if err != nil {
return nil, fmt.Errorf("failed to get stdin for custom transfer command %q remote: %v", a.path, err)
return nil, errors.New(tr.Tr.Get("failed to get stdin for custom transfer command %q remote: %v", a.path, err))
}
// Capture stderr to trace
tracer := &traceWriter{}
@ -139,7 +140,7 @@ func (a *customAdapter) WorkerStarting(workerNum int) (interface{}, error) {
cmd.Stderr = tracer
err = cmd.Start()
if err != nil {
return nil, fmt.Errorf("failed to start custom transfer command %q remote: %v", a.path, err)
return nil, errors.New(tr.Tr.Get("failed to start custom transfer command %q remote: %v", a.path, err))
}
// Set up buffered reader/writer since we operate on lines
ctx := &customAdapterWorkerContext{workerNum, cmd, outp, bufio.NewReader(outp), inp, tracer}
@ -155,7 +156,7 @@ func (a *customAdapter) WorkerStarting(workerNum int) (interface{}, error) {
}
if resp.Error != nil {
a.abortWorkerProcess(ctx)
return nil, fmt.Errorf("error initializing custom adapter %q worker %d: %v", a.name, workerNum, resp.Error)
return nil, errors.New(tr.Tr.Get("error initializing custom adapter %q worker %d: %v", a.name, workerNum, resp.Error))
}
a.Trace("xfer: started custom adapter process %q for worker %d OK", a.path, workerNum)
@ -227,7 +228,7 @@ func (a *customAdapter) shutdownWorkerProcess(ctx *customAdapterWorkerContext) e
case err := <-finishChan:
return err
case <-time.After(30 * time.Second):
return fmt.Errorf("timeout while shutting down worker process %d", ctx.workerNum)
return errors.New(tr.Tr.Get("timeout while shutting down worker process %d", ctx.workerNum))
}
}
@ -254,12 +255,12 @@ func (a *customAdapter) WorkerEnding(workerNum int, ctx interface{}) {
func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error {
if ctx == nil {
return fmt.Errorf("custom transfer %q was not properly initialized, see previous errors", a.name)
return errors.New(tr.Tr.Get("custom transfer %q was not properly initialized, see previous errors", a.name))
}
customCtx, ok := ctx.(*customAdapterWorkerContext)
if !ok {
return fmt.Errorf("context object for custom transfer %q was of the wrong type", a.name)
return errors.New(tr.Tr.Get("context object for custom transfer %q was of the wrong type", a.name))
}
var authCalled bool
@ -268,7 +269,7 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall
return err
}
if rel == nil && !a.standalone {
return errors.Errorf("Object %s not found on the server.", t.Oid)
return errors.Errorf(tr.Tr.Get("Object %s not found on the server.", t.Oid))
}
var req *customAdapterTransferRequest
if a.direction == Upload {
@ -292,7 +293,7 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall
case "progress":
// Progress
if resp.Oid != t.Oid {
return fmt.Errorf("unexpected oid %q in response, expecting %q", resp.Oid, t.Oid)
return errors.New(tr.Tr.Get("unexpected OID %q in response, expecting %q", resp.Oid, t.Oid))
}
if cb != nil {
cb(t.Name, t.Size, resp.BytesSoFar, resp.BytesSinceLast)
@ -301,19 +302,19 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall
case "complete":
// Download/Upload complete
if resp.Oid != t.Oid {
return fmt.Errorf("unexpected oid %q in response, expecting %q", resp.Oid, t.Oid)
return errors.New(tr.Tr.Get("unexpected OID %q in response, expecting %q", resp.Oid, t.Oid))
}
if resp.Error != nil {
return fmt.Errorf("error transferring %q: %v", t.Oid, resp.Error)
return errors.New(tr.Tr.Get("error transferring %q: %v", t.Oid, resp.Error))
}
if a.direction == Download {
// So we don't have to blindly trust external providers, check SHA
if err = tools.VerifyFileHash(t.Oid, resp.Path); err != nil {
return fmt.Errorf("downloaded file failed checks: %v", err)
return errors.New(tr.Tr.Get("downloaded file failed checks: %v", err))
}
// Move file to final location
if err = tools.RenameFileCopyPermissions(resp.Path, t.Path); err != nil {
return fmt.Errorf("failed to copy downloaded file: %v", err)
return errors.New(tr.Tr.Get("failed to copy downloaded file: %v", err))
}
} else if a.direction == Upload {
if err = verifyUpload(a.apiClient, a.remote, t); err != nil {
@ -323,7 +324,7 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall
wasAuthOk = true
complete = true
default:
return fmt.Errorf("invalid message %q from custom adapter %q", resp.Event, a.name)
return errors.New(tr.Tr.Get("invalid message %q from custom adapter %q", resp.Event, a.name))
}
// Fall through from both progress and completion messages
// Call auth on first progress or success to free up other workers

@ -1,6 +1,6 @@
package tq
import "fmt"
import "github.com/git-lfs/git-lfs/v3/tr"
type MalformedObjectError struct {
Name string
@ -23,7 +23,7 @@ func (e MalformedObjectError) Corrupt() bool { return !e.Missing() }
func (e MalformedObjectError) Error() string {
if e.Corrupt() {
return fmt.Sprintf("corrupt object: %s (%s)", e.Name, e.Oid)
return tr.Tr.Get("corrupt object: %s (%s)", e.Name, e.Oid)
}
return fmt.Sprintf("missing object: %s (%s)", e.Name, e.Oid)
return tr.Tr.Get("missing object: %s (%s)", e.Name, e.Oid)
}

@ -13,6 +13,7 @@ import (
"github.com/git-lfs/git-lfs/v3/tasklog"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tools/humanize"
"github.com/git-lfs/git-lfs/v3/tr"
)
// Meter provides a progress bar type output for the TransferQueue. It
@ -54,11 +55,11 @@ func (m *Meter) LoggerFromEnv(os env) *tools.SyncWriter {
func (m *Meter) LoggerToFile(name string) *tools.SyncWriter {
printErr := func(err string) {
fmt.Fprintf(os.Stderr, "Error creating progress logger: %s\n", err)
fmt.Fprintf(os.Stderr, tr.Tr.Get("Error creating progress logger: %s\n", err))
}
if !filepath.IsAbs(name) {
printErr("GIT_LFS_PROGRESS must be an absolute path")
printErr(tr.Tr.Get("GIT_LFS_PROGRESS must be an absolute path"))
return nil
}
@ -235,8 +236,8 @@ func (m *Meter) str() string {
// (Uploading|Downloading) LFS objects: 100% (10/10) 100 MiB | 10 MiB/s
percentage := 100 * float64(m.finishedFiles) / float64(m.estimatedFiles)
return fmt.Sprintf("%s LFS objects: %3.f%% (%d/%d), %s | %s",
m.Direction.Verb(),
return fmt.Sprintf("%s: %3.f%% (%d/%d), %s | %s",
m.Direction.Progress(),
percentage,
m.finishedFiles, m.estimatedFiles,
humanize.FormatBytes(clamp(m.currentBytes)),

@ -16,6 +16,7 @@ import (
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/ssh"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -30,12 +31,12 @@ func (a *SSHBatchClient) batchInternal(args []string, batchLines []string) (int,
defer conn.Unlock()
err := conn.SendMessageWithLines("batch", args, batchLines)
if err != nil {
return 0, nil, nil, errors.Wrap(err, "batch request")
return 0, nil, nil, errors.Wrap(err, tr.Tr.Get("batch request"))
}
status, args, lines, err := conn.ReadStatusWithLines()
if err != nil {
return status, nil, nil, errors.Wrap(err, "batch response")
return status, nil, nil, errors.Wrap(err, tr.Tr.Get("batch response"))
}
return status, args, lines, err
}
@ -66,11 +67,11 @@ func (a *SSHBatchClient) Batch(remote string, bReq *batchRequest) (*BatchRespons
}
if status != 200 {
msg := "no message provided"
msg := tr.Tr.Get("no message provided")
if len(lines) > 0 {
msg = lines[0]
}
return nil, fmt.Errorf("batch response: status %d from server (%s)", status, msg)
return nil, errors.New(tr.Tr.Get("batch response: status %d from server (%s)", status, msg))
}
for _, arg := range args {
@ -81,7 +82,7 @@ func (a *SSHBatchClient) Batch(remote string, bReq *batchRequest) (*BatchRespons
if entries[0] == "hash-algo" {
bRes.HashAlgorithm = entries[1]
if bRes.HashAlgorithm != "sha256" {
return nil, fmt.Errorf("batch response: unsupported hash algorithm: %q", entries[1])
return nil, errors.New(tr.Tr.Get("batch response: unsupported hash algorithm: %q", entries[1]))
}
}
}
@ -90,7 +91,7 @@ func (a *SSHBatchClient) Batch(remote string, bReq *batchRequest) (*BatchRespons
for _, line := range lines {
entries := strings.Split(line, " ")
if len(entries) < 3 {
return nil, fmt.Errorf("batch response: malformed response: %q", line)
return nil, errors.New(tr.Tr.Get("batch response: malformed response: %q", line))
}
length := len(bRes.Objects)
if length == 0 || bRes.Objects[length-1].Oid != entries[0] {
@ -100,7 +101,7 @@ func (a *SSHBatchClient) Batch(remote string, bReq *batchRequest) (*BatchRespons
transfer.Oid = entries[0]
transfer.Size, err = strconv.ParseInt(entries[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("batch response: invalid size: %s", entries[1])
return nil, errors.New(tr.Tr.Get("batch response: invalid size: %s", entries[1]))
}
if entries[2] == "noop" {
continue
@ -115,12 +116,12 @@ func (a *SSHBatchClient) Batch(remote string, bReq *batchRequest) (*BatchRespons
} else if strings.HasPrefix(entry, "expires-in=") {
transfer.Actions[entries[2]].ExpiresIn, err = strconv.Atoi(entry[11:])
if err != nil {
return nil, fmt.Errorf("batch response: invalid expires-in: %s", entry)
return nil, errors.New(tr.Tr.Get("batch response: invalid expires-in: %s", entry))
}
} else if strings.HasPrefix(entry, "expires-at=") {
transfer.Actions[entries[2]].ExpiresAt, err = time.Parse(time.RFC3339, entry[11:])
if err != nil {
return nil, fmt.Errorf("batch response: invalid expires-at: %s", entry)
return nil, errors.New(tr.Tr.Get("batch response: invalid expires-at: %s", entry))
}
}
}
@ -191,7 +192,7 @@ func (a *SSHAdapter) download(t *Transfer, conn *ssh.PktlineConnection, cb Progr
return err
}
if rel == nil {
return errors.Errorf("No download action for object: %s", t.Oid)
return errors.Errorf(tr.Tr.Get("No download action for object: %s", t.Oid))
}
// Reserve a temporary filename. We need to make sure nobody operates on the file simultaneously with us.
f, err := tools.TempFile(a.tempDir(), t.Oid, a.fs)
@ -228,7 +229,7 @@ func (a *SSHAdapter) doDownload(t *Transfer, conn *ssh.PktlineConnection, f *os.
io.CopyN(buffer, data, 1024)
io.Copy(ioutil.Discard, data)
}
return errors.NewRetriableError(fmt.Errorf("got status %d when fetching OID %s: %s", status, t.Oid, buffer.String()))
return errors.NewRetriableError(errors.New(tr.Tr.Get("got status %d when fetching OID %s: %s", status, t.Oid, buffer.String())))
}
var actualSize int64
@ -236,11 +237,11 @@ func (a *SSHAdapter) doDownload(t *Transfer, conn *ssh.PktlineConnection, f *os.
for _, arg := range args {
if strings.HasPrefix(arg, "size=") {
if seenSize {
return errors.NewProtocolError("unexpected size argument", nil)
return errors.NewProtocolError(tr.Tr.Get("unexpected size argument"), nil)
}
actualSize, err = strconv.ParseInt(arg[5:], 10, 64)
if err != nil || actualSize < 0 {
return errors.NewProtocolError(fmt.Sprintf("expected valid size, got %q", arg[5:]), err)
return errors.NewProtocolError(tr.Tr.Get("expected valid size, got %q", arg[5:]), err)
}
seenSize = true
}
@ -260,15 +261,15 @@ func (a *SSHAdapter) doDownload(t *Transfer, conn *ssh.PktlineConnection, f *os.
hasher := tools.NewHashingReader(data)
written, err := tools.CopyWithCallback(f, hasher, t.Size, ccb)
if err != nil {
return errors.Wrapf(err, "cannot write data to tempfile %q", dlfilename)
return errors.Wrapf(err, tr.Tr.Get("cannot write data to temporary file %q", dlfilename))
}
if actual := hasher.Hash(); actual != t.Oid {
return fmt.Errorf("expected OID %s, got %s after %d bytes written", t.Oid, actual, written)
return errors.New(tr.Tr.Get("expected OID %s, got %s after %d bytes written", t.Oid, actual, written))
}
if err := f.Close(); err != nil {
return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err)
return errors.New(tr.Tr.Get("can't close temporary file %q: %v", dlfilename, err))
}
err = tools.RenameFileCopyPermissions(dlfilename, t.Path)
@ -293,9 +294,9 @@ func (a *SSHAdapter) verifyUpload(t *Transfer, conn *ssh.PktlineConnection) erro
}
if status < 200 || status > 299 {
if len(lines) > 0 {
return fmt.Errorf("got status %d when verifying upload OID %s: %s", status, t.Oid, lines[0])
return errors.New(tr.Tr.Get("got status %d when verifying upload OID %s: %s", status, t.Oid, lines[0]))
}
return fmt.Errorf("got status %d when verifying upload OID %s", status, t.Oid)
return errors.New(tr.Tr.Get("got status %d when verifying upload OID %s", status, t.Oid))
}
return nil
}
@ -331,12 +332,12 @@ func (a *SSHAdapter) upload(t *Transfer, conn *ssh.PktlineConnection, cb Progres
return err
}
if rel == nil {
return errors.Errorf("No upload action for object: %s", t.Oid)
return errors.Errorf(tr.Tr.Get("No upload action for object: %s", t.Oid))
}
f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
if err != nil {
return errors.Wrap(err, "SSH upload")
return errors.Wrap(err, tr.Tr.Get("SSH upload"))
}
defer f.Close()
@ -348,18 +349,18 @@ func (a *SSHAdapter) upload(t *Transfer, conn *ssh.PktlineConnection, cb Progres
// A status code of 403 likely means that an authentication token for the
// upload has expired. This can be safely retried.
if status == 403 {
err = errors.New("http: received status 403")
err = errors.New(tr.Tr.Get("http: received status 403"))
return errors.NewRetriableError(err)
}
if status == 429 {
return errors.NewRetriableError(fmt.Errorf("got status %d when uploading OID %s", status, t.Oid))
return errors.NewRetriableError(errors.New(tr.Tr.Get("got status %d when uploading OID %s", status, t.Oid)))
}
if len(lines) > 0 {
return fmt.Errorf("got status %d when uploading OID %s: %s", status, t.Oid, lines[0])
return errors.New(tr.Tr.Get("got status %d when uploading OID %s: %s", status, t.Oid, lines[0]))
}
return fmt.Errorf("got status %d when uploading OID %s", status, t.Oid)
return errors.New(tr.Tr.Get("got status %d when uploading OID %s", status, t.Oid))
}

@ -9,6 +9,7 @@ import (
"github.com/git-lfs/git-lfs/v3/errors"
"github.com/git-lfs/git-lfs/v3/lfsapi"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
)
type Direction int
@ -19,15 +20,15 @@ const (
Checkout = Direction(iota)
)
// Verb returns a string containing the verb form of the receiving action.
func (d Direction) Verb() string {
// Progress returns a string containing the operation in progress.
func (d Direction) Progress() string {
switch d {
case Checkout:
return "Checking out"
return tr.Tr.Get("Checking out LFS objects")
case Download:
return "Downloading"
return tr.Tr.Get("Downloading LFS objects")
case Upload:
return "Uploading"
return tr.Tr.Get("Uploading LFS objects")
default:
return "<unknown>"
}
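Together with the Meter change above, this hunk moves the whole progress headline into one translatable string: the meter stops appending " LFS objects" to an English verb, and Direction.Progress() returns the complete phrase. A minimal sketch of the resulting composition, assuming the tq package context; headline is a hypothetical helper, not a function from the diff:

package tq

import "fmt"

// headline composes the meter's summary line after the Verb() to Progress()
// rename: the direction supplies the full phrase, so translators see one
// complete message instead of a verb spliced into a format string.
func headline(d Direction, percentage float64, finished, estimated int) string {
	return fmt.Sprintf("%s: %3.f%% (%d/%d)",
		d.Progress(), percentage, finished, estimated)
}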

@ -11,6 +11,7 @@ import (
"github.com/git-lfs/git-lfs/v3/git"
"github.com/git-lfs/git-lfs/v3/lfshttp"
"github.com/git-lfs/git-lfs/v3/tools"
"github.com/git-lfs/git-lfs/v3/tr"
"github.com/rubyist/tracerx"
)
@ -626,7 +627,7 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error)
// Transfer object, then we give up on the
// transfer by telling the progress meter to
// skip the number of bytes in "o".
q.errorc <- errors.Errorf("[%v] The server returned an unknown OID.", o.Oid)
q.errorc <- errors.Errorf(tr.Tr.Get("[%v] The server returned an unknown OID.", o.Oid))
q.Skip(o.Size)
q.wait.Done()
@ -722,7 +723,7 @@ func (q *TransferQueue) partitionTransfers(transfers []*Transfer) (present []*Tr
var err error
if t.Size < 0 {
err = errors.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Oid, t.Size)
err = errors.Errorf(tr.Tr.Get("Git LFS: object %q has invalid size (got: %d)", t.Oid, t.Size))
} else {
fd, serr := os.Stat(t.Path)
if serr != nil {
@ -921,17 +922,6 @@ func (q *TransferQueue) toAdapterCfg(e lfshttp.Endpoint) AdapterConfig {
}
}
var (
// contentTypeWarning is the message printed when a server returns an
// HTTP 422 at the end of a push.
contentTypeWarning = []string{
"Uploading failed due to unsupported Content-Type header(s).",
"Consider disabling Content-Type detection with:",
"",
" $ git config lfs.contenttype false",
}
)
// Wait waits for the queue to finish processing all transfers. Once Wait is
// called, Add will no longer add transfers to the queue. Any failed
// transfers will be automatically retried once.
@ -956,9 +946,10 @@ func (q *TransferQueue) Wait() {
}
if q.unsupportedContentType {
for _, line := range contentTypeWarning {
fmt.Fprintf(os.Stderr, "info: %s\n", line)
}
fmt.Fprintf(os.Stderr, tr.Tr.Get(`info: Uploading failed due to unsupported Content-Type header(s).
info: Consider disabling Content-Type detection with:
info:
info: $ git config lfs.contenttype false`))
}
}
