Fix typos

This commit is contained in:
Dimitris Apostolou 2022-01-05 08:49:08 +02:00
parent e3893b1402
commit 21b0402690
No known key found for this signature in database
GPG Key ID: 4B5D20E938204A8A
29 changed files with 41 additions and 41 deletions

@ -116,7 +116,7 @@ Git LFS depends on having a working Go development environment. We officially
support the latest version of Go, although we try not to break backwards
compatibility with older versions if it's possible to avoid doing so.
On RHEL etc. e.g. Red Hat Enterprise Linux Server release 7.2 (Maipo), you will neet the minimum packages installed to build Git LFS:
On RHEL etc. e.g. Red Hat Enterprise Linux Server release 7.2 (Maipo), you will need the minimum packages installed to build Git LFS:
```ShellSession
$ sudo yum install gcc

@ -296,8 +296,8 @@ func checkoutNonBare(l *tasklog.Logger) error {
}
// trackedFromFilter returns an ordered set of strings where each entry is a
// line in the .gitattributes file. It adds/removes the fiter/diff/merge=lfs
// attributes based on patterns included/excldued in the given filter.
// line in the .gitattributes file. It adds/removes the filter/diff/merge=lfs
// attributes based on patterns included/excluded in the given filter.
func trackedFromFilter(filter *filepathfilter.Filter) *tools.OrderedSet {
tracked := tools.NewOrderedSet()

@ -164,7 +164,7 @@ ArgsLoop:
// Also, for any new patterns we've added, make sure any existing git
// tracked files have their timestamp updated so they will now show as
// modifed note this is relative to current dir which is how we write
// modified note this is relative to current dir which is how we write
// .gitattributes deliberately not done in parallel as a chan because
// we'll be marking modified
//

@ -53,7 +53,7 @@ func (e *delayedEnvironment) All() map[string][]string {
// also sets values on the configuration instance `g.config`.
//
// If Load has already been called, this method will bail out early,
// and return false. Otherwise it will preform the entire parse and return true.
// and return false. Otherwise it will perform the entire parse and return true.
//
// Load is safe to call across multiple goroutines.
func (e *delayedEnvironment) Load() {

@ -485,7 +485,7 @@ func (s *CredentialHelpers) Reject(what Creds) error {
}
// Approve implements CredentialHelper.Approve and approves the given Creds
// "what" with the first successful CredentialHelper. If an error occurrs,
// "what" with the first successful CredentialHelper. If an error occurs,
// it calls Reject() with the same Creds and returns the error immediately. This
// ensures a caching credential helper removes the cache, since the Erroring
// CredentialHelper never successfully saved it.

@ -68,7 +68,7 @@ you are executing has to be in the docker image.
There are currently three types of docker images:
1. Building images: `{OS NAME}_{OS VERSION}` - These build
git-lfs and save the package/repository in the `/repo` direrctory. This image
git-lfs and save the package/repository in the `/repo` directory. This image
also signs all rpms/debs if gpg signing is setup
2. Environment building images: `{OS_NAME}_{OS_VERSION}_env` -
These build or install the environment (dependencies) for building git-lfs. These
@ -263,7 +263,7 @@ CentOS will **not** work with subkeys[3]. CentOS 6 and 7 will work with 4096 bit
RSA signing keys
You can make a 4096 RSA key for Debian and CentOS 6/7 (4 for step 1 above, and
4096 for step 2). And only have two keys... Or optionally a 4096 RSA subkey for Debain
4096 for step 2). And only have two keys... Or optionally a 4096 RSA subkey for Debian
[1]. Or a key for each distro. Dealer's choice.
[1] https://www.digitalocean.com/community/tutorials/how-to-use-reprepro-for-a-secure-package-repository-on-ubuntu-14-04

@ -65,7 +65,7 @@ if [[ ${#IMAGES[@]} == 0 ]]; then
fi
mkdir -p "${PACKAGE_DIR}"
#Run docker to build pacakges
#Run docker to build packages
for IMAGE_NAME in "${IMAGES[@]}"; do
split_image_name "${IMAGE_NAME}" #set IMAGE_NAME and IMAGE_INFO

@ -4,7 +4,7 @@ This is a proposal for a new transfer mode, designed to support multi-part HTTP
Git LFS, defining a new transfer mode to be implemented by Git LFS clients and servers in addition to the current `basic`
transfer mode.
This proposal is based on the experimental `multipart-basic` transfor mode originally
This proposal is based on the experimental `multipart-basic` transfer mode originally
[implemented by datopian/giftless](https://giftless.datopian.com/en/latest/multipart-spec.html).
## Reasoning
@ -40,7 +40,7 @@ the Storage Backend be a cloud storage service such as *Amazon S3* or *Google Cl
## High Level Protocol Specs
* The name of the transfer is `multipart`
* Batch requests are the same as `basic` requests except that `{"transfers": ["multipart", "basic"]}` is the
expected transfers value. Clients MUST retain `basic` as the fallback transfer mode to ensure compatiblity with
expected transfers value. Clients MUST retain `basic` as the fallback transfer mode to ensure compatibility with
servers not implementing this extension.
* `{"operation": "download"}` replies work exactly like `basic` download request with no change
* `{"operation": "upload"}` replies will break the upload into several `actions`:
@ -330,7 +330,7 @@ May be responded with:
]
}
```
Even if the server does support `multipart`, as `basic` can be preferrable in this case.
Even if the server does support `multipart`, as `basic` can be preferable in this case.
## Implementation Notes
@ -363,5 +363,5 @@ As `abort` requests do not have a body, any parameters required by the LFS serve
be passed as part of the URL in the `href` parameter.
It should be noted that clients will not always be able to `abort` partial uploads cleanly. Implementors are expected to
ensure proper cleanup of partially uploaded files via other means, such as a periodcal cron job that locates uncommitted
ensure proper cleanup of partially uploaded files via other means, such as a periodic cron job that locates uncommitted
uploaded parts and deletes them.

@ -11,12 +11,12 @@ go through the ntlm auth flow.
We will store NTLM credentials in the credential helper. When the user is prompted for their credentials they must use username:{DOMAIN}\{user} and password:{pass}
The ntlm protocol will be handled by an ntlm.go class that hides the implementation of InitHandshake, Authenticate, and Challenge. This allows miminal changesto the existing
The ntlm protocol will be handled by an ntlm.go class that hides the implementation of InitHandshake, Authenticate, and Challenge. This allows minimal changes to the existing
client.go class.
### Tech
There is a ntlm-go library available at https://github.com/ThomsonReutersEikon/go-ntlm that we can use. We will need to implementate the Negotiate method and publish docs on what NTLM switches we support. I think simple user/pass/domain is best here so we avoid supporting a million settings with conflicting docs.
There is a ntlm-go library available at https://github.com/ThomsonReutersEikon/go-ntlm that we can use. We will need to implement the Negotiate method and publish docs on what NTLM switches we support. I think simple user/pass/domain is best here so we avoid supporting a million settings with conflicting docs.
### Work

@ -68,7 +68,7 @@ we don't have to implement everything, or bloat the git-lfs binary with every
custom system possible.
Because Go is statically linked it's not possible to extend client functionality
at runtime through loading libaries, so instead I propose allowing an external
at runtime through loading libraries, so instead I propose allowing an external
process to be invoked, and communicated with via a defined stream protocol. This
protocol will be logically identical to the internal adapters; the core passing
oids and receiving back progress and completion notifications; just that the

@ -10,7 +10,7 @@ package errors
// always be `error`. The wrappedError details are not exported. If an error is
// the kind of error a caller should need to investigate, an IsXError()
// function is provided that tells the caller if the error is of that type.
// There should only be a handfull of cases where a simple `error` is
// There should only be a handful of cases where a simple `error` is
// insufficient.
//
// The error behaviors can be nested when created. For example, the not

@ -30,7 +30,7 @@ func TestBehaviorWraps(t *testing.T) {
ni := NewNotImplementedError(fatal)
if !IsNotImplementedError(ni) {
t.Error("expected erro to be not implemeted")
t.Error("expected error to be not implemented")
}
if !IsFatalError(ni) {

@ -105,7 +105,7 @@ func (f *Filesystem) RepositoryPermissions(executable bool) os.FileMode {
}
/**
* Revert non ascii chracters escaped by git or windows (as octal sequences \000) back to bytes.
* Revert non ascii characters escaped by git or windows (as octal sequences \000) back to bytes.
*/
func DecodePathBytes(path []byte) []byte {
var expression = regexp.MustCompile(`\\[0-9]{3}`)

@ -39,7 +39,7 @@ func NewConfig(workdir, gitdir string) *Configuration {
return &Configuration{WorkDir: workdir, GitDir: gitdir}
}
// NewReadOnlyConfig creates a new confguration that returns an error if an
// NewReadOnlyConfig creates a new configuration that returns an error if an
// attempt to write to the configuration is made.
func NewReadOnlyConfig(workdir, gitdir string) *Configuration {
cfg := NewConfig(workdir, gitdir)

@ -846,7 +846,7 @@ func GetAllWorkTreeHEADs(storageDir string) ([]*Ref, error) {
// This has only established the separate worktrees, not the original checkout
// If the storageDir contains a HEAD file then there is a main checkout
// as well; this mus tbe resolveable whether you're in the main checkout or
// as well; this must be resolvable whether you're in the main checkout or
// a worktree
headfile := filepath.Join(storageDir, "HEAD")
ref, err := parseRefFile(headfile)

@ -114,7 +114,7 @@ func AssertCommitParent(t *testing.T, db *gitobj.ObjectDatabase, sha, parent str
"gitobj: expected parents of commit: %s to contain: %s", sha, parent)
}
// AssertCommitTree asserts that the given commit has a tree equivelant to the
// AssertCommitTree asserts that the given commit has a tree equivalent to the
// one provided.
func AssertCommitTree(t *testing.T, db *gitobj.ObjectDatabase, sha, tree string) {
commit, err := db.Commit(HexDecode(t, sha))
@ -155,7 +155,7 @@ func HexDecode(t *testing.T, sha string) []byte {
return b
}
// copyToTmp copies the given fixutre to a folder in /tmp.
// copyToTmp copies the given fixture to a folder in /tmp.
func copyToTmp(fixture string) (string, error) {
p, err := ioutil.TempDir("", fmt.Sprintf("git-lfs-fixture-%s", filepath.Dir(fixture)))
if err != nil {

@ -77,7 +77,7 @@ type RewriteOptions struct {
TreeCallbackFn TreeCallbackFn
}
// blobFn returns a useable BlobRewriteFn, either the one that was given in the
// blobFn returns a usable BlobRewriteFn, either the one that was given in the
// *RewriteOptions, or a noopBlobFn.
func (r *RewriteOptions) blobFn() BlobRewriteFn {
if r.BlobFn == nil {
@ -86,7 +86,7 @@ func (r *RewriteOptions) blobFn() BlobRewriteFn {
return r.BlobFn
}
// treePreFn returns a useable TreePreCallbackFn, either the one that was given
// treePreFn returns a usable TreePreCallbackFn, either the one that was given
// in the *RewriteOptions, or a noopTreePreFn.
func (r *RewriteOptions) treePreFn() TreePreCallbackFn {
if r.TreePreCallbackFn == nil {
@ -95,7 +95,7 @@ func (r *RewriteOptions) treePreFn() TreePreCallbackFn {
return r.TreePreCallbackFn
}
// treeFn returns a useable TreeRewriteFn, either the one that was given in the
// treeFn returns a usable TreeRewriteFn, either the one that was given in the
// *RewriteOptions, or a noopTreeFn.
func (r *RewriteOptions) treeFn() TreeCallbackFn {
if r.TreeCallbackFn == nil {

@ -195,7 +195,7 @@ func parseLogOutputToPointers(log io.Reader, dir LogDiffDirection,
// logScanner parses log output formatted as per logLfsSearchArgs & returns
// pointers.
type logScanner struct {
// Filter will ensure file paths matching the include patterns, or not matchin
// Filter will ensure file paths matching the include patterns, or not matching
// the exclude patterns are skipped.
Filter *filepathfilter.Filter

@ -1,7 +1,7 @@
Name: git-lfs-repo-release
Version: 1
Release: 1%{?dist}
Summary: Packges for git-lfs for Enterprise Linux repository configuration
Summary: Packages for git-lfs for Enterprise Linux repository configuration
Group: System Environment/Base
License: MIT

@ -682,7 +682,7 @@ setup_local_branch_with_dirty_copy() {
# \
# refs/heads/main
#
# - Commit 'A' has the contents "a.txt" in a.txt, and anoter identical file
# - Commit 'A' has the contents "a.txt" in a.txt, and another identical file
# (same name and content) in another directory.
setup_local_branch_with_copied_file() {
set -e

@ -440,7 +440,7 @@ begin_test "fetch-all"
git clone --bare "$GITSERVER/$reponame" "$reponame-bare"
cd "$reponame-bare"
# Preform the same assertion as above, on the same data
# Perform the same assertion as above, on the same data
git lfs fetch --all origin
for ((a=0; a < NUMFILES ; a++))
do

@ -244,7 +244,7 @@ begin_test "prune keep recent"
oid_prunecommithead=$(calc_oid "$content_prunecommithead")
# use a single file so each commit supercedes the last, if different files
# use a single file so each commit supersedes the last, if different files
# then history becomes harder to track
# Also note that when considering 'recent' when editing a single file, it means
# that the snapshot state overlapped; so the latest commit *before* the day

@ -2,7 +2,7 @@
. "$(dirname "$0")/testlib.sh"
# push_fail_test preforms a test expecting a `git lfs push` to fail given the
# push_fail_test performs a test expecting a `git lfs push` to fail given the
# contents of a particular file contained within that push. The Git server used
# during tests has certain special cases that are triggered by finding specific
# keywords within a file (as given by the first argument).

@ -3,7 +3,7 @@
. "$(dirname "$0")/testlib.sh"
# sets up the repos for the first few push tests. The passed argument is the
# name of the repo to setup. The resuling repo will have a local file tracked
# name of the repo to setup. The resulting repo will have a local file tracked
# with LFS and committed, but not yet pushed to the remote
push_repo_setup() {
reponame="$1"

@ -354,11 +354,11 @@ begin_test "unlocking a lock while uncommitted"
)
end_test
begin_test "unlocking a lock with ambiguious arguments"
begin_test "unlocking a lock with ambiguous arguments"
(
set -e
reponame="unlock_ambiguious_args"
reponame="unlock_ambiguous_args"
setup_repo "$reponame" "a.dat"
git lfs lock --json "a.dat" | tee lock.log

@ -100,7 +100,7 @@ func (r *RetriableReader) Read(b []byte) (int, error) {
}
// Spool spools the contents from 'from' to 'to' by buffering the entire
// contents of 'from' into a temprorary file created in the directory "dir".
// contents of 'from' into a temporary file created in the directory "dir".
// That buffer is held in memory until the file grows to larger than
// `memoryBufferLimit`, then the remaining contents are spooled to disk.
//

@ -13,7 +13,7 @@ var (
// QuotedFields is an alternative to strings.Fields (see:
// https://golang.org/pkg/strings#Fields) that respects spaces between matching
// pairs of quotation delimeters.
// pairs of quotation delimiters.
//
// For instance, the quoted fields of the string "foo bar 'baz etc'" would be:
// []string{"foo", "bar", "baz etc"}

@ -89,7 +89,7 @@ func CloneFile(writer io.Writer, reader io.Reader) (success bool, err error) {
fileSize := srcStat.Size()
err = dst.Truncate(fileSize) // set file size. Thre is a requirements "The destination region must not extend past the end of file."
err = dst.Truncate(fileSize) // set file size. There is a requirement "The destination region must not extend past the end of file."
if err != nil {
return
}

@ -95,7 +95,7 @@ type batch []*objectTuple
// receiver and "other" has cardinality less than "size", "right" will be
// returned as nil. Any object tuple that is not currently able to be retried
// (i.e., Retry-After response), will also go into the right batch. Also, when object(s)
// are returend that are rate-limited, return the minimum duration required to wait until
// are returned that are rate-limited, return the minimum duration required to wait until
// an object is ready.
func (b batch) Concat(other batch, size int) (left, right batch, minWait time.Duration) {
u := batch(append(b, other...))
@ -475,7 +475,7 @@ func (q *TransferQueue) collectBatches() {
var minWaitTime time.Duration
next, pending, minWaitTime = retries.Concat(append(pending, collected...), q.batchSize)
if len(next) == 0 && len(pending) != 0 {
// There are some pending that cound not be queued.
// There are some pending that could not be queued.
// Wait the requested time before resuming loop.
time.Sleep(minWaitTime)
} else if len(next) == 0 && len(pending) == 0 && closing {
@ -509,7 +509,7 @@ func (q *TransferQueue) collectPendingUntil(done <-chan struct{}) (pending batch
// enqueueAndCollectRetriesFor makes a Batch API call and returns a "next" batch
// containing all of the objects that failed from the previous batch and had
// retries availale to them.
// retries available to them.
//
// If an error was encountered while making the API request, _all_ of the items
// from the previous batch (that have retries available to them) will be