Merge pull request #5804 from blanet/main

Fix panic caused by accessing non-existent header
Chris Darroch 2024-06-20 07:58:51 -07:00, committed by GitHub
commit b0e2f25d5d
10 changed files with 139 additions and 15 deletions
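For context, Go's http.Header is a map[string][]string: indexing it for a header the server never sent yields a nil slice, so res.Header["Retry-After"][0] panics with an index-out-of-range error, while res.Header.Get("Retry-After") simply returns an empty string. A minimal illustration of the difference (not git-lfs code):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// A 429 response whose Retry-After header was never set.
	res := &http.Response{StatusCode: 429, Header: http.Header{}}

	// Safe: Get returns "" when the header is absent.
	fmt.Printf("Retry-After=%q\n", res.Header.Get("Retry-After"))

	// Unsafe: the map lookup yields a nil slice, so indexing [0] panics
	// with "index out of range" -- the crash this PR fixes.
	_ = res.Header["Retry-After"][0]
}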

@@ -412,19 +412,23 @@ type retriableLaterError struct {
 }
 
 func NewRetriableLaterError(err error, header string) error {
-	secs, err := strconv.Atoi(header)
-	if err == nil {
+	if header == "" {
+		return nil
+	}
+
+	secs, parseErr := strconv.Atoi(header)
+	if parseErr == nil {
 		return retriableLaterError{
 			wrappedError:  newWrappedError(err, ""),
 			timeAvailable: time.Now().Add(time.Duration(secs) * time.Second),
 		}
 	}
 
-	time, err := time.Parse(time.RFC1123, header)
-	if err == nil {
+	parseTime, parseErr := time.Parse(time.RFC1123, header)
+	if parseErr == nil {
 		return retriableLaterError{
 			wrappedError:  newWrappedError(err, ""),
-			timeAvailable: time,
+			timeAvailable: parseTime,
 		}
 	}
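Retry-After may carry either a delay in seconds or an HTTP date, which is why the function tries strconv.Atoi before time.Parse with time.RFC1123; an empty header now short-circuits to nil so the caller can fall back to a plain retriable error (see the handleResponse hunk below). A stdlib-only sketch of the same two-step parse (illustrative only, not git-lfs code):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseRetryAfter mirrors the two formats accepted above: an integer number
// of seconds ("120") or an RFC 1123 date ("Wed, 21 Oct 2015 07:28:00 GMT").
// An empty or unparseable header yields ok == false.
func parseRetryAfter(header string) (time.Time, bool) {
	if header == "" {
		return time.Time{}, false
	}
	if secs, err := strconv.Atoi(header); err == nil {
		return time.Now().Add(time.Duration(secs) * time.Second), true
	}
	if t, err := time.Parse(time.RFC1123, header); err == nil {
		return t, true
	}
	return time.Time{}, false
}

func main() {
	for _, h := range []string{"120", "Wed, 21 Oct 2015 07:28:00 GMT", ""} {
		t, ok := parseRetryAfter(h)
		fmt.Printf("%q -> ok=%v time=%v\n", h, ok, t)
	}
}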

@@ -70,6 +70,7 @@ func (c *Client) handleResponse(res *http.Response) error {
 		if retLaterErr != nil {
 			return retLaterErr
 		}
+		return errors.NewRetriableError(err)
 	}
 
 	if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 507 && res.StatusCode != 509 {

@@ -57,7 +57,7 @@ var (
 		"status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500",
 		"status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503",
 		"status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size",
-		"object-authenticated", "storage-download-retry", "storage-upload-retry", "storage-upload-retry-later", "unknown-oid",
+		"object-authenticated", "storage-download-retry", "storage-upload-retry", "storage-upload-retry-later", "storage-upload-retry-later-no-header", "unknown-oid",
 		"send-verify-action", "send-deprecated-links", "redirect-storage-upload", "storage-compress", "batch-hash-algo-empty", "batch-hash-algo-invalid",
 		"auth-bearer",
 	}
@@ -422,6 +422,16 @@ func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
 		}
 	}
+
+	if strings.HasSuffix(repo, "batch-retry-later-no-header") {
+		if _, isWaiting := checkRateLimit("batch", "", repo, ""); isWaiting {
+			w.WriteHeader(http.StatusTooManyRequests)
+			w.Write([]byte("rate limit reached"))
+
+			fmt.Println("Not setting Retry-After header")
+			return
+		}
+	}
 
 	res := []lfsObject{}
 	testingChunked := testingChunkedTransferEncoding(r)
 	testingTus := testingTusUploadInBatchReq(r)
@@ -709,6 +719,14 @@ func storageHandler(w http.ResponseWriter, r *http.Request) {
 			fmt.Println("Setting header to: ", strconv.Itoa(timeLeft))
 			return
 		}
+	case "storage-upload-retry-later-no-header":
+		if _, isWaiting := checkRateLimit("storage", "upload", repo, oid); isWaiting {
+			w.WriteHeader(http.StatusTooManyRequests)
+			w.Write([]byte("rate limit reached"))
+
+			fmt.Println("Not setting Retry-After header")
+			return
+		}
 	case "storage-compress":
 		if r.Header.Get("Accept-Encoding") != "gzip" {
 			w.WriteHeader(500)
@@ -758,6 +776,12 @@ func storageHandler(w http.ResponseWriter, r *http.Request) {
 			by = []byte("rate limit reached")
 			fmt.Println("Setting header to: ", strconv.Itoa(secsToWait))
 		}
+	} else if len(by) == len("storage-download-retry-later-no-header") && string(by) == "storage-download-retry-later-no-header" {
+		if _, wait := checkRateLimit("storage", "download", repo, oid); wait {
+			statusCode = http.StatusTooManyRequests
+			by = []byte("rate limit reached")
+			fmt.Println("Not setting Retry-After header")
+		}
 	} else if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" {
 		if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 {
 			statusCode = 500

t/t-batch-retries-ratelimit.sh (26 additions; Normal file → Executable file)

@@ -130,3 +130,29 @@ begin_test "batch clone with multiple files causes retries"
   popd
 )
 end_test
+
+begin_test "batch upload causes retries (missing header)"
+(
+  set -e
+
+  reponame="upload-batch-retry-later-no-header"
+  setup_remote_repo "$reponame"
+  clone_repo "$reponame" batch-repo-upload-no-header
+
+  contents="content"
+  oid="$(calc_oid "$contents")"
+  printf "%s" "$contents" > a.dat
+
+  git lfs track "*.dat"
+  git add .gitattributes a.dat
+  git commit -m "initial commit"
+
+  GIT_TRACE=1 git push origin main 2>&1 | tee push.log
+  if [ "0" -ne "${PIPESTATUS[0]}" ]; then
+    echo >&2 "fatal: expected \`git push origin main\` to succeed ..."
+    exit 1
+  fi
+
+  assert_server_object "$reponame" "$oid"
+)
+end_test

t/t-batch-storage-retries-ratelimit.sh (67 additions; Normal file → Executable file)

@@ -101,3 +101,70 @@ begin_test "batch clone causes retries"
   popd
 )
 end_test
+
+begin_test "batch storage upload causes retries (missing header)"
+(
+  set -e
+
+  reponame="batch-storage-upload-retry-later-no-header"
+  setup_remote_repo "$reponame"
+  clone_repo "$reponame" batch-storage-repo-upload-no-header
+
+  contents="storage-upload-retry-later-no-header"
+  oid="$(calc_oid "$contents")"
+  printf "%s" "$contents" > a.dat
+
+  git lfs track "*.dat"
+  git add .gitattributes a.dat
+  git commit -m "initial commit"
+
+  GIT_TRACE=1 git push origin main 2>&1 | tee push.log
+  if [ "0" -ne "${PIPESTATUS[0]}" ]; then
+    echo >&2 "fatal: expected \`git push origin main\` to succeed ..."
+    exit 1
+  fi
+
+  assert_server_object "$reponame" "$oid"
+)
+end_test
+
+begin_test "batch storage download causes retries (missing header)"
+(
+  set -e
+
+  reponame="batch-storage-download-retry-later-no-header"
+  setup_remote_repo "$reponame"
+  clone_repo "$reponame" batch-storage-repo-download-no-header
+
+  contents="storage-download-retry-later-no-header"
+  oid="$(calc_oid "$contents")"
+  printf "%s" "$contents" > a.dat
+
+  git lfs track "*.dat"
+  git add .gitattributes a.dat
+  git commit -m "initial commit"
+
+  git push origin main
+  assert_server_object "$reponame" "$oid"
+
+  pushd ..
+    git \
+      -c "filter.lfs.process=" \
+      -c "filter.lfs.smudge=cat" \
+      -c "filter.lfs.required=false" \
+      clone "$GITSERVER/$reponame" "$reponame-assert"
+
+    cd "$reponame-assert"
+    git config credential.helper lfstest
+
+    GIT_TRACE=1 git lfs pull origin main 2>&1 | tee pull.log
+    if [ "0" -ne "${PIPESTATUS[0]}" ]; then
+      echo >&2 "fatal: expected \`git lfs pull origin main\` to succeed ..."
+      exit 1
+    fi
+
+    assert_local_object "$oid" "${#contents}"
+  popd
+)
+end_test

t/t-cherry-pick-commits.sh (no content changes; Normal file → Executable file)

t/t-submodule-recurse.sh (no content changes; Normal file → Executable file)

@@ -33,6 +33,7 @@ func (a *basicDownloadAdapter) tempDir() string
 
 func (a *basicDownloadAdapter) WorkerStarting(workerNum int) (interface{}, error) {
 	return nil, nil
 }
+
 func (a *basicDownloadAdapter) WorkerEnding(workerNum int, ctx interface{}) {
 }
@@ -153,7 +154,7 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk
 
 	// Special-case status code 429 - retry after certain time
 	if res.StatusCode == 429 {
-		retLaterErr := errors.NewRetriableLaterError(err, res.Header["Retry-After"][0])
+		retLaterErr := errors.NewRetriableLaterError(err, res.Header.Get("Retry-After"))
 		if retLaterErr != nil {
 			return retLaterErr
 		}

@@ -37,6 +37,7 @@ func (a *basicUploadAdapter) tempDir() string
 
 func (a *basicUploadAdapter) WorkerStarting(workerNum int) (interface{}, error) {
 	return nil, nil
 }
+
 func (a *basicUploadAdapter) WorkerEnding(workerNum int, ctx interface{}) {
 }
@@ -125,7 +126,7 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres
 	}
 
 	if res.StatusCode == 429 {
-		retLaterErr := errors.NewRetriableLaterError(err, res.Header["Retry-After"][0])
+		retLaterErr := errors.NewRetriableLaterError(err, res.Header.Get("Retry-After"))
 		if retLaterErr != nil {
 			return retLaterErr
 		}
@@ -201,6 +202,7 @@ func (s *startCallbackReader) Read(p []byte) (n int, err error) {
 	}
 	return s.ReadSeekCloser.Read(p)
 }
+
 func newStartCallbackReader(r lfsapi.ReadSeekCloser, cb func() error) *startCallbackReader {
 	return &startCallbackReader{
 		ReadSeekCloser: r,

@@ -580,26 +580,25 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error)
 	var err error
 	bRes, err = Batch(q.manifest, q.direction, q.remote, q.ref, batch.ToTransfers())
 	if err != nil {
-		var hasNonScheduledErrors = false
+		var hasNonRetriableObjects = false
 		// If there was an error making the batch API call, mark all of
-		// the objects for retry, and return them along with the error
-		// that was encountered. If any of the objects couldn't be
-		// retried, they will be marked as failed.
+		// the objects for retry if possible. If any should not be retried,
+		// they will be marked as failed.
 		for _, t := range batch {
 			if q.canRetryObject(t.Oid, err) {
-				hasNonScheduledErrors = true
 				enqueueRetry(t, err, nil)
 			} else if readyTime, canRetry := q.canRetryObjectLater(t.Oid, err); canRetry {
 				enqueueRetry(t, err, &readyTime)
 			} else {
-				hasNonScheduledErrors = true
+				hasNonRetriableObjects = true
 				q.wait.Done()
 			}
 		}
 
-		// Only return error and mark operation as failure if at least one object
-		// was not enqueued for retrial at a later point.
-		if hasNonScheduledErrors {
+		// Make sure to return an error which causes all other objects to be retried.
+		if hasNonRetriableObjects {
 			return next, errors.NewRetriableError(err)
 		} else {
 			return next, nil
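The renamed hasNonRetriableObjects flag now tracks only objects that could not be scheduled for any retry; the batch error is surfaced (wrapped as retriable) only in that case, so the remaining objects still get retried. A simplified, self-contained sketch of this classification, with canRetryNow and canRetryLater as illustrative stand-ins for q.canRetryObject and q.canRetryObjectLater:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Illustrative stand-ins for q.canRetryObject and q.canRetryObjectLater.
func canRetryNow(oid string) bool { return oid == "a" }

func canRetryLater(oid string) (time.Time, bool) {
	return time.Now().Add(30 * time.Second), oid == "b"
}

// classify mirrors the loop above: retry immediately, retry later, or fail.
// The batch error is propagated only when at least one object failed.
func classify(oids []string, batchErr error) error {
	hasNonRetriableObjects := false
	for _, oid := range oids {
		if canRetryNow(oid) {
			fmt.Println(oid, "-> retry immediately")
		} else if readyTime, ok := canRetryLater(oid); ok {
			fmt.Println(oid, "-> retry at", readyTime.Format(time.RFC3339))
		} else {
			hasNonRetriableObjects = true
			fmt.Println(oid, "-> marked as failed")
		}
	}
	if hasNonRetriableObjects {
		// Wrap so the caller treats the whole batch as retriable.
		return fmt.Errorf("retriable: %w", batchErr)
	}
	return nil
}

func main() {
	err := classify([]string{"a", "b", "c"}, errors.New("batch API call failed"))
	fmt.Println("returned:", err)
}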