// lfstest-gitserver: a stub Git + Git LFS server used by the git-lfs
// integration test suite (test/cmd/lfstest-gitserver.go).
// +build testtools

package main
import (
	"bufio"
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/tls"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/base64"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"math"
	"math/big"
	"net/http"
	"net/http/httptest"
	"net/textproto"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
)
var (
repoDir string
largeObjects = newLfsStorage()
server *httptest.Server
serverTLS *httptest.Server
serverClientCert *httptest.Server
2015-09-21 18:17:58 +00:00
// maps OIDs to content strings. Both the LFS and Storage test servers below
// see OIDs.
oidHandlers map[string]string
// These magic strings tell the test lfs server change their behavior so the
// integration tests can check those use cases. Tests will create objects with
// the magic strings as the contents.
//
// printf "status:lfs:404" > 404.dat
//
contentHandlers = []string{
"status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500",
"status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-storage-503",
"status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", "return-expired-action-forever", "return-invalid-size",
"object-authenticated", "storage-download-retry", "storage-upload-retry", "unknown-oid",
2015-09-21 18:17:58 +00:00
}
)
func main() {
repoDir = os.Getenv("LFSTEST_DIR")
mux := http.NewServeMux()
server = httptest.NewServer(mux)
serverTLS = httptest.NewTLSServer(mux)
serverClientCert = httptest.NewUnstartedServer(mux)
//setup Client Cert server
rootKey, rootCert := generateCARootCertificates()
_, clientCertPEM, clientKeyPEM := generateClientCertificates(rootCert, rootKey)
certPool := x509.NewCertPool()
certPool.AddCert(rootCert)
serverClientCert.TLS = &tls.Config{
Certificates: []tls.Certificate{serverTLS.TLS.Certificates[0]},
ClientAuth: tls.RequireAndVerifyClientCert,
ClientCAs: certPool,
}
serverClientCert.StartTLS()
2017-01-06 22:19:10 +00:00
ntlmSession, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode)
if err != nil {
fmt.Println("Error creating ntlm session:", err)
os.Exit(1)
}
ntlmSession.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN")
stopch := make(chan bool)
mux.HandleFunc("/shutdown", func(w http.ResponseWriter, r *http.Request) {
stopch <- true
})
mux.HandleFunc("/storage/", storageHandler)
2015-06-29 11:57:50 +00:00
mux.HandleFunc("/redirect307/", redirect307Handler)
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
2016-07-15 20:08:26 +00:00
id, ok := reqId(w)
if !ok {
return
}
2016-12-22 23:59:54 +00:00
if strings.Contains(r.URL.Path, "/info/lfs/locks") {
2017-01-06 22:19:10 +00:00
if !skipIfBadAuth(w, r, id, ntlmSession) {
2016-12-22 23:59:54 +00:00
locksHandler(w, r)
}
return
}
if strings.Contains(r.URL.Path, "/info/lfs") {
2017-01-06 22:19:10 +00:00
if !skipIfBadAuth(w, r, id, ntlmSession) {
2016-07-15 20:08:26 +00:00
lfsHandler(w, r, id)
2015-08-04 17:13:46 +00:00
}
return
}
2016-07-15 20:08:26 +00:00
debug(id, "git http-backend %s %s", r.Method, r.URL)
gitHandler(w, r)
})
urlname := writeTestStateFile([]byte(server.URL), "LFSTEST_URL", "lfstest-gitserver")
defer os.RemoveAll(urlname)
sslurlname := writeTestStateFile([]byte(serverTLS.URL), "LFSTEST_SSL_URL", "lfstest-gitserver-ssl")
defer os.RemoveAll(sslurlname)
clientCertUrlname := writeTestStateFile([]byte(serverClientCert.URL), "LFSTEST_CLIENT_CERT_URL", "lfstest-gitserver-ssl")
defer os.RemoveAll(clientCertUrlname)
block := &pem.Block{}
block.Type = "CERTIFICATE"
block.Bytes = serverTLS.TLS.Certificates[0].Certificate[0]
pembytes := pem.EncodeToMemory(block)
certname := writeTestStateFile(pembytes, "LFSTEST_CERT", "lfstest-gitserver-cert")
defer os.RemoveAll(certname)
cccertname := writeTestStateFile(clientCertPEM, "LFSTEST_CLIENT_CERT", "lfstest-gitserver-client-cert")
defer os.RemoveAll(cccertname)
ckcertname := writeTestStateFile(clientKeyPEM, "LFSTEST_CLIENT_KEY", "lfstest-gitserver-client-key")
defer os.RemoveAll(ckcertname)
2016-07-15 20:08:26 +00:00
debug("init", "server url: %s", server.URL)
debug("init", "server tls url: %s", serverTLS.URL)
debug("init", "server client cert url: %s", serverClientCert.URL)
<-stopch
2016-07-15 20:08:26 +00:00
debug("init", "git server done")
}
// writeTestStateFile writes contents to either the file referenced by the
// environment variable envVar, or defaultFilename if that's not set. Returns
// the filename that was used
func writeTestStateFile(contents []byte, envVar, defaultFilename string) string {
f := os.Getenv(envVar)
if len(f) == 0 {
f = defaultFilename
}
file, err := os.Create(f)
if err != nil {
log.Fatalln(err)
}
file.Write(contents)
file.Close()
return f
}
type lfsObject struct {
Oid string `json:"oid,omitempty"`
Size int64 `json:"size,omitempty"`
Authenticated bool `json:"authenticated,omitempty"`
Actions map[string]lfsLink `json:"actions,omitempty"`
Err *lfsError `json:"error,omitempty"`
}
type lfsLink struct {
Href string `json:"href"`
Header map[string]string `json:"header,omitempty"`
ExpiresAt time.Time `json:"expires_at,omitempty"`
}
type lfsError struct {
2017-01-06 22:19:10 +00:00
Code int `json:"code,omitempty"`
2016-11-08 22:32:01 +00:00
Message string `json:"message"`
}
2017-01-06 17:07:46 +00:00
func writeLFSError(w http.ResponseWriter, code int, msg string) {
by, err := json.Marshal(&lfsError{Message: msg})
if err != nil {
2017-01-06 22:19:10 +00:00
http.Error(w, "json encoding error: "+err.Error(), 500)
2017-01-06 17:07:46 +00:00
return
}
w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
w.WriteHeader(code)
w.Write(by)
}
// handles any requests with "{name}.server.git/info/lfs" in the path
2016-07-15 20:08:26 +00:00
func lfsHandler(w http.ResponseWriter, r *http.Request, id string) {
repo, err := repoFromLfsUrl(r.URL.Path)
if err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
return
}
2016-07-15 20:08:26 +00:00
debug(id, "git lfs %s %s repo: %s", r.Method, r.URL, repo)
w.Header().Set("Content-Type", "application/vnd.git-lfs+json")
2015-05-19 21:18:14 +00:00
switch r.Method {
case "POST":
if strings.HasSuffix(r.URL.String(), "batch") {
2016-07-15 20:08:26 +00:00
lfsBatchHandler(w, r, id, repo)
} else if strings.HasSuffix(r.URL.String(), "locks") || strings.HasSuffix(r.URL.String(), "unlock") {
locksHandler(w, r)
} else {
2016-11-08 22:32:01 +00:00
w.WriteHeader(404)
}
2016-11-08 22:32:01 +00:00
case "DELETE":
lfsDeleteHandler(w, r, id, repo)
2015-05-19 21:18:14 +00:00
case "GET":
if strings.Contains(r.URL.String(), "/locks") {
locksHandler(w, r)
} else {
w.WriteHeader(404)
}
2015-05-19 21:18:14 +00:00
default:
w.WriteHeader(405)
}
}
func lfsUrl(repo, oid string) string {
return server.URL + "/storage/" + oid + "?r=" + repo
}
var (
retries = make(map[string]uint32)
retriesMu sync.Mutex
)
func incrementRetriesFor(api, direction, repo, oid string, check bool) (after uint32, ok bool) {
// fmtStr formats a string like "<api>-<direction>-[check]-<retry>",
// i.e., "legacy-upload-check-retry", or "storage-download-retry".
var fmtStr string
if check {
fmtStr = "%s-%s-check-retry"
} else {
fmtStr = "%s-%s-retry"
}
if oidHandlers[oid] != fmt.Sprintf(fmtStr, api, direction) {
return 0, false
}
retriesMu.Lock()
defer retriesMu.Unlock()
retryKey := strings.Join([]string{direction, repo, oid}, ":")
retries[retryKey]++
retries := retries[retryKey]
return retries, true
}
2016-11-08 22:32:01 +00:00
func lfsDeleteHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
parts := strings.Split(r.URL.Path, "/")
oid := parts[len(parts)-1]
2016-11-08 22:32:01 +00:00
largeObjects.Delete(repo, oid)
debug(id, "DELETE:", oid)
2015-05-19 21:18:14 +00:00
w.WriteHeader(200)
}
2016-07-15 20:08:26 +00:00
func lfsBatchHandler(w http.ResponseWriter, r *http.Request, id, repo string) {
2016-11-08 22:32:01 +00:00
checkingObject := r.Header.Get("X-Check-Object") == "1"
if !checkingObject && repo == "batchunsupported" {
w.WriteHeader(404)
return
}
2016-11-08 22:32:01 +00:00
if !checkingObject && repo == "badbatch" {
w.WriteHeader(203)
return
}
2015-10-07 04:14:09 +00:00
if repo == "netrctest" {
user, pass, err := extractAuth(r.Header.Get("Authorization"))
if err != nil || (user != "netrcuser" || pass != "netrcpass") {
w.WriteHeader(403)
return
}
}
2016-04-08 20:53:02 +00:00
if missingRequiredCreds(w, r, repo) {
return
}
type batchReq struct {
2016-06-07 11:01:14 +00:00
Transfers []string `json:"transfers"`
Operation string `json:"operation"`
Objects []lfsObject `json:"objects"`
}
2016-06-07 11:01:14 +00:00
type batchResp struct {
Transfer string `json:"transfer,omitempty"`
Objects []lfsObject `json:"objects"`
}
buf := &bytes.Buffer{}
tee := io.TeeReader(r.Body, buf)
var objs batchReq
err := json.NewDecoder(tee).Decode(&objs)
io.Copy(ioutil.Discard, r.Body)
r.Body.Close()
2016-07-15 20:08:26 +00:00
debug(id, "REQUEST")
debug(id, buf.String())
if err != nil {
log.Fatal(err)
}
res := []lfsObject{}
testingChunked := testingChunkedTransferEncoding(r)
testingTus := testingTusUploadInBatchReq(r)
testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r)
testingCustomTransfer := testingCustomTransfer(r)
2016-06-07 11:01:14 +00:00
var transferChoice string
var searchForTransfer string
if testingTus {
searchForTransfer = "tus"
} else if testingCustomTransfer {
searchForTransfer = "testcustom"
}
if len(searchForTransfer) > 0 {
for _, t := range objs.Transfers {
if t == searchForTransfer {
transferChoice = searchForTransfer
break
}
}
}
for _, obj := range objs.Objects {
handler := oidHandlers[obj.Oid]
action := objs.Operation
o := lfsObject{
2016-11-08 22:32:01 +00:00
Size: obj.Size,
Actions: make(map[string]lfsLink),
}
// Clobber the OID if told to do so.
if handler == "unknown-oid" {
o.Oid = "unknown-oid"
} else {
o.Oid = obj.Oid
}
exists := largeObjects.Has(repo, obj.Oid)
addAction := true
if action == "download" {
if !exists {
o.Err = &lfsError{Code: 404, Message: fmt.Sprintf("Object %v does not exist", obj.Oid)}
addAction = false
}
} else {
if exists {
// not an error but don't add an action
addAction = false
}
}
if handler == "object-authenticated" {
o.Authenticated = true
}
switch handler {
case "status-batch-403":
o.Err = &lfsError{Code: 403, Message: "welp"}
case "status-batch-404":
o.Err = &lfsError{Code: 404, Message: "welp"}
case "status-batch-410":
o.Err = &lfsError{Code: 410, Message: "welp"}
case "status-batch-422":
o.Err = &lfsError{Code: 422, Message: "welp"}
case "status-batch-500":
o.Err = &lfsError{Code: 500, Message: "welp"}
default: // regular 200 response
if handler == "return-invalid-size" {
o.Size = -1
}
if addAction {
a := lfsLink{
Href: lfsUrl(repo, obj.Oid),
Header: map[string]string{},
}
if handler == "return-expired-action-forever" || (handler == "return-expired-action" && canServeExpired(repo)) {
a.ExpiresAt = time.Now().Add(-5 * time.Minute)
serveExpired(repo)
}
2016-11-08 22:32:01 +00:00
o.Actions[action] = a
}
}
2016-11-08 22:32:01 +00:00
if testingChunked && addAction {
o.Actions[action].Header["Transfer-Encoding"] = "chunked"
}
2016-11-08 22:32:01 +00:00
if testingTusInterrupt && addAction {
o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true"
}
res = append(res, o)
}
2016-06-07 11:01:14 +00:00
ores := batchResp{Transfer: transferChoice, Objects: res}
by, err := json.Marshal(ores)
if err != nil {
log.Fatal(err)
}
2016-07-15 20:08:26 +00:00
debug(id, "RESPONSE: 200")
debug(id, string(by))
w.WriteHeader(200)
w.Write(by)
}
// emu guards expiredRepos
var emu sync.Mutex
// expiredRepos is a map keyed by repository name, valuing to whether or not it
// has yet served an expired object.
var expiredRepos = map[string]bool{}
// canServeExpired returns whether or not a repository is capable of serving an
// expired object. In other words, canServeExpired returns whether or not the
// given repo has yet served an expired object.
func canServeExpired(repo string) bool {
emu.Lock()
defer emu.Unlock()
return !expiredRepos[repo]
}
// serveExpired marks the given repo as having served an expired object, making
// it unable for that same repository to return an expired object in the future
func serveExpired(repo string) {
emu.Lock()
defer emu.Unlock()
expiredRepos[repo] = true
}
// Persistent state across requests
var batchResumeFailFallbackStorageAttempts = 0
var tusStorageAttempts = 0
// handles any /storage/{oid} requests
func storageHandler(w http.ResponseWriter, r *http.Request) {
2016-07-15 20:08:26 +00:00
id, ok := reqId(w)
if !ok {
return
}
repo := r.URL.Query().Get("r")
2015-09-21 18:17:58 +00:00
parts := strings.Split(r.URL.Path, "/")
oid := parts[len(parts)-1]
2016-04-08 20:53:02 +00:00
if missingRequiredCreds(w, r, repo) {
return
}
2016-07-15 20:08:26 +00:00
debug(id, "storage %s %s repo: %s", r.Method, oid, repo)
switch r.Method {
case "PUT":
2015-09-21 18:17:58 +00:00
switch oidHandlers[oid] {
case "status-storage-403":
w.WriteHeader(403)
return
case "status-storage-404":
w.WriteHeader(404)
return
case "status-storage-410":
w.WriteHeader(410)
return
case "status-storage-422":
w.WriteHeader(422)
return
case "status-storage-500":
w.WriteHeader(500)
return
case "status-storage-503":
2017-01-06 17:07:46 +00:00
writeLFSError(w, 503, "LFS is temporarily unavailable")
2015-09-21 18:17:58 +00:00
return
case "object-authenticated":
if len(r.Header.Get("Authorization")) > 0 {
w.WriteHeader(400)
w.Write([]byte("Should not send authentication"))
}
return
case "storage-upload-retry":
if retries, ok := incrementRetriesFor("storage", "upload", repo, oid, false); ok && retries < 3 {
w.WriteHeader(500)
w.Write([]byte("malformed content"))
return
}
2015-09-21 18:17:58 +00:00
}
if testingChunkedTransferEncoding(r) {
valid := false
for _, value := range r.TransferEncoding {
if value == "chunked" {
valid = true
break
}
}
if !valid {
2016-07-15 20:08:26 +00:00
debug(id, "Chunked transfer encoding expected")
}
}
hash := sha256.New()
buf := &bytes.Buffer{}
io.Copy(io.MultiWriter(hash, buf), r.Body)
oid := hex.EncodeToString(hash.Sum(nil))
if !strings.HasSuffix(r.URL.Path, "/"+oid) {
w.WriteHeader(403)
return
}
largeObjects.Set(repo, oid, buf.Bytes())
case "GET":
parts := strings.Split(r.URL.Path, "/")
oid := parts[len(parts)-1]
2016-06-07 11:01:14 +00:00
statusCode := 200
byteLimit := 0
resumeAt := int64(0)
if by, ok := largeObjects.Get(repo, oid); ok {
if len(by) == len("storage-download-retry") && string(by) == "storage-download-retry" {
if retries, ok := incrementRetriesFor("storage", "download", repo, oid, false); ok && retries < 3 {
statusCode = 500
by = []byte("malformed content")
}
} else if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" {
2016-06-07 11:01:14 +00:00
// Resume if header includes range, otherwise deliberately interrupt
if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
regex := regexp.MustCompile(`bytes=(\d+)\-.*`)
match := regex.FindStringSubmatch(rangeHdr)
if match != nil && len(match) > 1 {
statusCode = 206
resumeAt, _ = strconv.ParseInt(match[1], 10, 32)
w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by))))
2016-06-07 11:01:14 +00:00
}
} else {
byteLimit = 10
}
} else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" {
// Fail any Range: request even though we said we supported it
// To make sure client can fall back
if rangeHdr := r.Header.Get("Range"); rangeHdr != "" {
w.WriteHeader(416)
return
}
if batchResumeFailFallbackStorageAttempts == 0 {
// Truncate output on FIRST attempt to cause resume
// Second attempt (without range header) is fallback, complete successfully
byteLimit = 8
batchResumeFailFallbackStorageAttempts++
}
2016-06-07 11:01:14 +00:00
}
w.WriteHeader(statusCode)
if byteLimit > 0 {
w.Write(by[0:byteLimit])
} else if resumeAt > 0 {
w.Write(by[resumeAt:])
} else {
w.Write(by)
}
return
}
w.WriteHeader(404)
case "HEAD":
// tus.io
2016-07-15 20:08:26 +00:00
if !validateTusHeaders(r, id) {
w.WriteHeader(400)
return
}
parts := strings.Split(r.URL.Path, "/")
oid := parts[len(parts)-1]
2016-07-04 11:44:19 +00:00
var offset int64
if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
2016-07-04 11:44:19 +00:00
offset = int64(len(by))
}
2016-07-04 11:44:19 +00:00
w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10))
w.WriteHeader(200)
case "PATCH":
// tus.io
2016-07-15 20:08:26 +00:00
if !validateTusHeaders(r, id) {
w.WriteHeader(400)
return
}
parts := strings.Split(r.URL.Path, "/")
oid := parts[len(parts)-1]
offsetHdr := r.Header.Get("Upload-Offset")
offset, err := strconv.ParseInt(offsetHdr, 10, 64)
if err != nil {
log.Fatal("Unable to parse Upload-Offset header in request: ", err)
w.WriteHeader(400)
return
}
hash := sha256.New()
buf := &bytes.Buffer{}
out := io.MultiWriter(hash, buf)
if by, ok := largeObjects.GetIncomplete(repo, oid); ok {
if offset != int64(len(by)) {
log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by)))
w.WriteHeader(400)
return
}
_, err := out.Write(by)
if err != nil {
log.Fatal("Error reading incomplete bytes from store: ", err)
w.WriteHeader(500)
return
}
largeObjects.DeleteIncomplete(repo, oid)
2016-07-15 20:08:26 +00:00
debug(id, "Resuming upload of %v at byte %d", oid, offset)
}
// As a test, we intentionally break the upload from byte 0 by only
// reading some bytes the quitting & erroring, this forces a resume
// any offset > 0 will work ok
var copyErr error
if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 {
chdr := r.Header.Get("Content-Length")
contentLen, err := strconv.ParseInt(chdr, 10, 64)
if err != nil {
log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr))
w.WriteHeader(400)
return
}
truncated := contentLen / 3
_, _ = io.CopyN(out, r.Body, truncated)
r.Body.Close()
copyErr = fmt.Errorf("Simulated copy error")
} else {
_, copyErr = io.Copy(out, r.Body)
}
if copyErr != nil {
b := buf.Bytes()
if len(b) > 0 {
2016-07-15 20:08:26 +00:00
debug(id, "Incomplete upload of %v, %d bytes", oid, len(b))
largeObjects.SetIncomplete(repo, oid, b)
}
w.WriteHeader(500)
} else {
checkoid := hex.EncodeToString(hash.Sum(nil))
if checkoid != oid {
log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid))
w.WriteHeader(403)
return
}
b := buf.Bytes()
largeObjects.Set(repo, oid, b)
w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10))
w.WriteHeader(204)
}
default:
2015-05-19 21:18:14 +00:00
w.WriteHeader(405)
}
}
2016-07-15 20:08:26 +00:00
func validateTusHeaders(r *http.Request, id string) bool {
2016-07-04 11:44:19 +00:00
if len(r.Header.Get("Tus-Resumable")) == 0 {
2016-07-15 20:08:26 +00:00
debug(id, "Missing Tus-Resumable header in request")
2016-07-04 11:44:19 +00:00
return false
}
return true
}
func gitHandler(w http.ResponseWriter, r *http.Request) {
defer func() {
io.Copy(ioutil.Discard, r.Body)
r.Body.Close()
}()
cmd := exec.Command("git", "http-backend")
cmd.Env = []string{
fmt.Sprintf("GIT_PROJECT_ROOT=%s", repoDir),
fmt.Sprintf("GIT_HTTP_EXPORT_ALL="),
fmt.Sprintf("PATH_INFO=%s", r.URL.Path),
fmt.Sprintf("QUERY_STRING=%s", r.URL.RawQuery),
fmt.Sprintf("REQUEST_METHOD=%s", r.Method),
fmt.Sprintf("CONTENT_TYPE=%s", r.Header.Get("Content-Type")),
}
buffer := &bytes.Buffer{}
cmd.Stdin = r.Body
cmd.Stdout = buffer
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
log.Fatal(err)
}
text := textproto.NewReader(bufio.NewReader(buffer))
code, _, _ := text.ReadCodeLine(-1)
if code != 0 {
w.WriteHeader(code)
}
headers, _ := text.ReadMIMEHeader()
head := w.Header()
for key, values := range headers {
for _, value := range values {
head.Add(key, value)
}
}
io.Copy(w, text.R)
}
2015-06-29 11:57:50 +00:00
func redirect307Handler(w http.ResponseWriter, r *http.Request) {
2016-07-15 20:08:26 +00:00
id, ok := reqId(w)
if !ok {
return
}
2015-06-29 11:57:50 +00:00
// Send a redirect to info/lfs
// Make it either absolute or relative depending on subpath
parts := strings.Split(r.URL.Path, "/")
// first element is always blank since rooted
var redirectTo string
if parts[2] == "rel" {
redirectTo = "/" + strings.Join(parts[3:], "/")
} else if parts[2] == "abs" {
redirectTo = server.URL + "/" + strings.Join(parts[3:], "/")
} else {
2016-07-15 20:08:26 +00:00
debug(id, "Invalid URL for redirect: %v", r.URL)
2015-06-29 11:57:50 +00:00
w.WriteHeader(404)
return
}
w.Header().Set("Location", redirectTo)
w.WriteHeader(307)
}
type Committer struct {
Name string `json:"name"`
Email string `json:"email"`
}
type Lock struct {
Id string `json:"id"`
Path string `json:"path"`
Committer Committer `json:"committer"`
CommitSHA string `json:"commit_sha"`
LockedAt time.Time `json:"locked_at"`
UnlockedAt time.Time `json:"unlocked_at,omitempty"`
}
type LockRequest struct {
Path string `json:"path"`
LatestRemoteCommit string `json:"latest_remote_commit"`
Committer Committer `json:"committer"`
}
type LockResponse struct {
Lock *Lock `json:"lock"`
CommitNeeded string `json:"commit_needed,omitempty"`
Err string `json:"error,omitempty"`
}
type UnlockRequest struct {
Id string `json:"id"`
Force bool `json:"force"`
}
type UnlockResponse struct {
Lock *Lock `json:"lock"`
Err string `json:"error,omitempty"`
}
type LockList struct {
Locks []Lock `json:"locks"`
NextCursor string `json:"next_cursor,omitempty"`
Err string `json:"error,omitempty"`
}
var (
2017-01-31 23:55:31 +00:00
lmu sync.RWMutex
repoLocks = map[string][]Lock{}
)
2017-01-31 23:55:31 +00:00
func addLocks(repo string, l ...Lock) {
lmu.Lock()
defer lmu.Unlock()
2017-01-31 23:55:31 +00:00
repoLocks[repo] = append(repoLocks[repo], l...)
sort.Sort(LocksByCreatedAt(repoLocks[repo]))
}
func getLocks(repo string) []Lock {
lmu.RLock()
defer lmu.RUnlock()
2017-01-31 23:55:31 +00:00
found := repoLocks[repo]
locks := make([]Lock, len(found))
for i, l := range found {
locks[i] = l
}
2017-01-31 23:55:31 +00:00
return locks
}
2017-01-31 23:55:31 +00:00
func delLock(repo string, id string) *Lock {
lmu.RLock()
defer lmu.RUnlock()
2017-01-31 23:55:31 +00:00
var deleted *Lock
locks := make([]Lock, 0, len(repoLocks[repo]))
for _, l := range repoLocks[repo] {
if l.Id == id {
deleted = &l
continue
}
locks = append(locks, l)
}
repoLocks[repo] = locks
return deleted
}
type LocksByCreatedAt []Lock
func (c LocksByCreatedAt) Len() int { return len(c) }
func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) }
func (c LocksByCreatedAt) Swap(i, j int) { c[i], c[j] = c[j], c[i] }
2016-06-03 21:00:11 +00:00
var lockRe = regexp.MustCompile(`/locks/?$`)
func locksHandler(w http.ResponseWriter, r *http.Request) {
2017-01-31 23:55:31 +00:00
repo, err := repoFromLfsUrl(r.URL.Path)
if err != nil {
w.WriteHeader(500)
w.Write([]byte(err.Error()))
return
}
dec := json.NewDecoder(r.Body)
enc := json.NewEncoder(w)
switch r.Method {
case "GET":
2016-06-03 21:00:11 +00:00
if !lockRe.MatchString(r.URL.Path) {
2016-12-22 23:59:54 +00:00
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusNotFound)
w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`))
} else {
if err := r.ParseForm(); err != nil {
http.Error(w, "could not parse form values", http.StatusInternalServerError)
return
}
ll := &LockList{}
2017-01-31 23:55:31 +00:00
locks := getLocks(repo)
2016-12-22 23:59:54 +00:00
w.Header().Set("Content-Type", "application/json")
if cursor := r.FormValue("cursor"); cursor != "" {
lastSeen := -1
for i, l := range locks {
if l.Id == cursor {
lastSeen = i
break
}
}
if lastSeen > -1 {
locks = locks[lastSeen:]
} else {
enc.Encode(&LockList{
Err: fmt.Sprintf("cursor (%s) not found", cursor),
})
}
}
2016-06-03 21:34:20 +00:00
if path := r.FormValue("path"); path != "" {
var filtered []Lock
for _, l := range locks {
if l.Path == path {
filtered = append(filtered, l)
}
}
locks = filtered
}
if limit := r.FormValue("limit"); limit != "" {
size, err := strconv.Atoi(r.FormValue("limit"))
if err != nil {
enc.Encode(&LockList{
Err: "unable to parse limit amount",
})
} else {
size = int(math.Min(float64(len(locks)), 3))
if size < 0 {
locks = []Lock{}
} else {
locks = locks[:size]
if size+1 < len(locks) {
ll.NextCursor = locks[size+1].Id
}
}
}
}
ll.Locks = locks
enc.Encode(ll)
}
case "POST":
w.Header().Set("Content-Type", "application/json")
if strings.HasSuffix(r.URL.Path, "unlock") {
var unlockRequest UnlockRequest
if err := dec.Decode(&unlockRequest); err != nil {
enc.Encode(&UnlockResponse{
Err: err.Error(),
})
}
2017-01-31 23:55:31 +00:00
if l := delLock(repo, unlockRequest.Id); l != nil {
enc.Encode(&UnlockResponse{
2017-01-31 23:55:31 +00:00
Lock: l,
})
} else {
enc.Encode(&UnlockResponse{
Err: "unable to find lock",
})
}
} else {
var lockRequest LockRequest
if err := dec.Decode(&lockRequest); err != nil {
enc.Encode(&LockResponse{
Err: err.Error(),
})
}
2017-01-31 23:55:31 +00:00
for _, l := range getLocks(repo) {
if l.Path == lockRequest.Path {
enc.Encode(&LockResponse{
Err: "lock already created",
})
return
}
}
var id [20]byte
rand.Read(id[:])
lock := &Lock{
Id: fmt.Sprintf("%x", id[:]),
Path: lockRequest.Path,
Committer: lockRequest.Committer,
CommitSHA: lockRequest.LatestRemoteCommit,
LockedAt: time.Now(),
}
2017-01-31 23:55:31 +00:00
addLocks(repo, *lock)
// TODO(taylor): commit_needed case
// TODO(taylor): err case
enc.Encode(&LockResponse{
Lock: lock,
})
}
default:
http.NotFound(w, r)
}
}
2016-04-08 20:53:02 +00:00
func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool {
if repo != "requirecreds" {
return false
}
auth := r.Header.Get("Authorization")
user, pass, err := extractAuth(auth)
if err != nil {
2017-01-06 17:07:46 +00:00
writeLFSError(w, 403, err.Error())
2016-04-08 20:53:02 +00:00
return true
}
if user != "requirecreds" || pass != "pass" {
2017-01-06 17:07:46 +00:00
writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass))
2016-04-08 20:53:02 +00:00
return true
}
return false
}
func testingChunkedTransferEncoding(r *http.Request) bool {
return strings.HasPrefix(r.URL.String(), "/test-chunked-transfer-encoding")
}
func testingTusUploadInBatchReq(r *http.Request) bool {
return strings.HasPrefix(r.URL.String(), "/test-tus-upload")
}
func testingTusUploadInterruptedInBatchReq(r *http.Request) bool {
return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt")
}
func testingCustomTransfer(r *http.Request) bool {
return strings.HasPrefix(r.URL.String(), "/test-custom-transfer")
}
var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`)
func repoFromLfsUrl(urlpath string) (string, error) {
matches := lfsUrlRE.FindStringSubmatch(urlpath)
if len(matches) != 2 {
return "", fmt.Errorf("LFS url '%s' does not match %v", urlpath, lfsUrlRE)
}
repo := matches[1]
if strings.HasSuffix(repo, ".git") {
return repo[0 : len(repo)-4], nil
}
return repo, nil
}
type lfsStorage struct {
objects map[string]map[string][]byte
incomplete map[string]map[string][]byte
mutex *sync.Mutex
}
func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.objects[repo]
if !ok {
return nil, ok
}
by, ok := repoObjects[oid]
return by, ok
}
func (s *lfsStorage) Has(repo, oid string) bool {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.objects[repo]
if !ok {
return false
}
_, ok = repoObjects[oid]
return ok
}
func (s *lfsStorage) Set(repo, oid string, by []byte) {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.objects[repo]
if !ok {
repoObjects = make(map[string][]byte)
s.objects[repo] = repoObjects
}
repoObjects[oid] = by
}
func (s *lfsStorage) Delete(repo, oid string) {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.objects[repo]
if ok {
delete(repoObjects, oid)
}
}
func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.incomplete[repo]
if !ok {
return nil, ok
}
by, ok := repoObjects[oid]
return by, ok
}
func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.incomplete[repo]
if !ok {
repoObjects = make(map[string][]byte)
s.incomplete[repo] = repoObjects
}
repoObjects[oid] = by
}
func (s *lfsStorage) DeleteIncomplete(repo, oid string) {
s.mutex.Lock()
defer s.mutex.Unlock()
repoObjects, ok := s.incomplete[repo]
if ok {
delete(repoObjects, oid)
}
}
func newLfsStorage() *lfsStorage {
return &lfsStorage{
objects: make(map[string]map[string][]byte),
incomplete: make(map[string]map[string][]byte),
mutex: &sync.Mutex{},
}
}
2015-08-04 17:13:46 +00:00
2015-10-07 04:14:09 +00:00
func extractAuth(auth string) (string, string, error) {
2015-08-04 17:13:46 +00:00
if strings.HasPrefix(auth, "Basic ") {
decodeBy, err := base64.StdEncoding.DecodeString(auth[6:len(auth)])
decoded := string(decodeBy)
2015-08-04 17:55:44 +00:00
2015-08-04 17:13:46 +00:00
if err != nil {
2015-10-07 04:14:09 +00:00
return "", "", err
2015-08-04 17:13:46 +00:00
}
parts := strings.SplitN(decoded, ":", 2)
if len(parts) == 2 {
2015-10-07 04:14:09 +00:00
return parts[0], parts[1], nil
2015-08-04 17:13:46 +00:00
}
2015-10-07 04:14:09 +00:00
return "", "", nil
}
2015-08-04 17:55:44 +00:00
2015-10-07 04:14:09 +00:00
return "", "", nil
}
2017-01-06 22:19:10 +00:00
func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string, ntlmSession ntlm.ServerSession) bool {
2015-10-07 04:14:09 +00:00
auth := r.Header.Get("Authorization")
2017-01-06 22:19:10 +00:00
if strings.Contains(r.URL.Path, "ntlm") {
return false
}
2015-10-07 04:14:09 +00:00
if auth == "" {
w.WriteHeader(401)
return true
}
user, pass, err := extractAuth(auth)
if err != nil {
w.WriteHeader(403)
2016-07-15 20:08:26 +00:00
debug(id, "Error decoding auth: %s", err)
2015-10-07 04:14:09 +00:00
return true
}
switch user {
case "user":
if pass == "pass" {
return false
}
case "netrcuser", "requirecreds":
2015-10-07 04:14:09 +00:00
return false
case "path":
if strings.HasPrefix(r.URL.Path, "/"+pass) {
return false
}
2016-07-15 20:08:26 +00:00
debug(id, "auth attempt against: %q", r.URL.Path)
2015-08-04 17:13:46 +00:00
}
w.WriteHeader(403)
2016-07-15 20:08:26 +00:00
debug(id, "Bad auth: %q", auth)
2015-08-04 17:13:46 +00:00
return true
}
2015-09-21 18:17:58 +00:00
2017-01-06 22:19:10 +00:00
func handleNTLM(w http.ResponseWriter, r *http.Request, authHeader string, session ntlm.ServerSession) {
if strings.HasPrefix(strings.ToUpper(authHeader), "BASIC ") {
authHeader = ""
}
switch authHeader {
case "":
w.Header().Set("Www-Authenticate", "ntlm")
w.WriteHeader(401)
// ntlmNegotiateMessage from httputil pkg
case "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB":
ch, err := session.GenerateChallengeMessage()
if err != nil {
writeLFSError(w, 500, err.Error())
return
}
chMsg := base64.StdEncoding.EncodeToString(ch.Bytes())
w.Header().Set("Www-Authenticate", "ntlm "+chMsg)
w.WriteHeader(401)
default:
if !strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ") {
writeLFSError(w, 500, "bad authorization header: "+authHeader)
return
}
auth := authHeader[5:] // strip "ntlm " prefix
val, err := base64.StdEncoding.DecodeString(auth)
if err != nil {
writeLFSError(w, 500, "base64 decode error: "+err.Error())
return
}
_, err = ntlm.ParseAuthenticateMessage(val, 2)
if err != nil {
writeLFSError(w, 500, "auth parse error: "+err.Error())
return
}
}
}
2015-09-21 18:17:58 +00:00
func init() {
oidHandlers = make(map[string]string)
for _, content := range contentHandlers {
h := sha256.New()
h.Write([]byte(content))
oidHandlers[hex.EncodeToString(h.Sum(nil))] = content
}
}
2016-07-15 20:08:26 +00:00
func debug(reqid, msg string, args ...interface{}) {
fullargs := make([]interface{}, len(args)+1)
fullargs[0] = reqid
for i, a := range args {
fullargs[i+1] = a
}
log.Printf("[%s] "+msg+"\n", fullargs...)
}
func reqId(w http.ResponseWriter) (string, bool) {
b := make([]byte, 16)
_, err := rand.Read(b)
if err != nil {
http.Error(w, "error generating id: "+err.Error(), 500)
return "", false
}
return fmt.Sprintf("%x-%x-%x-%x-%x", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]), true
}
// https://ericchiang.github.io/post/go-tls/
func generateCARootCertificates() (rootKey *rsa.PrivateKey, rootCert *x509.Certificate) {
// generate a new key-pair
rootKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
log.Fatalf("generating random key: %v", err)
}
rootCertTmpl, err := CertTemplate()
if err != nil {
log.Fatalf("creating cert template: %v", err)
}
// describe what the certificate will be used for
rootCertTmpl.IsCA = true
rootCertTmpl.KeyUsage = x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature
rootCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}
// rootCertTmpl.IPAddresses = []net.IP{net.ParseIP("127.0.0.1")}
rootCert, _, err = CreateCert(rootCertTmpl, rootCertTmpl, &rootKey.PublicKey, rootKey)
return
}
func generateClientCertificates(rootCert *x509.Certificate, rootKey interface{}) (clientKey *rsa.PrivateKey, clientCertPEM []byte, clientKeyPEM []byte) {
// create a key-pair for the client
clientKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
log.Fatalf("generating random key: %v", err)
}
// create a template for the client
clientCertTmpl, err1 := CertTemplate()
if err1 != nil {
log.Fatalf("creating cert template: %v", err1)
}
clientCertTmpl.KeyUsage = x509.KeyUsageDigitalSignature
clientCertTmpl.ExtKeyUsage = []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}
// the root cert signs the cert by again providing its private key
_, clientCertPEM, err2 := CreateCert(clientCertTmpl, rootCert, &clientKey.PublicKey, rootKey)
if err2 != nil {
log.Fatalf("error creating cert: %v", err2)
}
// encode and load the cert and private key for the client
clientKeyPEM = pem.EncodeToMemory(&pem.Block{
Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(clientKey),
})
return
}
// helper function to create a cert template with a serial number and other required fields
func CertTemplate() (*x509.Certificate, error) {
// generate a random serial number (a real cert authority would have some logic behind this)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, errors.New("failed to generate serial number: " + err.Error())
}
tmpl := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{Organization: []string{"Yhat, Inc."}},
SignatureAlgorithm: x509.SHA256WithRSA,
NotBefore: time.Now(),
NotAfter: time.Now().Add(time.Hour), // valid for an hour
BasicConstraintsValid: true,
}
return &tmpl, nil
}
func CreateCert(template, parent *x509.Certificate, pub interface{}, parentPriv interface{}) (
cert *x509.Certificate, certPEM []byte, err error) {
certDER, err := x509.CreateCertificate(rand.Reader, template, parent, pub, parentPriv)
if err != nil {
return
}
// parse the resulting certificate so we can use it again
cert, err = x509.ParseCertificate(certDER)
if err != nil {
return
}
// PEM encode the certificate (this is a standard TLS encoding)
b := pem.Block{Type: "CERTIFICATE", Bytes: certDER}
certPEM = pem.EncodeToMemory(&b)
return
}