diff --git a/api/api.go b/api/api.go deleted file mode 100644 index 9cdc6005..00000000 --- a/api/api.go +++ /dev/null @@ -1,87 +0,0 @@ -// Package api provides the interface for querying LFS servers (metadata) -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" - "github.com/git-lfs/git-lfs/tools" - - "github.com/rubyist/tracerx" -) - -// BatchSingle calls the batch API with just a single object. -func BatchSingle(cfg *config.Configuration, inobj *ObjectResource, operation string, transferAdapters []string) (obj *ObjectResource, transferAdapter string, e error) { - objs, adapterName, err := Batch(cfg, []*ObjectResource{inobj}, operation, transferAdapters) - if err != nil { - return nil, "", err - } - if len(objs) > 0 { - return objs[0], adapterName, nil - } - return nil, "", fmt.Errorf("Object not found") -} - -// Batch calls the batch API and returns object results -func Batch(cfg *config.Configuration, objects []*ObjectResource, operation string, transferAdapters []string) (objs []*ObjectResource, transferAdapter string, e error) { - if len(objects) == 0 { - return nil, "", nil - } - - // Compatibility; omit transfers list when only basic - // older schemas included `additionalproperties=false` - if len(transferAdapters) == 1 && transferAdapters[0] == "basic" { - transferAdapters = nil - } - - o := &batchRequest{Operation: operation, Objects: objects, TransferAdapterNames: transferAdapters} - by, err := json.Marshal(o) - if err != nil { - return nil, "", errors.Wrap(err, "batch request") - } - - req, err := NewBatchRequest(cfg, operation) - if err != nil { - return nil, "", errors.Wrap(err, "batch request") - } - - req.Header.Set("Content-Type", MediaType) - req.Header.Set("Content-Length", strconv.Itoa(len(by))) - req.ContentLength = int64(len(by)) - req.Body = tools.NewReadSeekCloserWrapper(bytes.NewReader(by)) - - tracerx.Printf("api: batch %d files", len(objects)) - - res, bresp, err := DoBatchRequest(cfg, req) - - if err != nil { - if res == nil { - return nil, "", errors.NewRetriableError(err) - } - - if res.StatusCode == 0 { - return nil, "", errors.NewRetriableError(err) - } - - if errors.IsAuthError(err) { - httputil.SetAuthType(cfg, req, res) - return Batch(cfg, objects, operation, transferAdapters) - } - - tracerx.Printf("api error: %s", err) - return nil, "", errors.Wrap(err, "batch response") - } - httputil.LogTransfer(cfg, "lfs.batch", res) - - if res.StatusCode != 200 { - return nil, "", errors.Errorf("Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode) - } - - return bresp.Objects, bresp.TransferAdapterName, nil -} diff --git a/api/client.go b/api/client.go deleted file mode 100644 index 0a4248d6..00000000 --- a/api/client.go +++ /dev/null @@ -1,75 +0,0 @@ -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -import "github.com/git-lfs/git-lfs/config" - -type Operation string - -const ( - UploadOperation Operation = "upload" - DownloadOperation Operation = "download" -) - -// Client exposes the LFS API to callers through a multitude of different -// services and transport mechanisms. Callers can make a *RequestSchema using -// any service that is attached to the Client, and then execute a request based -// on that schema using the `Do()` method. 
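For reference, the batch API removed in api.go above was typically driven end to end as in the following sketch; the endpoint URL and object ID are placeholders, and the import paths are the pre-removal ones:

```go
package main

import (
	"fmt"
	"log"

	"github.com/git-lfs/git-lfs/api"
	"github.com/git-lfs/git-lfs/config"
)

func main() {
	// The LFS endpoint URL and OID below are placeholders, not real servers/objects.
	cfg := config.NewFrom(config.Values{
		Git: map[string]string{"lfs.url": "https://lfs.example.com/media"},
	})

	// Ask the server for a download action for one object via the batch endpoint.
	obj, adapter, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: "some-oid", Size: 4}, "download", []string{"basic"})
	if err != nil {
		log.Fatal(err)
	}

	if rel, ok := obj.Rel("download"); ok {
		fmt.Println("adapter:", adapter, "href:", rel.Href)
	}
}
```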
-// -// A prototypical example follows: -// ``` -// apiResponse, schema := client.Locks.Lock(request) -// resp, err := client.Do(schema) -// if err != nil { -// handleErr(err) -// } -// -// fmt.Println(apiResponse.Lock) -// ``` -type Client struct { - // Locks is the LockService used to interact with the Git LFS file- - // locking API. - Locks LockService - - // lifecycle is the lifecycle used by all requests through this client. - lifecycle Lifecycle -} - -// NewClient instantiates and returns a new instance of *Client, with the given -// lifecycle. -// -// If no lifecycle is given, a HttpLifecycle is used by default. -func NewClient(lifecycle Lifecycle) *Client { - if lifecycle == nil { - lifecycle = NewHttpLifecycle(config.Config) - } - - return &Client{lifecycle: lifecycle} -} - -// Do preforms the request assosicated with the given *RequestSchema by -// delegating into the Lifecycle in use. -// -// If any error was encountered while either building, executing or cleaning up -// the request, then it will be returned immediately, and the request can be -// treated as invalid. -// -// If no error occured, an api.Response will be returned, along with a `nil` -// error. At this point, the body of the response has been serialized into -// `schema.Into`, and the body has been closed. -func (c *Client) Do(schema *RequestSchema) (Response, error) { - req, err := c.lifecycle.Build(schema) - if err != nil { - return nil, err - } - - resp, err := c.lifecycle.Execute(req, schema.Into) - if err != nil { - return nil, err - } - - if err = c.lifecycle.Cleanup(resp); err != nil { - return nil, err - } - - return resp, nil -} diff --git a/api/client_test.go b/api/client_test.go deleted file mode 100644 index 5c60134b..00000000 --- a/api/client_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package api_test - -import ( - "errors" - "net/http" - "testing" - - "github.com/git-lfs/git-lfs/api" - "github.com/stretchr/testify/assert" -) - -func TestClientUsesLifecycleToExecuteSchemas(t *testing.T) { - schema := new(api.RequestSchema) - req := new(http.Request) - resp := new(api.HttpResponse) - - lifecycle := new(MockLifecycle) - lifecycle.On("Build", schema).Return(req, nil).Once() - lifecycle.On("Execute", req, schema.Into).Return(resp, nil).Once() - lifecycle.On("Cleanup", resp).Return(nil).Once() - - client := api.NewClient(lifecycle) - r1, err := client.Do(schema) - - assert.Equal(t, resp, r1) - assert.Nil(t, err) - lifecycle.AssertExpectations(t) -} - -func TestClientHaltsIfSchemaCannotBeBuilt(t *testing.T) { - schema := new(api.RequestSchema) - - lifecycle := new(MockLifecycle) - lifecycle.On("Build", schema).Return(nil, errors.New("uh-oh!")).Once() - - client := api.NewClient(lifecycle) - resp, err := client.Do(schema) - - lifecycle.AssertExpectations(t) - assert.Nil(t, resp) - assert.Equal(t, "uh-oh!", err.Error()) -} - -func TestClientHaltsIfSchemaCannotBeExecuted(t *testing.T) { - schema := new(api.RequestSchema) - req := new(http.Request) - - lifecycle := new(MockLifecycle) - lifecycle.On("Build", schema).Return(req, nil).Once() - lifecycle.On("Execute", req, schema.Into).Return(nil, errors.New("uh-oh!")).Once() - - client := api.NewClient(lifecycle) - resp, err := client.Do(schema) - - lifecycle.AssertExpectations(t) - assert.Nil(t, resp) - assert.Equal(t, "uh-oh!", err.Error()) -} - -func TestClientReturnsCleanupErrors(t *testing.T) { - schema := new(api.RequestSchema) - req := new(http.Request) - resp := new(api.HttpResponse) - - lifecycle := new(MockLifecycle) - lifecycle.On("Build", 
schema).Return(req, nil).Once() - lifecycle.On("Execute", req, schema.Into).Return(resp, nil).Once() - lifecycle.On("Cleanup", resp).Return(errors.New("uh-oh!")).Once() - - client := api.NewClient(lifecycle) - r1, err := client.Do(schema) - - lifecycle.AssertExpectations(t) - assert.Nil(t, r1) - assert.Equal(t, "uh-oh!", err.Error()) -} - -func newBatchResponse(operation string, objects ...*api.ObjectResource) batchResponse { - return batchResponse{ - Operation: operation, - Objects: objects, - } -} - -type batchResponse struct { - Operation string `json:"operation,omitempty"` - Objects []*api.ObjectResource `json:"objects"` -} diff --git a/api/download_test.go b/api/download_test.go deleted file mode 100644 index 3ff028c9..00000000 --- a/api/download_test.go +++ /dev/null @@ -1,403 +0,0 @@ -package api_test - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "strconv" - "strings" - "testing" - - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" -) - -func TestSuccessfulDownload(t *testing.T) { - SetupTestCredentialsFunc() - defer func() { - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "POST" { - t.Error("bad http verb") - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Error("Invalid Accept") - w.WriteHeader(405) - return - } - - auth := r.Header.Get("Authorization") - if len(auth) == 0 { - w.WriteHeader(401) - return - } - - if auth != expectedAuth(t, server) { - t.Errorf("Invalid Authorization: %q", auth) - } - - obj := &api.ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*api.LinkRelation{ - "download": &api.LinkRelation{ - Href: server.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(newBatchResponse("", obj)) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", api.MediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/media", - }, - }) - - obj, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: "oid"}, "download", []string{"basic"}) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error: %s", err) - } - - if obj.Size != 4 { - t.Errorf("unexpected size: %d", obj.Size) - } - -} - -// nearly identical to TestSuccessfulDownload -// called multiple times to return different 3xx status codes -func TestSuccessfulDownloadWithRedirects(t *testing.T) { - SetupTestCredentialsFunc() - defer func() { - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/redirect/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", 
server.URL+"/redirect2/objects/batch") - w.WriteHeader(307) - t.Log("redirect with 307") - }) - - mux.HandleFunc("/redirect2/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/media/objects/batch") - w.WriteHeader(307) - t.Log("redirect with 307") - }) - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Error("Invalid Accept") - w.WriteHeader(406) - return - } - - auth := r.Header.Get("Authorization") - if len(auth) == 0 { - w.WriteHeader(401) - return - } - - if auth != expectedAuth(t, server) { - t.Errorf("Invalid Authorization: %q", auth) - } - - obj := &api.ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*api.LinkRelation{ - "download": &api.LinkRelation{ - Href: server.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(newBatchResponse("", obj)) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", api.MediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/redirect", - }, - }) - - obj, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: "oid"}, "download", []string{"basic"}) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error for 307 status: %s", err) - } - - if obj.Size != 4 { - t.Errorf("unexpected size for 307 status: %d", obj.Size) - } -} - -// nearly identical to TestSuccessfulDownload -// the api request returns a custom Authorization header -func TestSuccessfulDownloadWithAuthorization(t *testing.T) { - SetupTestCredentialsFunc() - defer func() { - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Error("Invalid Accept") - } - - auth := r.Header.Get("Authorization") - if len(auth) == 0 { - w.WriteHeader(401) - return - } - - if auth != expectedAuth(t, server) { - t.Errorf("Invalid Authorization: %q", auth) - } - - obj := &api.ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*api.LinkRelation{ - "download": &api.LinkRelation{ - Href: server.URL + "/download", - Header: map[string]string{ - "A": "1", - "Authorization": "custom", - }, - }, - }, - } - - by, err := json.Marshal(newBatchResponse("", obj)) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", "application/json; charset=utf-8") - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/media", - }, - }) - - obj, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: "oid"}, "download", []string{"basic"}) - if err != nil { - if 
isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error: %s", err) - } - - if obj.Size != 4 { - t.Errorf("unexpected size: %d", obj.Size) - } - -} - -func TestDownloadAPIError(t *testing.T) { - SetupTestCredentialsFunc() - defer func() { - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.batch": "false", - "lfs.url": server.URL + "/media", - }, - }) - - _, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: "oid"}, "download", []string{"basic"}) - if err == nil { - t.Fatal("no error?") - } - - if errors.IsFatalError(err) { - t.Fatal("should not panic") - } - - if isDockerConnectionError(err) { - return - } - - expected := "batch response: " + fmt.Sprintf(httputil.GetDefaultError(404), server.URL+"/media/objects/batch") - if err.Error() != expected { - t.Fatalf("Expected: %s\nGot: %s", expected, err.Error()) - } -} - -// guards against connection errors that only seem to happen on debian docker -// images. -func isDockerConnectionError(err error) bool { - if err == nil { - return false - } - - if os.Getenv("TRAVIS") == "true" { - return false - } - - e := err.Error() - return strings.Contains(e, "connection reset by peer") || - strings.Contains(e, "connection refused") -} - -func tempdir(t *testing.T) string { - dir, err := ioutil.TempDir("", "git-lfs-test") - if err != nil { - t.Fatalf("Error getting temp dir: %s", err) - } - return dir -} - -func expectedAuth(t *testing.T, server *httptest.Server) string { - u, err := url.Parse(server.URL) - if err != nil { - t.Fatal(err) - } - - token := fmt.Sprintf("%s:%s", u.Host, "monkey") - return "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) -} - -var ( - TestCredentialsFunc auth.CredentialFunc - origCredentialsFunc auth.CredentialFunc -) - -func init() { - TestCredentialsFunc = func(cfg *config.Configuration, input auth.Creds, subCommand string) (auth.Creds, error) { - output := make(auth.Creds) - for key, value := range input { - output[key] = value - } - if _, ok := output["username"]; !ok { - output["username"] = input["host"] - } - output["password"] = "monkey" - return output, nil - } -} - -// Override the credentials func for testing -func SetupTestCredentialsFunc() { - origCredentialsFunc = auth.SetCredentialsFunc(TestCredentialsFunc) -} - -// Put the original credentials func back -func RestoreCredentialsFunc() { - auth.SetCredentialsFunc(origCredentialsFunc) -} diff --git a/api/http_lifecycle.go b/api/http_lifecycle.go deleted file mode 100644 index a8b6770a..00000000 --- a/api/http_lifecycle.go +++ /dev/null @@ -1,180 +0,0 @@ -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "net/url" - - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/httputil" -) - -var ( - // ErrNoOperationGiven is an error which is returned when no operation - // is provided in a RequestSchema object. - ErrNoOperationGiven = errors.New("lfs/api: no operation provided in schema") -) - -// HttpLifecycle serves as the default implementation of the Lifecycle interface -// for HTTP requests. 
Internally, it leverages the *http.Client type to execute -// HTTP requests against a root *url.URL, as given in `NewHttpLifecycle`. -type HttpLifecycle struct { - cfg *config.Configuration -} - -var _ Lifecycle = new(HttpLifecycle) - -// NewHttpLifecycle initializes a new instance of the *HttpLifecycle type with a -// new *http.Client, and the given root (see above). -// Passing a nil Configuration will use the global config -func NewHttpLifecycle(cfg *config.Configuration) *HttpLifecycle { - if cfg == nil { - cfg = config.Config - } - return &HttpLifecycle{ - cfg: cfg, - } -} - -// Build implements the Lifecycle.Build function. -// -// HttpLifecycle in particular, builds an absolute path by parsing and then -// relativizing the `schema.Path` with respsect to the `HttpLifecycle.root`. If -// there was an error in determining this URL, then that error will be returned, -// -// After this is complete, a body is attached to the request if the -// schema contained one. If a body was present, and there an error occurred while -// serializing it into JSON, then that error will be returned and the -// *http.Request will not be generated. -// -// In all cases, credentials are attached to the HTTP request as described in -// the `auth` package (see github.com/git-lfs/git-lfs/auth#GetCreds). -// -// Finally, all of these components are combined together and the resulting -// request is returned. -func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) { - path, err := l.absolutePath(schema.Operation, schema.Path) - if err != nil { - return nil, err - } - - body, err := l.body(schema) - if err != nil { - return nil, err - } - - req, err := http.NewRequest(schema.Method, path.String(), body) - if err != nil { - return nil, err - } - - if _, err = auth.GetCreds(l.cfg, req); err != nil { - return nil, err - } - - req.URL.RawQuery = l.queryParameters(schema).Encode() - - return req, nil -} - -// Execute implements the Lifecycle.Execute function. -// -// Internally, the *http.Client is used to execute the underlying *http.Request. -// If the client returned an error corresponding to a failure to make the -// request, then that error will be returned immediately, and the response is -// guaranteed not to be serialized. -// -// Once the response has been gathered from the server, it is unmarshled into -// the given `into interface{}` which is identical to the one provided in the -// original RequestSchema. If an error occured while decoding, then that error -// is returned. -// -// Otherwise, the api.Response is returned, along with no error, signaling that -// the request completed successfully. -func (l *HttpLifecycle) Execute(req *http.Request, into interface{}) (Response, error) { - resp, err := httputil.DoHttpRequestWithRedirects(l.cfg, req, []*http.Request{}, true) - if err != nil { - return nil, err - } - - // TODO(taylor): check status >=500, handle content type, return error, - // halt immediately. - - if into != nil { - decoder := json.NewDecoder(resp.Body) - if err = decoder.Decode(into); err != nil { - return nil, err - } - } - - return WrapHttpResponse(resp), nil -} - -// Cleanup implements the Lifecycle.Cleanup function by closing the Body -// attached to the response. -func (l *HttpLifecycle) Cleanup(resp Response) error { - return resp.Body().Close() -} - -// absolutePath returns the absolute path made by combining a given relative -// path with the root URL of the endpoint corresponding to the given operation. 
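The path resolution used by `absolutePath` is plain net/url reference resolution; as a standalone illustration with an invented endpoint root:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Endpoint root as it might come from cfg.Endpoint(operation).Url (value invented).
	root, _ := url.Parse("https://git-server.example/info/lfs")
	// A relative schema.Path such as the lock service uses.
	rel, _ := url.Parse("/locks")

	// ResolveReference: an absolute reference path replaces the base URL's path.
	fmt.Println(root.ResolveReference(rel)) // https://git-server.example/locks
}
```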
-// -// If there was an error in parsing the relative path, then that error will be -// returned. -func (l *HttpLifecycle) absolutePath(operation Operation, path string) (*url.URL, error) { - if len(operation) == 0 { - return nil, ErrNoOperationGiven - } - - root, err := url.Parse(l.cfg.Endpoint(string(operation)).Url) - if err != nil { - return nil, err - } - - rel, err := url.Parse(path) - if err != nil { - return nil, err - - } - - return root.ResolveReference(rel), nil -} - -// body returns an io.Reader which reads out a JSON-encoded copy of the payload -// attached to a given *RequestSchema, if it is present. If no body is present -// in the request, then nil is returned instead. -// -// If an error was encountered while attempting to marshal the body, then that -// will be returned instead, along with a nil io.Reader. -func (l *HttpLifecycle) body(schema *RequestSchema) (io.ReadCloser, error) { - if schema.Body == nil { - return nil, nil - } - - body, err := json.Marshal(schema.Body) - if err != nil { - return nil, err - } - - return ioutil.NopCloser(bytes.NewReader(body)), nil -} - -// queryParameters returns a url.Values containing all of the provided query -// parameters as given in the *RequestSchema. If no query parameters were given, -// then an empty url.Values is returned instead. -func (l *HttpLifecycle) queryParameters(schema *RequestSchema) url.Values { - vals := url.Values{} - if schema.Query != nil { - for k, v := range schema.Query { - vals.Add(k, v) - } - } - - return vals -} diff --git a/api/http_lifecycle_test.go b/api/http_lifecycle_test.go deleted file mode 100644 index 72013a99..00000000 --- a/api/http_lifecycle_test.go +++ /dev/null @@ -1,141 +0,0 @@ -package api_test - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "testing" - - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" - "github.com/stretchr/testify/assert" -) - -func NewTestConfig() *config.Configuration { - c := config.NewFrom(config.Values{}) - c.SetManualEndpoint(config.Endpoint{Url: "https://example.com"}) - return c -} -func TestHttpLifecycleMakesRequestsAgainstAbsolutePath(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - l := api.NewHttpLifecycle(NewTestConfig()) - req, err := l.Build(&api.RequestSchema{ - Path: "/foo", - Operation: api.DownloadOperation, - }) - - assert.Nil(t, err) - assert.Equal(t, "https://example.com/foo", req.URL.String()) -} - -func TestHttpLifecycleAttachesQueryParameters(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - l := api.NewHttpLifecycle(NewTestConfig()) - req, err := l.Build(&api.RequestSchema{ - Path: "/foo", - Operation: api.DownloadOperation, - Query: map[string]string{ - "a": "b", - }, - }) - - assert.Nil(t, err) - assert.Equal(t, "https://example.com/foo?a=b", req.URL.String()) -} - -func TestHttpLifecycleAttachesBodyWhenPresent(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - l := api.NewHttpLifecycle(NewTestConfig()) - req, err := l.Build(&api.RequestSchema{ - Operation: api.DownloadOperation, - Body: struct { - Foo string `json:"foo"` - }{"bar"}, - }) - - assert.Nil(t, err) - - body, err := ioutil.ReadAll(req.Body) - assert.Nil(t, err) - assert.Equal(t, "{\"foo\":\"bar\"}", string(body)) -} - -func TestHttpLifecycleDoesNotAttachBodyWhenEmpty(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - l := api.NewHttpLifecycle(NewTestConfig()) - req, err := l.Build(&api.RequestSchema{ - Operation: 
api.DownloadOperation, - }) - - assert.Nil(t, err) - assert.Nil(t, req.Body) -} - -func TestHttpLifecycleErrsWithoutOperation(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - l := api.NewHttpLifecycle(NewTestConfig()) - req, err := l.Build(&api.RequestSchema{ - Path: "/foo", - }) - - assert.Equal(t, api.ErrNoOperationGiven, err) - assert.Nil(t, req) -} - -func TestHttpLifecycleExecutesRequestWithoutBody(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - var called bool - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - called = true - - assert.Equal(t, "/path", r.URL.RequestURI()) - })) - defer server.Close() - - req, _ := http.NewRequest("GET", server.URL+"/path", nil) - - l := api.NewHttpLifecycle(NewTestConfig()) - _, err := l.Execute(req, nil) - - assert.True(t, called) - assert.Nil(t, err) -} - -func TestHttpLifecycleExecutesRequestWithBody(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - type Response struct { - Foo string `json:"foo"` - } - - var called bool - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - called = true - - w.Write([]byte("{\"foo\":\"bar\"}")) - })) - defer server.Close() - - req, _ := http.NewRequest("GET", server.URL+"/path", nil) - - l := api.NewHttpLifecycle(NewTestConfig()) - resp := new(Response) - _, err := l.Execute(req, resp) - - assert.True(t, called) - assert.Nil(t, err) - assert.Equal(t, "bar", resp.Foo) -} diff --git a/api/http_response.go b/api/http_response.go deleted file mode 100644 index 5d923cff..00000000 --- a/api/http_response.go +++ /dev/null @@ -1,53 +0,0 @@ -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -import ( - "io" - "net/http" -) - -// HttpResponse is an implementation of the Response interface capable of -// handling HTTP responses. At its core, it works by wrapping an *http.Response. -type HttpResponse struct { - // r is the underlying *http.Response that is being wrapped. - r *http.Response -} - -// WrapHttpResponse returns a wrapped *HttpResponse implementing the Repsonse -// type by using the given *http.Response. -func WrapHttpResponse(r *http.Response) *HttpResponse { - return &HttpResponse{ - r: r, - } -} - -var _ Response = new(HttpResponse) - -// Status implements the Response.Status function, and returns the status given -// by the underlying *http.Response. -func (h *HttpResponse) Status() string { - return h.r.Status -} - -// StatusCode implements the Response.StatusCode function, and returns the -// status code given by the underlying *http.Response. -func (h *HttpResponse) StatusCode() int { - return h.r.StatusCode -} - -// Proto implements the Response.Proto function, and returns the proto given by -// the underlying *http.Response. -func (h *HttpResponse) Proto() string { - return h.r.Proto -} - -// Body implements the Response.Body function, and returns the body as given by -// the underlying *http.Response. -func (h *HttpResponse) Body() io.ReadCloser { - return h.r.Body -} - -// Header returns the underlying *http.Response's header. 
-func (h *HttpResponse) Header() http.Header { - return h.r.Header -} diff --git a/api/http_response_test.go b/api/http_response_test.go deleted file mode 100644 index 3e929930..00000000 --- a/api/http_response_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package api_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "testing" - - "github.com/git-lfs/git-lfs/api" - "github.com/stretchr/testify/assert" -) - -func TestWrappedHttpResponsesMatchInternal(t *testing.T) { - resp := &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.1", - Body: ioutil.NopCloser(new(bytes.Buffer)), - } - wrapped := api.WrapHttpResponse(resp) - - assert.Equal(t, resp.Status, wrapped.Status()) - assert.Equal(t, resp.StatusCode, wrapped.StatusCode()) - assert.Equal(t, resp.Proto, wrapped.Proto()) - assert.Equal(t, resp.Body, wrapped.Body()) - assert.Equal(t, resp.Header, wrapped.Header()) -} diff --git a/api/lifecycle.go b/api/lifecycle.go deleted file mode 100644 index d8737f58..00000000 --- a/api/lifecycle.go +++ /dev/null @@ -1,32 +0,0 @@ -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -import "net/http" - -// TODO: extract interface for *http.Request; update methods. This will be in a -// later iteration of the API client. - -// A Lifecycle represents and encapsulates the behavior on an API request from -// inception to cleanup. -// -// At a high level, it turns an *api.RequestSchema into an -// api.Response (and optionally an error). Lifecycle does so by providing -// several more fine-grained methods that are used by the client to manage the -// lifecycle of a request in a platform-agnostic fashion. -type Lifecycle interface { - // Build creates a sendable request by using the given RequestSchema as - // a model. - Build(req *RequestSchema) (*http.Request, error) - - // Execute transforms generated request into a wrapped repsonse, (and - // optionally an error, if the request failed), and serializes the - // response into the `into interface{}`, if one was provided. - Execute(req *http.Request, into interface{}) (Response, error) - - // Cleanup is called after the request has been completed and its - // response has been processed. It is meant to preform any post-request - // actions necessary, like closing or resetting the connection. If an - // error was encountered in doing this operation, it should be returned - // from this method, otherwise nil. 
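Any transport can satisfy this small contract. As a purely hypothetical sketch (not part of the deleted code), a lifecycle that traces requests before delegating to the default HTTP lifecycle could look like:

```go
package main

import (
	"log"
	"net/http"

	"github.com/git-lfs/git-lfs/api"
)

// TracingLifecycle is a hypothetical decorator that logs each request schema
// before handing it to an inner api.Lifecycle.
type TracingLifecycle struct {
	Inner api.Lifecycle
}

func (l *TracingLifecycle) Build(schema *api.RequestSchema) (*http.Request, error) {
	log.Printf("lfs/api: %s %s", schema.Method, schema.Path)
	return l.Inner.Build(schema)
}

func (l *TracingLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) {
	return l.Inner.Execute(req, into)
}

func (l *TracingLifecycle) Cleanup(resp api.Response) error {
	return l.Inner.Cleanup(resp)
}

func main() {
	// Wrap the default HTTP lifecycle; nil config falls back to the global config.
	client := api.NewClient(&TracingLifecycle{Inner: api.NewHttpLifecycle(nil)})
	_ = client
}
```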
- Cleanup(resp Response) error -} diff --git a/api/lifecycle_test.go b/api/lifecycle_test.go deleted file mode 100644 index a063387f..00000000 --- a/api/lifecycle_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package api_test - -import ( - "net/http" - - "github.com/git-lfs/git-lfs/api" - "github.com/stretchr/testify/mock" -) - -type MockLifecycle struct { - mock.Mock -} - -var _ api.Lifecycle = new(MockLifecycle) - -func (l *MockLifecycle) Build(req *api.RequestSchema) (*http.Request, error) { - args := l.Called(req) - - if args.Get(0) == nil { - return nil, args.Error(1) - } - - return args.Get(0).(*http.Request), args.Error(1) -} - -func (l *MockLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) { - args := l.Called(req, into) - - if args.Get(0) == nil { - return nil, args.Error(1) - } - - return args.Get(0).(api.Response), args.Error(1) -} - -func (l *MockLifecycle) Cleanup(resp api.Response) error { - args := l.Called(resp) - return args.Error(0) -} diff --git a/api/lock_api.go b/api/lock_api.go deleted file mode 100644 index 345f63f2..00000000 --- a/api/lock_api.go +++ /dev/null @@ -1,247 +0,0 @@ -package api - -import ( - "fmt" - "strconv" - "time" -) - -// LockService is an API service which encapsulates the Git LFS Locking API. -type LockService struct{} - -// Lock generates a *RequestSchema that is used to preform the "attempt lock" -// API method. -// -// If a lock is already present, or if the server was unable to generate the -// lock, the Err field of the LockResponse type will be populated with a more -// detailed error describing the situation. -// -// If the caller does not have the minimum commit necessary to obtain the lock -// on that file, then the CommitNeeded field will be populated in the -// LockResponse, signaling that more commits are needed. -// -// In the successful case, a new Lock will be returned and granted to the -// caller. -func (s *LockService) Lock(req *LockRequest) (*RequestSchema, *LockResponse) { - var resp LockResponse - - return &RequestSchema{ - Method: "POST", - Path: "/locks", - Operation: UploadOperation, - Body: req, - Into: &resp, - }, &resp -} - -// Search generates a *RequestSchema that is used to preform the "search for -// locks" API method. -// -// Searches can be scoped to match specific parameters by using the Filters -// field in the given LockSearchRequest. If no matching Locks were found, then -// the Locks field of the response will be empty. -// -// If the client expects that the server will return many locks, then the client -// can choose to paginate that response. Pagination is preformed by limiting the -// amount of results per page, and the server will inform the client of the ID -// of the last returned lock. Since the server is guaranteed to return results -// in reverse chronological order, the client simply sends the last ID it -// processed along with the next request, and the server will continue where it -// left off. -// -// If the server was unable to process the lock search request, then the Error -// field will be populated in the response. -// -// In the successful case, one or more locks will be returned as a part of the -// response. 
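A hedged sketch of paging through search results with this service, assuming the default client wiring and an illustrative path filter:

```go
package main

import (
	"fmt"
	"log"

	"github.com/git-lfs/git-lfs/api"
)

func main() {
	client := api.NewClient(nil) // nil selects the default HTTP lifecycle
	var locks api.LockService

	cursor := ""
	for {
		schema, page := locks.Search(&api.LockSearchRequest{
			Filters: []api.Filter{{Property: "path", Value: "docs/design.md"}},
			Cursor:  cursor,
			Limit:   50,
		})
		if _, err := client.Do(schema); err != nil {
			log.Fatal(err)
		}
		for _, l := range page.Locks {
			fmt.Println(l.Id, l.Path)
		}
		if page.NextCursor == "" {
			break
		}
		cursor = page.NextCursor
	}
}
```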
-func (s *LockService) Search(req *LockSearchRequest) (*RequestSchema, *LockList) { - var resp LockList - - query := make(map[string]string) - for _, filter := range req.Filters { - query[filter.Property] = filter.Value - } - - if req.Cursor != "" { - query["cursor"] = req.Cursor - } - - if req.Limit != 0 { - query["limit"] = strconv.Itoa(req.Limit) - } - - return &RequestSchema{ - Method: "GET", - Path: "/locks", - Operation: UploadOperation, - Query: query, - Into: &resp, - }, &resp -} - -// Unlock generates a *RequestSchema that is used to preform the "unlock" API -// method, against a particular lock potentially with --force. -// -// This method's corresponding response type will either contain a reference to -// the lock that was unlocked, or an error that was experienced by the server in -// unlocking it. -func (s *LockService) Unlock(id string, force bool) (*RequestSchema, *UnlockResponse) { - var resp UnlockResponse - - return &RequestSchema{ - Method: "POST", - Path: fmt.Sprintf("/locks/%s/unlock", id), - Operation: UploadOperation, - Body: &UnlockRequest{id, force}, - Into: &resp, - }, &resp -} - -// Lock represents a single lock that against a particular path. -// -// Locks returned from the API may or may not be currently active, according to -// the Expired flag. -type Lock struct { - // Id is the unique identifier corresponding to this particular Lock. It - // must be consistent with the local copy, and the server's copy. - Id string `json:"id"` - // Path is an absolute path to the file that is locked as a part of this - // lock. - Path string `json:"path"` - // Committer is the author who initiated this lock. - Committer Committer `json:"committer"` - // CommitSHA is the commit that this Lock was created against. It is - // strictly equal to the SHA of the minimum commit negotiated in order - // to create this lock. - CommitSHA string `json:"commit_sha"` - // LockedAt is a required parameter that represents the instant in time - // that this lock was created. For most server implementations, this - // should be set to the instant at which the lock was initially - // received. - LockedAt time.Time `json:"locked_at"` - // ExpiresAt is an optional parameter that represents the instant in - // time that the lock stopped being active. If the lock is still active, - // the server can either a) not send this field, or b) send the - // zero-value of time.Time. - UnlockedAt time.Time `json:"unlocked_at,omitempty"` -} - -// Active returns whether or not the given lock is still active against the file -// that it is protecting. -func (l *Lock) Active() bool { - return l.UnlockedAt.IsZero() -} - -// Committer represents a "First Last " pair. -type Committer struct { - // Name is the name of the individual who would like to obtain the - // lock, for instance: "Rick Olson". - Name string `json:"name"` - // Email is the email assopsicated with the individual who would - // like to obtain the lock, for instance: "rick@github.com". - Email string `json:"email"` -} - -func NewCommitter(name, email string) Committer { - return Committer{Name: name, Email: email} -} - -// LockRequest encapsulates the payload sent across the API when a client would -// like to obtain a lock against a particular path on a given remote. -type LockRequest struct { - // Path is the path that the client would like to obtain a lock against. 
- Path string `json:"path"` - // LatestRemoteCommit is the SHA of the last known commit from the - // remote that we are trying to create the lock against, as found in - // `.git/refs/origin/`. - LatestRemoteCommit string `json:"latest_remote_commit"` - // Committer is the individual that wishes to obtain the lock. - Committer Committer `json:"committer"` -} - -// LockResponse encapsulates the information sent over the API in response to -// a `LockRequest`. -type LockResponse struct { - // Lock is the Lock that was optionally created in response to the - // payload that was sent (see above). If the lock already exists, then - // the existing lock is sent in this field instead, and the author of - // that lock remains the same, meaning that the client failed to obtain - // that lock. An HTTP status of "409 - Conflict" is used here. - // - // If the lock was unable to be created, this field will hold the - // zero-value of Lock and the Err field will provide a more detailed set - // of information. - // - // If an error was experienced in creating this lock, then the - // zero-value of Lock should be sent here instead. - Lock *Lock `json:"lock"` - // CommitNeeded holds the minimum commit SHA that client must have to - // obtain the lock. - CommitNeeded string `json:"commit_needed,omitempty"` - // Err is the optional error that was encountered while trying to create - // the above lock. - Err string `json:"error,omitempty"` -} - -// UnlockRequest encapsulates the data sent in an API request to remove a lock. -type UnlockRequest struct { - // Id is the Id of the lock that the user wishes to unlock. - Id string `json:"id"` - // Force determines whether or not the lock should be "forcibly" - // unlocked; that is to say whether or not a given individual should be - // able to break a different individual's lock. - Force bool `json:"force"` -} - -// UnlockResponse is the result sent back from the API when asked to remove a -// lock. -type UnlockResponse struct { - // Lock is the lock corresponding to the asked-about lock in the - // `UnlockPayload` (see above). If no matching lock was found, this - // field will take the zero-value of Lock, and Err will be non-nil. - Lock *Lock `json:"lock"` - // Err is an optional field which holds any error that was experienced - // while removing the lock. - Err string `json:"error,omitempty"` -} - -// Filter represents a single qualifier to apply against a set of locks. -type Filter struct { - // Property is the property to search against. - // Value is the value that the property must take. - Property, Value string -} - -// LockSearchRequest encapsulates the request sent to the server when the client -// would like a list of locks that match the given criteria. -type LockSearchRequest struct { - // Filters is the set of filters to query against. If the client wishes - // to obtain a list of all locks, an empty array should be passed here. - Filters []Filter - // Cursor is an optional field used to tell the server which lock was - // seen last, if scanning through multiple pages of results. - // - // Servers must return a list of locks sorted in reverse chronological - // order, so the Cursor provides a consistent method of viewing all - // locks, even if more were created between two requests. - Cursor string - // Limit is the maximum number of locks to return in a single page. - Limit int -} - -// LockList encapsulates a set of Locks. 
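Putting the request and response types above together, a lock-then-unlock round trip might have been written as follows (path, commit SHA, and committer values are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/git-lfs/git-lfs/api"
)

func main() {
	client := api.NewClient(nil)
	var locks api.LockService

	// Request a lock against an illustrative path.
	schema, lockResp := locks.Lock(&api.LockRequest{
		Path:               "docs/design.md",
		LatestRemoteCommit: "deadbeef",
		Committer:          api.NewCommitter("Jane Doe", "jane@example.com"),
	})
	if _, err := client.Do(schema); err != nil {
		log.Fatal(err)
	}
	if lockResp.Err != "" || lockResp.Lock == nil {
		log.Fatalf("lock not granted: %s", lockResp.Err)
	}
	fmt.Println("locked:", lockResp.Lock.Id)

	// Later, release it; force=false means only the owner may unlock.
	schema, unlockResp := locks.Unlock(lockResp.Lock.Id, false)
	if _, err := client.Do(schema); err != nil {
		log.Fatal(err)
	}
	fmt.Println("unlocked:", unlockResp.Lock.Path)
}
```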
-type LockList struct { - // Locks is the set of locks returned back, typically matching the query - // parameters sent in the LockListRequest call. If no locks were matched - // from a given query, then `Locks` will be represented as an empty - // array. - Locks []Lock `json:"locks"` - // NextCursor returns the Id of the Lock the client should update its - // cursor to, if there are multiple pages of results for a particular - // `LockListRequest`. - NextCursor string `json:"next_cursor,omitempty"` - // Err populates any error that was encountered during the search. If no - // error was encountered and the operation was succesful, then a value - // of nil will be passed here. - Err string `json:"error,omitempty"` -} diff --git a/api/lock_api_test.go b/api/lock_api_test.go deleted file mode 100644 index e697e2d9..00000000 --- a/api/lock_api_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package api_test - -import ( - "testing" - "time" - - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/api/schema" -) - -var LockService api.LockService - -func TestSuccessfullyObtainingALock(t *testing.T) { - got, body := LockService.Lock(new(api.LockRequest)) - - AssertRequestSchema(t, &api.RequestSchema{ - Method: "POST", - Path: "/locks", - Operation: api.UploadOperation, - Body: new(api.LockRequest), - Into: body, - }, got) -} - -func TestLockSearchWithFilters(t *testing.T) { - got, body := LockService.Search(&api.LockSearchRequest{ - Filters: []api.Filter{ - {"branch", "master"}, - {"path", "/path/to/file"}, - }, - }) - - AssertRequestSchema(t, &api.RequestSchema{ - Method: "GET", - Query: map[string]string{ - "branch": "master", - "path": "/path/to/file", - }, - Path: "/locks", - Operation: api.UploadOperation, - Into: body, - }, got) -} - -func TestLockSearchWithNextCursor(t *testing.T) { - got, body := LockService.Search(&api.LockSearchRequest{ - Cursor: "some-lock-id", - }) - - AssertRequestSchema(t, &api.RequestSchema{ - Method: "GET", - Query: map[string]string{ - "cursor": "some-lock-id", - }, - Path: "/locks", - Operation: api.UploadOperation, - Into: body, - }, got) -} - -func TestLockSearchWithLimit(t *testing.T) { - got, body := LockService.Search(&api.LockSearchRequest{ - Limit: 20, - }) - - AssertRequestSchema(t, &api.RequestSchema{ - Method: "GET", - Query: map[string]string{ - "limit": "20", - }, - Path: "/locks", - Operation: api.UploadOperation, - Into: body, - }, got) -} - -func TestUnlockingALock(t *testing.T) { - got, body := LockService.Unlock("some-lock-id", true) - - AssertRequestSchema(t, &api.RequestSchema{ - Method: "POST", - Path: "/locks/some-lock-id/unlock", - Operation: api.UploadOperation, - Body: &api.UnlockRequest{ - Id: "some-lock-id", - Force: true, - }, - Into: body, - }, got) -} - -func TestLockRequest(t *testing.T) { - schema.Validate(t, schema.LockRequestSchema, &api.LockRequest{ - Path: "/path/to/lock", - LatestRemoteCommit: "deadbeef", - Committer: api.Committer{ - Name: "Jane Doe", - Email: "jane@example.com", - }, - }) -} - -func TestLockResponseWithLockedLock(t *testing.T) { - schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ - Lock: &api.Lock{ - Id: "some-lock-id", - Path: "/lock/path", - Committer: api.Committer{ - Name: "Jane Doe", - Email: "jane@example.com", - }, - LockedAt: time.Now(), - }, - }) -} - -func TestLockResponseWithUnlockedLock(t *testing.T) { - schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ - Lock: &api.Lock{ - Id: "some-lock-id", - Path: "/lock/path", - Committer: api.Committer{ - Name: "Jane Doe", 
- Email: "jane@example.com", - }, - LockedAt: time.Now(), - UnlockedAt: time.Now(), - }, - }) -} - -func TestLockResponseWithError(t *testing.T) { - schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ - Err: "some error", - }) -} - -func TestLockResponseWithCommitNeeded(t *testing.T) { - schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ - CommitNeeded: "deadbeef", - }) -} - -func TestLockResponseInvalidWithCommitAndError(t *testing.T) { - schema.Refute(t, schema.LockResponseSchema, &api.LockResponse{ - Err: "some error", - CommitNeeded: "deadbeef", - }) -} - -func TestUnlockRequest(t *testing.T) { - schema.Validate(t, schema.UnlockRequestSchema, &api.UnlockRequest{ - Id: "some-lock-id", - Force: false, - }) -} - -func TestUnlockResponseWithLock(t *testing.T) { - schema.Validate(t, schema.UnlockResponseSchema, &api.UnlockResponse{ - Lock: &api.Lock{ - Id: "some-lock-id", - }, - }) -} - -func TestUnlockResponseWithError(t *testing.T) { - schema.Validate(t, schema.UnlockResponseSchema, &api.UnlockResponse{ - Err: "some-error", - }) -} - -func TestUnlockResponseDoesNotAllowLockAndError(t *testing.T) { - schema.Refute(t, schema.UnlockResponseSchema, &api.UnlockResponse{ - Lock: &api.Lock{ - Id: "some-lock-id", - }, - Err: "some-error", - }) -} - -func TestLockListWithLocks(t *testing.T) { - schema.Validate(t, schema.LockListSchema, &api.LockList{ - Locks: []api.Lock{ - api.Lock{Id: "foo"}, - api.Lock{Id: "bar"}, - }, - }) -} - -func TestLockListWithNoResults(t *testing.T) { - schema.Validate(t, schema.LockListSchema, &api.LockList{ - Locks: []api.Lock{}, - }) -} - -func TestLockListWithNextCursor(t *testing.T) { - schema.Validate(t, schema.LockListSchema, &api.LockList{ - Locks: []api.Lock{ - api.Lock{Id: "foo"}, - api.Lock{Id: "bar"}, - }, - NextCursor: "baz", - }) -} - -func TestLockListWithError(t *testing.T) { - schema.Validate(t, schema.LockListSchema, &api.LockList{ - Err: "some error", - }) -} - -func TestLockListWithErrorAndLocks(t *testing.T) { - schema.Refute(t, schema.LockListSchema, &api.LockList{ - Locks: []api.Lock{ - api.Lock{Id: "foo"}, - api.Lock{Id: "bar"}, - }, - Err: "this isn't possible!", - }) -} diff --git a/api/object.go b/api/object.go deleted file mode 100644 index b9ea4dc0..00000000 --- a/api/object.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -import ( - "errors" - "fmt" - "net/http" - "time" - - "github.com/git-lfs/git-lfs/httputil" -) - -type ObjectError struct { - Code int `json:"code"` - Message string `json:"message"` -} - -func (e *ObjectError) Error() string { - return fmt.Sprintf("[%d] %s", e.Code, e.Message) -} - -type ObjectResource struct { - Oid string `json:"oid,omitempty"` - Size int64 `json:"size"` - Authenticated bool `json:"authenticated,omitempty"` - Actions map[string]*LinkRelation `json:"actions,omitempty"` - Links map[string]*LinkRelation `json:"_links,omitempty"` - Error *ObjectError `json:"error,omitempty"` -} - -// TODO LEGACY API: remove when legacy API removed -func (o *ObjectResource) NewRequest(relation, method string) (*http.Request, error) { - rel, ok := o.Rel(relation) - if !ok { - if relation == "download" { - return nil, errors.New("Object not found on the server.") - } - return nil, fmt.Errorf("No %q action for this object.", relation) - - } - - req, err := httputil.NewHttpRequest(method, rel.Href, rel.Header) - if err != nil { - return nil, err - } - - return req, nil -} - -func (o *ObjectResource) Rel(name string) (*LinkRelation, bool) { - var rel *LinkRelation - var ok bool - - if o.Actions != nil { 
- rel, ok = o.Actions[name] - } else { - rel, ok = o.Links[name] - } - - return rel, ok -} - -// IsExpired returns true, and the time of the expired action, if any of the -// actions in this object resource have an ExpiresAt field that is after the -// given instant "now". -// -// If the object contains no actions, or none of the actions it does contain -// have non-zero ExpiresAt fields, the object is not expired. -func (o *ObjectResource) IsExpired(now time.Time) (time.Time, bool) { - for _, a := range o.Actions { - if !a.ExpiresAt.IsZero() && a.ExpiresAt.Before(now) { - return a.ExpiresAt, true - } - } - - return time.Time{}, false -} - -func (o *ObjectResource) NeedsAuth() bool { - return !o.Authenticated -} - -type LinkRelation struct { - Href string `json:"href"` - Header map[string]string `json:"header,omitempty"` - ExpiresAt time.Time `json:"expires_at,omitempty"` -} diff --git a/api/object_test.go b/api/object_test.go deleted file mode 100644 index 8bf2a9a6..00000000 --- a/api/object_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package api_test - -import ( - "testing" - "time" - - "github.com/git-lfs/git-lfs/api" - "github.com/stretchr/testify/assert" -) - -func TestObjectsWithNoActionsAreNotExpired(t *testing.T) { - o := &api.ObjectResource{ - Oid: "some-oid", - Actions: map[string]*api.LinkRelation{}, - } - - _, expired := o.IsExpired(time.Now()) - assert.False(t, expired) -} - -func TestObjectsWithZeroValueTimesAreNotExpired(t *testing.T) { - o := &api.ObjectResource{ - Oid: "some-oid", - Actions: map[string]*api.LinkRelation{ - "upload": &api.LinkRelation{ - Href: "http://your-lfs-server.com", - ExpiresAt: time.Time{}, - }, - }, - } - - _, expired := o.IsExpired(time.Now()) - assert.False(t, expired) -} - -func TestObjectsWithExpirationDatesAreExpired(t *testing.T) { - now := time.Now() - expires := time.Now().Add(-60 * 60 * time.Second) - - o := &api.ObjectResource{ - Oid: "some-oid", - Actions: map[string]*api.LinkRelation{ - "upload": &api.LinkRelation{ - Href: "http://your-lfs-server.com", - ExpiresAt: expires, - }, - }, - } - - expiredAt, expired := o.IsExpired(now) - assert.Equal(t, expires, expiredAt) - assert.True(t, expired) -} diff --git a/api/request_schema.go b/api/request_schema.go deleted file mode 100644 index 2c1cb560..00000000 --- a/api/request_schema.go +++ /dev/null @@ -1,21 +0,0 @@ -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -// RequestSchema provides a schema from which to generate sendable requests. -type RequestSchema struct { - // Method is the method that should be used when making a particular API - // call. - Method string - // Path is the relative path that this API call should be made against. - Path string - // Operation is the operation used to determine which endpoint to make - // the request against (see github.com/git-lfs/git-lfs/config). - Operation Operation - // Query is the query parameters used in the request URI. - Query map[string]string - // Body is the body of the request. - Body interface{} - // Into is an optional field used to represent the data structure into - // which a response should be serialized. 
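Nothing ties RequestSchema to the built-in services; a hand-assembled schema can be executed the same way. The `/verify` path and payload below are invented purely for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/git-lfs/git-lfs/api"
)

// verifyPayload and the "/verify" path are hypothetical; they are not part of
// the services the deleted code actually defined.
type verifyPayload struct {
	Oid  string `json:"oid"`
	Size int64  `json:"size"`
}

func main() {
	var result struct {
		Ok bool `json:"ok"`
	}

	schema := &api.RequestSchema{
		Method:    "POST",
		Path:      "/verify",
		Operation: api.UploadOperation,
		Body:      verifyPayload{Oid: "some-oid", Size: 4},
		Into:      &result,
	}

	if _, err := api.NewClient(nil).Do(schema); err != nil {
		log.Fatal(err)
	}
	fmt.Println("verified:", result.Ok)
}
```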
- Into interface{} -} diff --git a/api/request_schema_test.go b/api/request_schema_test.go deleted file mode 100644 index 7f2e0fa5..00000000 --- a/api/request_schema_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package api_test - -import ( - "testing" - - "github.com/git-lfs/git-lfs/api" - "github.com/stretchr/testify/assert" -) - -// AssertRequestSchema encapsulates a single assertion of equality against two -// generated RequestSchema instances. -// -// This assertion is meant only to test that the request schema generated by an -// API service matches what we expect it to be. It does not make use of the -// *api.Client, any particular lifecycle, or spin up a test server. All of that -// behavior is tested at a higher strata in the client/lifecycle tests. -// -// - t is the *testing.T used to preform the assertion. -// - Expected is the *api.RequestSchema that we expected to be generated. -// - Got is the *api.RequestSchema that was generated by a service. -func AssertRequestSchema(t *testing.T, expected, got *api.RequestSchema) { - assert.Equal(t, expected, got) -} diff --git a/api/response.go b/api/response.go deleted file mode 100644 index 294cd2f2..00000000 --- a/api/response.go +++ /dev/null @@ -1,24 +0,0 @@ -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package api - -import "io" - -// Response is an interface that represents a response returned as a result of -// executing an API call. It is designed to represent itself across multiple -// response type, be it HTTP, SSH, or something else. -// -// The Response interface is meant to be small enough such that it can be -// sufficiently general, but is easily accessible if a caller needs more -// information specific to a particular protocol. -type Response interface { - // Status is a human-readable string representing the status the - // response was returned with. - Status() string - // StatusCode is the numeric code associated with a particular status. - StatusCode() int - // Proto is the protocol with which the response was delivered. - Proto() string - // Body returns an io.ReadCloser containg the contents of the response's - // body. 
- Body() io.ReadCloser -} diff --git a/api/schema/lock_list_schema.json b/api/schema/lock_list_schema.json deleted file mode 100644 index e2fa6a36..00000000 --- a/api/schema/lock_list_schema.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - - "type": "object", - "oneOf": [ - { - "properties": { - "locks": { - "type": "array", - "items": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "path": { - "type": "string" - }, - "committer": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "email": { - "type": "string" - } - }, - "required": ["name", "email"] - }, - "commit_sha": { - "type": "string" - }, - "locked_at": { - "type": "string" - }, - "unlocked_at": { - "type": "string" - } - }, - "required": ["id", "path", "commit_sha", "locked_at"], - "additionalItems": false - } - }, - "next_cursor": { - "type": "string" - } - }, - "additionalProperties": false, - "required": ["locks"] - }, - { - "properties": { - "locks": { - "type": "null" - }, - "error": { - "type": "string" - } - }, - "additionalProperties": false, - "required": ["error"] - } - ] -} diff --git a/api/schema/lock_request_schema.json b/api/schema/lock_request_schema.json deleted file mode 100644 index 10d5e2f2..00000000 --- a/api/schema/lock_request_schema.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "type": "object", - "properties": { - "path": { - "type": "string" - }, - "latest_remote_commit": { - "type": "string" - }, - "committer": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "email": { - "type": "string" - } - }, - "required": ["name", "email"] - } - }, - "required": ["path", "latest_remote_commit", "committer"] - -} diff --git a/api/schema/lock_response_schema.json b/api/schema/lock_response_schema.json deleted file mode 100644 index a5b458de..00000000 --- a/api/schema/lock_response_schema.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - - "type": "object", - "oneOf": [ - { - "properties": { - "error": { - "type": "string" - } - }, - "required": ["error"] - }, - { - "properties": { - "commit_needed": { - "type": "string" - } - }, - "required": ["commit_needed"] - }, - { - "properties": { - "lock": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "path": { - "type": "string" - }, - "committer": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "email": { - "type": "string" - } - }, - "required": ["name", "email"] - }, - "commit_sha": { - "type": "string" - }, - "locked_at": { - "type": "string" - }, - "unlocked_at": { - "type": "string" - } - }, - "required": ["id", "path", "commit_sha", "locked_at"] - } - }, - "required": ["lock"] - } - ] -} diff --git a/api/schema/schema_validator.go b/api/schema/schema_validator.go deleted file mode 100644 index cea66f16..00000000 --- a/api/schema/schema_validator.go +++ /dev/null @@ -1,84 +0,0 @@ -package schema - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "testing" - - "github.com/xeipuuv/gojsonschema" -) - -// SchemaValidator uses the gojsonschema library to validate the JSON encoding -// of Go objects against a pre-defined JSON schema. -type SchemaValidator struct { - // Schema is the JSON schema to validate against. - // - // Subject is the instance of Go type that will be validated. 
- Schema, Subject gojsonschema.JSONLoader -} - -func NewSchemaValidator(t *testing.T, schemaName string, got interface{}) *SchemaValidator { - dir, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - // Platform compatibility: use "/" separators always for file:// - dir = filepath.ToSlash(dir) - - schema := gojsonschema.NewReferenceLoader(fmt.Sprintf( - "file:///%s/schema/%s", dir, schemaName), - ) - - marshalled, err := json.Marshal(got) - if err != nil { - t.Fatal(err) - } - - subject := gojsonschema.NewStringLoader(string(marshalled)) - - return &SchemaValidator{ - Schema: schema, - Subject: subject, - } -} - -// Validate validates a Go object against JSON schema in a testing environment. -// If the validation fails, then the test will fail after logging all of the -// validation errors experienced by the validator. -func Validate(t *testing.T, schemaName string, got interface{}) { - NewSchemaValidator(t, schemaName, got).Assert(t) -} - -// Refute ensures that a particular Go object does not validate the JSON schema -// given. -// -// If validation against the schema is successful, then the test will fail after -// logging. -func Refute(t *testing.T, schemaName string, got interface{}) { - NewSchemaValidator(t, schemaName, got).Refute(t) -} - -// Assert preforms the validation assertion against the given *testing.T. -func (v *SchemaValidator) Assert(t *testing.T) { - if result, err := gojsonschema.Validate(v.Schema, v.Subject); err != nil { - t.Fatal(err) - } else if !result.Valid() { - for _, err := range result.Errors() { - t.Logf("Validation error: %s", err.Description()) - } - t.Fail() - } -} - -// Refute refutes that the given subject will validate against a particular -// schema. -func (v *SchemaValidator) Refute(t *testing.T) { - if result, err := gojsonschema.Validate(v.Schema, v.Subject); err != nil { - t.Fatal(err) - } else if result.Valid() { - t.Fatal("api/schema: expected validation to fail, succeeded") - } -} diff --git a/api/schema/schemas.go b/api/schema/schemas.go deleted file mode 100644 index 7d4c94d3..00000000 --- a/api/schema/schemas.go +++ /dev/null @@ -1,24 +0,0 @@ -// schema provides a testing utility for testing API types against a predefined -// JSON schema. -// -// The core philosophy for this package is as follows: when a new API is -// accepted, JSON Schema files should be added to document the types that are -// exchanged over this new API. Those files are placed in the `/api/schema` -// directory, and are used by the schema.Validate function to test that -// particular instances of these types as represented in Go match the predefined -// schema that was proposed as a part of the API. -// -// For ease of use, this file defines several constants, one for each schema -// file's name, to easily pass around during tests. -// -// As briefly described above, to validate that a Go type matches the schema for -// a particular API call, one should use the schema.Validate() function. 
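A minimal test in that style, mirroring the pattern used by the deleted lock_api_test.go (the field values are illustrative):

```go
package api_test

import (
	"testing"

	"github.com/git-lfs/git-lfs/api"
	"github.com/git-lfs/git-lfs/api/schema"
)

// Illustrative only: validates a LockRequest instance against the bundled schema file.
func TestExampleLockRequestMatchesSchema(t *testing.T) {
	schema.Validate(t, schema.LockRequestSchema, &api.LockRequest{
		Path:               "docs/design.md",
		LatestRemoteCommit: "deadbeef",
		Committer:          api.NewCommitter("Jane Doe", "jane@example.com"),
	})
}
```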
-package schema - -const ( - LockListSchema = "lock_list_schema.json" - LockRequestSchema = "lock_request_schema.json" - LockResponseSchema = "lock_response_schema.json" - UnlockRequestSchema = "unlock_request_schema.json" - UnlockResponseSchema = "unlock_response_schema.json" -) diff --git a/api/schema/unlock_request_schema.json b/api/schema/unlock_request_schema.json deleted file mode 100644 index c4362bb9..00000000 --- a/api/schema/unlock_request_schema.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "force": { - "type": "boolean" - } - }, - "required": ["id", "force"], - "additionalItems": false -} diff --git a/api/schema/unlock_response_schema.json b/api/schema/unlock_response_schema.json deleted file mode 100644 index 5ede9024..00000000 --- a/api/schema/unlock_response_schema.json +++ /dev/null @@ -1,53 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-04/schema#", - - "type": "object", - "oneOf": [ - { - "properties": { - "lock": { - "type": "object", - "properties": { - "id": { - "type": "string" - }, - "path": { - "type": "string" - }, - "committer": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "email": { - "type": "string" - } - }, - "required": ["name", "email"] - }, - "commit_sha": { - "type": "string" - }, - "locked_at": { - "type": "string" - }, - "unlocked_at": { - "type": "string" - } - }, - "required": ["id", "path", "commit_sha", "locked_at"] - } - }, - "required": ["lock"] - }, - { - "properties": { - "error": { - "type": "string" - } - }, - "required": ["error"] - } - ] -} diff --git a/api/upload_test.go b/api/upload_test.go deleted file mode 100644 index 933f8384..00000000 --- a/api/upload_test.go +++ /dev/null @@ -1,627 +0,0 @@ -package api_test // prevent import cycles - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "strconv" - "testing" - - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" - "github.com/git-lfs/git-lfs/lfs" - "github.com/git-lfs/git-lfs/test" -) - -func TestExistingUpload(t *testing.T) { - SetupTestCredentialsFunc() - repo := test.NewRepo(t) - repo.Pushd() - defer func() { - repo.Popd() - repo.Cleanup() - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != api.MediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := batchResponse{} - err := json.NewDecoder(tee).Decode(&reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - var obj *api.ObjectResource - if len(reqObj.Objects) != 1 { - t.Errorf("Invalid number of objects") - w.WriteHeader(400) - return - } else { - obj = reqObj.Objects[0] - if obj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", obj.Oid) - } - - if obj.Size != 4 { - 
t.Errorf("invalid size from request: %d", obj.Size) - } - } - - obj.Actions = map[string]*api.LinkRelation{ - "download": &api.LinkRelation{ - Href: server.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - } - - by, err := json.Marshal(newBatchResponse("", obj)) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", api.MediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/media", - }, - }) - - oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - oid := filepath.Base(oidPath) - stat, _ := os.Stat(oidPath) - o, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - - if o == nil { - t.Fatal("Got no objects back") - } - - if _, ok := o.Rel("upload"); ok { - t.Errorf("has upload relation") - } - - if _, ok := o.Rel("download"); !ok { - t.Errorf("has no download relation") - } - - if !postCalled { - t.Errorf("POST not called") - } - -} - -func TestUploadWithRedirect(t *testing.T) { - SetupTestCredentialsFunc() - repo := test.NewRepo(t) - repo.Pushd() - defer func() { - repo.Popd() - repo.Cleanup() - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - defer server.Close() - defer os.RemoveAll(tmp) - - mux.HandleFunc("/redirect/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/redirect2/objects/batch") - w.WriteHeader(307) - }) - - mux.HandleFunc("/redirect2/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/media/objects/batch") - w.WriteHeader(307) - }) - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != api.MediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := batchResponse{} - err := json.NewDecoder(tee).Decode(&reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - var obj *api.ObjectResource - if len(reqObj.Objects) != 1 { - t.Errorf("Invalid number of objects") - w.WriteHeader(400) - return - } else { - obj = reqObj.Objects[0] - if obj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", obj.Oid) - } - - if obj.Size != 4 { - t.Errorf("invalid size from request: %d", obj.Size) - } - } - - obj.Actions = map[string]*api.LinkRelation{ - "upload": &api.LinkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &api.LinkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - } - - by, err := json.Marshal(newBatchResponse("", 
obj)) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", api.MediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/redirect", - }, - }) - - oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - oid := filepath.Base(oidPath) - stat, _ := os.Stat(oidPath) - o, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - - if o == nil { - t.Fatal("Got no objects back") - } - - if _, ok := o.Rel("download"); ok { - t.Errorf("has download relation") - } - - if _, ok := o.Rel("upload"); !ok { - t.Errorf("has no upload relation") - } -} - -func TestSuccessfulUploadWithVerify(t *testing.T) { - SetupTestCredentialsFunc() - repo := test.NewRepo(t) - repo.Pushd() - defer func() { - repo.Popd() - repo.Cleanup() - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - verifyCalled := false - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != api.MediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := batchResponse{} - err := json.NewDecoder(tee).Decode(&reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - var obj *api.ObjectResource - if len(reqObj.Objects) != 1 { - t.Errorf("Invalid number of objects") - w.WriteHeader(400) - return - } else { - obj = reqObj.Objects[0] - if obj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", obj.Oid) - } - - if obj.Size != 4 { - t.Errorf("invalid size from request: %d", obj.Size) - } - } - - obj.Actions = map[string]*api.LinkRelation{ - "upload": &api.LinkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &api.LinkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - } - - by, err := json.Marshal(newBatchResponse("", obj)) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", api.MediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("B") != "2" { - t.Error("Invalid B") - } - - if r.Header.Get("Content-Type") != api.MediaType { - t.Error("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &api.ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != 
"988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - verifyCalled = true - w.WriteHeader(200) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/media", - }, - }) - - oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - oid := filepath.Base(oidPath) - stat, _ := os.Stat(oidPath) - o, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - api.VerifyUpload(cfg, o) - - if !postCalled { - t.Errorf("POST not called") - } - - if !verifyCalled { - t.Errorf("verify not called") - } -} - -func TestUploadApiError(t *testing.T) { - SetupTestCredentialsFunc() - repo := test.NewRepo(t) - repo.Pushd() - defer func() { - repo.Popd() - repo.Cleanup() - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - postCalled = true - w.WriteHeader(404) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/media", - }, - }) - - oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - oid := filepath.Base(oidPath) - stat, _ := os.Stat(oidPath) - _, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) - if err == nil { - t.Fatal(err) - } - - if errors.IsFatalError(err) { - t.Fatal("should not panic") - } - - if isDockerConnectionError(err) { - return - } - - expected := "batch response: " + fmt.Sprintf(httputil.GetDefaultError(404), server.URL+"/media/objects/batch") - if err.Error() != expected { - t.Fatalf("Expected: %s\nGot: %s", expected, err.Error()) - } - - if !postCalled { - t.Errorf("POST not called") - } -} - -func TestUploadVerifyError(t *testing.T) { - SetupTestCredentialsFunc() - repo := test.NewRepo(t) - repo.Pushd() - defer func() { - repo.Popd() - repo.Cleanup() - RestoreCredentialsFunc() - }() - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - verifyCalled := false - - mux.HandleFunc("/media/objects/batch", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != api.MediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != api.MediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := batchResponse{} - err := json.NewDecoder(tee).Decode(&reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - var obj *api.ObjectResource - if len(reqObj.Objects) != 1 { - t.Errorf("Invalid number of objects") - w.WriteHeader(400) - return - } else { - obj = reqObj.Objects[0] - if obj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { 
- t.Errorf("invalid oid from request: %s", obj.Oid) - } - - if obj.Size != 4 { - t.Errorf("invalid size from request: %d", obj.Size) - } - } - - obj.Actions = map[string]*api.LinkRelation{ - "upload": &api.LinkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &api.LinkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - } - - by, err := json.Marshal(newBatchResponse("", obj)) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", api.MediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { - verifyCalled = true - w.WriteHeader(404) - }) - - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": server.URL + "/media", - }, - }) - - oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - oid := filepath.Base(oidPath) - stat, _ := os.Stat(oidPath) - o, _, err := api.BatchSingle(cfg, &api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - err = api.VerifyUpload(cfg, o) - if err == nil { - t.Fatal("verify should fail") - } - - if errors.IsFatalError(err) { - t.Fatal("should not panic") - } - - expected := fmt.Sprintf(httputil.GetDefaultError(404), server.URL+"/verify") - if err.Error() != expected { - t.Fatalf("Expected: %s\nGot: %s", expected, err.Error()) - } - - if !postCalled { - t.Errorf("POST not called") - } - - if !verifyCalled { - t.Errorf("verify not called") - } - -} diff --git a/api/v1.go b/api/v1.go deleted file mode 100644 index 24de5884..00000000 --- a/api/v1.go +++ /dev/null @@ -1,143 +0,0 @@ -package api - -import ( - "net/http" - "net/url" - "path" - - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" - - "github.com/rubyist/tracerx" -) - -const ( - MediaType = "application/vnd.git-lfs+json; charset=utf-8" -) - -type batchRequest struct { - TransferAdapterNames []string `json:"transfers,omitempty"` - Operation string `json:"operation"` - Objects []*ObjectResource `json:"objects"` -} -type batchResponse struct { - TransferAdapterName string `json:"transfer"` - Objects []*ObjectResource `json:"objects"` -} - -// doApiBatchRequest runs the request to the LFS batch API. If the API returns a -// 401, the repo will be marked as having private access and the request will be -// re-run. When the repo is marked as having private access, credentials will -// be retrieved. -func DoBatchRequest(cfg *config.Configuration, req *http.Request) (*http.Response, *batchResponse, error) { - res, err := DoRequest(req, cfg.PrivateAccess(auth.GetOperationForRequest(req))) - - if err != nil { - if res != nil && res.StatusCode == 401 { - return res, nil, errors.NewAuthError(err) - } - return res, nil, err - } - - resp := &batchResponse{} - err = httputil.DecodeResponse(res, resp) - - if err != nil { - httputil.SetErrorResponseContext(cfg, err, res) - } - - return res, resp, err -} - -// DoRequest runs a request to the LFS API, without parsing the response -// body. If the API returns a 401, the repo will be marked as having private -// access and the request will be re-run. 
When the repo is marked as having -// private access, credentials will be retrieved. -func DoRequest(req *http.Request, useCreds bool) (*http.Response, error) { - via := make([]*http.Request, 0, 4) - return httputil.DoHttpRequestWithRedirects(config.Config, req, via, useCreds) -} - -func NewRequest(cfg *config.Configuration, method, oid string) (*http.Request, error) { - objectOid := oid - operation := "download" - if method == "POST" { - if oid != "batch" { - objectOid = "" - operation = "upload" - } - } - - res, endpoint, err := auth.SshAuthenticate(cfg, operation, oid) - if err != nil { - tracerx.Printf("ssh: %s with %s failed, error: %s, message: %s", - operation, endpoint.SshUserAndHost, err.Error(), res.Message, - ) - return nil, errors.Wrap(errors.New(res.Message), err.Error()) - } - - if len(res.Href) > 0 { - endpoint.Url = res.Href - } - - u, err := ObjectUrl(endpoint, objectOid) - if err != nil { - return nil, err - } - - req, err := httputil.NewHttpRequest(method, u.String(), res.Header) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", MediaType) - return req, nil -} - -func NewBatchRequest(cfg *config.Configuration, operation string) (*http.Request, error) { - res, endpoint, err := auth.SshAuthenticate(cfg, operation, "") - if err != nil { - tracerx.Printf("ssh: %s with %s failed, error: %s, message: %s", - operation, endpoint.SshUserAndHost, err.Error(), res.Message, - ) - return nil, errors.Wrap(errors.New(res.Message), err.Error()) - } - - if len(res.Href) > 0 { - endpoint.Url = res.Href - } - - u, err := ObjectUrl(endpoint, "batch") - if err != nil { - return nil, err - } - - req, err := httputil.NewHttpRequest("POST", u.String(), nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", MediaType) - if res.Header != nil { - for key, value := range res.Header { - req.Header.Set(key, value) - } - } - - return req, nil -} - -func ObjectUrl(endpoint config.Endpoint, oid string) (*url.URL, error) { - u, err := url.Parse(endpoint.Url) - if err != nil { - return nil, err - } - - u.Path = path.Join(u.Path, "objects") - if len(oid) > 0 { - u.Path = path.Join(u.Path, oid) - } - return u, nil -} diff --git a/api/verify.go b/api/verify.go deleted file mode 100644 index af0b9b51..00000000 --- a/api/verify.go +++ /dev/null @@ -1,46 +0,0 @@ -package api - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "strconv" - - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" -) - -// VerifyUpload calls the "verify" API link relation on obj if it exists -func VerifyUpload(cfg *config.Configuration, obj *ObjectResource) error { - // Do we need to do verify? 
- if _, ok := obj.Rel("verify"); !ok { - return nil - } - - req, err := obj.NewRequest("verify", "POST") - if err != nil { - return errors.Wrap(err, "verify") - } - - by, err := json.Marshal(obj) - if err != nil { - return errors.Wrap(err, "verify") - } - - req.Header.Set("Content-Type", MediaType) - req.Header.Set("Content-Length", strconv.Itoa(len(by))) - req.ContentLength = int64(len(by)) - req.Body = ioutil.NopCloser(bytes.NewReader(by)) - res, err := DoRequest(req, true) - if err != nil { - return err - } - - httputil.LogTransfer(cfg, "lfs.data.verify", res) - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - return err -} diff --git a/auth/auth.go b/auth/auth.go deleted file mode 100644 index 99a3adb1..00000000 --- a/auth/auth.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package auth provides common authentication tools -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package auth diff --git a/auth/credentials.go b/auth/credentials.go deleted file mode 100644 index 2ce8d289..00000000 --- a/auth/credentials.go +++ /dev/null @@ -1,290 +0,0 @@ -package auth - -import ( - "bytes" - "encoding/base64" - "fmt" - "net" - "net/http" - "net/url" - "os" - "os/exec" - "strings" - - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - "github.com/rubyist/tracerx" -) - -// getCreds gets the credentials for a HTTP request and sets the given -// request's Authorization header with them using Basic Authentication. -// 1. Check the URL for authentication. Ex: http://user:pass@example.com -// 2. Check netrc for authentication. -// 3. Check the Git remote URL for authentication IF it's the same scheme and -// host of the URL. -// 4. Ask 'git credential' to fill in the password from one of the above URLs. -// -// This prefers the Git remote URL for checking credentials so that users only -// have to enter their passwords once for Git and Git LFS. It uses the same -// URL path that Git does, in case 'useHttpPath' is enabled in the Git config. -func GetCreds(cfg *config.Configuration, req *http.Request) (Creds, error) { - if skipCredsCheck(cfg, req) { - return nil, nil - } - - credsUrl, err := getCredURLForAPI(cfg, req) - if err != nil { - return nil, errors.Wrap(err, "creds") - } - - if credsUrl == nil { - return nil, nil - } - - if setCredURLFromNetrc(cfg, req) { - return nil, nil - } - - return fillCredentials(cfg, req, credsUrl) -} - -func getCredURLForAPI(cfg *config.Configuration, req *http.Request) (*url.URL, error) { - operation := GetOperationForRequest(req) - apiUrl, err := url.Parse(cfg.Endpoint(operation).Url) - if err != nil { - return nil, err - } - - // if the LFS request doesn't match the current LFS url, don't bother - // attempting to set the Authorization header from the LFS or Git remote URLs. 
- if req.URL.Scheme != apiUrl.Scheme || - req.URL.Host != apiUrl.Host { - return req.URL, nil - } - - if setRequestAuthFromUrl(cfg, req, apiUrl) { - return nil, nil - } - - credsUrl := apiUrl - if len(cfg.CurrentRemote) > 0 { - if u := cfg.GitRemoteUrl(cfg.CurrentRemote, operation == "upload"); u != "" { - gitRemoteUrl, err := url.Parse(u) - if err != nil { - return nil, err - } - - if gitRemoteUrl.Scheme == apiUrl.Scheme && - gitRemoteUrl.Host == apiUrl.Host { - - if setRequestAuthFromUrl(cfg, req, gitRemoteUrl) { - return nil, nil - } - - credsUrl = gitRemoteUrl - } - } - } - return credsUrl, nil -} - -func setCredURLFromNetrc(cfg *config.Configuration, req *http.Request) bool { - hostname := req.URL.Host - var host string - - if strings.Contains(hostname, ":") { - var err error - host, _, err = net.SplitHostPort(hostname) - if err != nil { - tracerx.Printf("netrc: error parsing %q: %s", hostname, err) - return false - } - } else { - host = hostname - } - - machine, err := cfg.FindNetrcHost(host) - if err != nil { - tracerx.Printf("netrc: error finding match for %q: %s", hostname, err) - return false - } - - if machine == nil { - return false - } - - setRequestAuth(cfg, req, machine.Login, machine.Password) - return true -} - -func skipCredsCheck(cfg *config.Configuration, req *http.Request) bool { - if cfg.NtlmAccess(GetOperationForRequest(req)) { - return false - } - - if len(req.Header.Get("Authorization")) > 0 { - return true - } - - q := req.URL.Query() - return len(q["token"]) > 0 -} - -func fillCredentials(cfg *config.Configuration, req *http.Request, u *url.URL) (Creds, error) { - path := strings.TrimPrefix(u.Path, "/") - input := Creds{"protocol": u.Scheme, "host": u.Host, "path": path} - if u.User != nil && u.User.Username() != "" { - input["username"] = u.User.Username() - } - - creds, err := execCreds(cfg, input, "fill") - if creds == nil || len(creds) < 1 { - errmsg := fmt.Sprintf("Git credentials for %s not found", u) - if err != nil { - errmsg = errmsg + ":\n" + err.Error() - } else { - errmsg = errmsg + "." - } - err = errors.New(errmsg) - } - - if err != nil { - return nil, err - } - - tracerx.Printf("Filled credentials for %s", u) - setRequestAuth(cfg, req, creds["username"], creds["password"]) - - return creds, err -} - -func SaveCredentials(cfg *config.Configuration, creds Creds, res *http.Response) { - if creds == nil { - return - } - - switch res.StatusCode { - case 401, 403: - execCreds(cfg, creds, "reject") - default: - if res.StatusCode < 300 { - execCreds(cfg, creds, "approve") - } - } -} - -type Creds map[string]string - -func (c Creds) Buffer() *bytes.Buffer { - buf := new(bytes.Buffer) - - for k, v := range c { - buf.Write([]byte(k)) - buf.Write([]byte("=")) - buf.Write([]byte(v)) - buf.Write([]byte("\n")) - } - - return buf -} - -// Credentials function which will be called whenever credentials are requested -type CredentialFunc func(*config.Configuration, Creds, string) (Creds, error) - -func execCredsCommand(cfg *config.Configuration, input Creds, subCommand string) (Creds, error) { - output := new(bytes.Buffer) - cmd := exec.Command("git", "credential", subCommand) - cmd.Stdin = input.Buffer() - cmd.Stdout = output - /* - There is a reason we don't hook up stderr here: - Git's credential cache daemon helper does not close its stderr, so if this - process is the process that fires up the daemon, it will wait forever - (until the daemon exits, really) trying to read from stderr. - - See https://github.com/git-lfs/git-lfs/issues/117 for more details. 
- */ - - err := cmd.Start() - if err == nil { - err = cmd.Wait() - } - - if _, ok := err.(*exec.ExitError); ok { - if !cfg.Os.Bool("GIT_TERMINAL_PROMPT", true) { - return nil, fmt.Errorf("Change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s.", - input["protocol"], input["host"]) - } - - // 'git credential' exits with 128 if the helper doesn't fill the username - // and password values. - if subCommand == "fill" && err.Error() == "exit status 128" { - return nil, nil - } - } - - if err != nil { - return nil, fmt.Errorf("'git credential %s' error: %s\n", subCommand, err.Error()) - } - - creds := make(Creds) - for _, line := range strings.Split(output.String(), "\n") { - pieces := strings.SplitN(line, "=", 2) - if len(pieces) < 2 || len(pieces[1]) < 1 { - continue - } - creds[pieces[0]] = pieces[1] - } - - return creds, nil -} - -func setRequestAuthFromUrl(cfg *config.Configuration, req *http.Request, u *url.URL) bool { - if !cfg.NtlmAccess(GetOperationForRequest(req)) && u.User != nil { - if pass, ok := u.User.Password(); ok { - fmt.Fprintln(os.Stderr, "warning: current Git remote contains credentials") - setRequestAuth(cfg, req, u.User.Username(), pass) - return true - } - } - - return false -} - -func setRequestAuth(cfg *config.Configuration, req *http.Request, user, pass string) { - if cfg.NtlmAccess(GetOperationForRequest(req)) { - return - } - - if len(user) == 0 && len(pass) == 0 { - return - } - - token := fmt.Sprintf("%s:%s", user, pass) - auth := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) - req.Header.Set("Authorization", auth) -} - -var execCreds CredentialFunc = execCredsCommand - -// GetCredentialsFunc returns the current credentials function -func GetCredentialsFunc() CredentialFunc { - return execCreds -} - -// SetCredentialsFunc overrides the default credentials function (which is to call git) -// Returns the previous credentials func -func SetCredentialsFunc(f CredentialFunc) CredentialFunc { - oldf := execCreds - execCreds = f - return oldf -} - -// GetOperationForRequest determines the operation type for a http.Request -func GetOperationForRequest(req *http.Request) string { - operation := "download" - if req.Method == "POST" || req.Method == "PUT" { - operation = "upload" - } - return operation -} diff --git a/auth/credentials_test.go b/auth/credentials_test.go deleted file mode 100644 index e0887188..00000000 --- a/auth/credentials_test.go +++ /dev/null @@ -1,326 +0,0 @@ -package auth - -import ( - "encoding/base64" - "fmt" - "net/http" - "net/url" - "strings" - "testing" - - "github.com/bgentry/go-netrc/netrc" - "github.com/git-lfs/git-lfs/config" -) - -func TestGetCredentialsForApi(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - checkGetCredentials(t, GetCreds, []*getCredentialCheck{ - { - Desc: "simple", - Config: map[string]string{"lfs.url": "https://git-server.com"}, - Method: "GET", - Href: "https://git-server.com/foo", - Protocol: "https", - Host: "git-server.com", - Username: "git-server.com", - Password: "monkey", - }, - { - Desc: "username in url", - Config: map[string]string{"lfs.url": "https://user@git-server.com"}, - Method: "GET", - Href: "https://git-server.com/foo", - Protocol: "https", - Host: "git-server.com", - Username: "user", - Password: "monkey", - }, - { - Desc: "auth header", - Config: map[string]string{"lfs.url": "https://git-server.com"}, - Header: map[string]string{"Authorization": "Test monkey"}, - Method: "GET", - Href: 
"https://git-server.com/foo", - Authorization: "Test monkey", - }, - { - Desc: "scheme mismatch", - Config: map[string]string{"lfs.url": "https://git-server.com"}, - Method: "GET", - Href: "http://git-server.com/foo", - Protocol: "http", - Host: "git-server.com", - Path: "foo", - Username: "git-server.com", - Password: "monkey", - }, - { - Desc: "host mismatch", - Config: map[string]string{"lfs.url": "https://git-server.com"}, - Method: "GET", - Href: "https://git-server2.com/foo", - Protocol: "https", - Host: "git-server2.com", - Path: "foo", - Username: "git-server2.com", - Password: "monkey", - }, - { - Desc: "port mismatch", - Config: map[string]string{"lfs.url": "https://git-server.com"}, - Method: "GET", - Href: "https://git-server.com:8080/foo", - Protocol: "https", - Host: "git-server.com:8080", - Path: "foo", - Username: "git-server.com:8080", - Password: "monkey", - }, - { - Desc: "api url auth", - Config: map[string]string{"lfs.url": "https://testuser:testpass@git-server.com"}, - Method: "GET", - Href: "https://git-server.com/foo", - Authorization: "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte("testuser:testpass"))), - }, - { - Desc: "git url auth", - CurrentRemote: "origin", - Config: map[string]string{ - "lfs.url": "https://git-server.com", - "remote.origin.url": "https://gituser:gitpass@git-server.com", - }, - Method: "GET", - Href: "https://git-server.com/foo", - Authorization: "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte("gituser:gitpass"))), - }, - { - Desc: "username in url", - Config: map[string]string{"lfs.url": "https://user@git-server.com"}, - Method: "GET", - Href: "https://git-server.com/foo", - Protocol: "https", - Host: "git-server.com", - Username: "user", - Password: "monkey", - }, - { - Desc: "?token query", - Config: map[string]string{"lfs.url": "https://git-server.com"}, - Method: "GET", - Href: "https://git-server.com/foo?token=abc", - SkipAuth: true, - }, - }) -} - -type fakeNetrc struct{} - -func (n *fakeNetrc) FindMachine(host string) *netrc.Machine { - if host == "some-host" { - return &netrc.Machine{Login: "abc", Password: "def"} - } - return nil -} - -func TestNetrcWithHostAndPort(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - cfg := config.New() - cfg.SetNetrc(&fakeNetrc{}) - u, err := url.Parse("http://some-host:123/foo/bar") - if err != nil { - t.Fatal(err) - } - - req := &http.Request{ - URL: u, - Header: http.Header{}, - } - - if !setCredURLFromNetrc(cfg, req) { - t.Fatal("no netrc match") - } - - auth := req.Header.Get("Authorization") - if auth != "Basic YWJjOmRlZg==" { - t.Fatalf("bad basic auth: %q", auth) - } -} - -func TestNetrcWithHost(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - cfg := config.New() - cfg.SetNetrc(&fakeNetrc{}) - u, err := url.Parse("http://some-host/foo/bar") - if err != nil { - t.Fatal(err) - } - - req := &http.Request{ - URL: u, - Header: http.Header{}, - } - - if !setCredURLFromNetrc(cfg, req) { - t.Fatalf("no netrc match") - } - - auth := req.Header.Get("Authorization") - if auth != "Basic YWJjOmRlZg==" { - t.Fatalf("bad basic auth: %q", auth) - } -} - -func TestNetrcWithBadHost(t *testing.T) { - SetupTestCredentialsFunc() - defer RestoreCredentialsFunc() - - cfg := config.New() - cfg.SetNetrc(&fakeNetrc{}) - u, err := url.Parse("http://other-host/foo/bar") - if err != nil { - t.Fatal(err) - } - - req := &http.Request{ - URL: u, - Header: http.Header{}, - } - - if setCredURLFromNetrc(cfg, req) { - 
t.Fatalf("unexpected netrc match") - } - - auth := req.Header.Get("Authorization") - if auth != "" { - t.Fatalf("bad basic auth: %q", auth) - } -} - -func checkGetCredentials(t *testing.T, getCredsFunc func(*config.Configuration, *http.Request) (Creds, error), checks []*getCredentialCheck) { - for _, check := range checks { - t.Logf("Checking %q", check.Desc) - cfg := config.NewFrom(config.Values{ - Git: check.Config, - }) - cfg.CurrentRemote = check.CurrentRemote - - req, err := http.NewRequest(check.Method, check.Href, nil) - if err != nil { - t.Errorf("[%s] %s", check.Desc, err) - continue - } - - for key, value := range check.Header { - req.Header.Set(key, value) - } - - creds, err := getCredsFunc(cfg, req) - if err != nil { - t.Errorf("[%s] %s", check.Desc, err) - continue - } - - if check.ExpectCreds() { - if creds == nil { - t.Errorf("[%s], no credentials returned", check.Desc) - continue - } - - if value := creds["protocol"]; len(check.Protocol) > 0 && value != check.Protocol { - t.Errorf("[%s] bad protocol: %q, expected: %q", check.Desc, value, check.Protocol) - } - - if value := creds["host"]; len(check.Host) > 0 && value != check.Host { - t.Errorf("[%s] bad host: %q, expected: %q", check.Desc, value, check.Host) - } - - if value := creds["username"]; len(check.Username) > 0 && value != check.Username { - t.Errorf("[%s] bad username: %q, expected: %q", check.Desc, value, check.Username) - } - - if value := creds["password"]; len(check.Password) > 0 && value != check.Password { - t.Errorf("[%s] bad password: %q, expected: %q", check.Desc, value, check.Password) - } - - if value := creds["path"]; len(check.Path) > 0 && value != check.Path { - t.Errorf("[%s] bad path: %q, expected: %q", check.Desc, value, check.Path) - } - } else { - if creds != nil { - t.Errorf("[%s], unexpected credentials: %v // %v", check.Desc, creds, check) - continue - } - } - - reqAuth := req.Header.Get("Authorization") - if check.SkipAuth { - } else if len(check.Authorization) > 0 { - if reqAuth != check.Authorization { - t.Errorf("[%s] Unexpected Authorization header: %s", check.Desc, reqAuth) - } - } else { - rawtoken := fmt.Sprintf("%s:%s", check.Username, check.Password) - expected := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(rawtoken))) - if reqAuth != expected { - t.Errorf("[%s] Bad Authorization. 
Expected '%s', got '%s'", check.Desc, expected, reqAuth) - } - } - } -} - -type getCredentialCheck struct { - Desc string - Config map[string]string - Header map[string]string - Method string - Href string - Protocol string - Host string - Username string - Password string - Path string - Authorization string - CurrentRemote string - SkipAuth bool -} - -func (c *getCredentialCheck) ExpectCreds() bool { - return len(c.Protocol) > 0 || len(c.Host) > 0 || len(c.Username) > 0 || - len(c.Password) > 0 || len(c.Path) > 0 -} - -var ( - TestCredentialsFunc CredentialFunc - origCredentialsFunc CredentialFunc -) - -func init() { - TestCredentialsFunc = func(cfg *config.Configuration, input Creds, subCommand string) (Creds, error) { - output := make(Creds) - for key, value := range input { - output[key] = value - } - if _, ok := output["username"]; !ok { - output["username"] = input["host"] - } - output["password"] = "monkey" - return output, nil - } -} - -// Override the credentials func for testing -func SetupTestCredentialsFunc() { - origCredentialsFunc = SetCredentialsFunc(TestCredentialsFunc) -} - -// Put the original credentials func back -func RestoreCredentialsFunc() { - SetCredentialsFunc(origCredentialsFunc) -} diff --git a/circle.yml b/circle.yml index 78ecc161..bb5ad7d9 100644 --- a/circle.yml +++ b/circle.yml @@ -33,6 +33,7 @@ dependencies: # needed for git-lfs-test-server-api - go get -d -v github.com/spf13/cobra + - go get -d -v github.com/ThomsonReutersEikon/go-ntlm/ntlm test: override: diff --git a/commands/command_clone.go b/commands/command_clone.go index cc5b7dd3..72a9664d 100644 --- a/commands/command_clone.go +++ b/commands/command_clone.go @@ -62,19 +62,21 @@ func cloneCommand(cmd *cobra.Command, args []string) { // Now just call pull with default args // Support --origin option to clone + var remote string if len(cloneFlags.Origin) > 0 { - cfg.CurrentRemote = cloneFlags.Origin + remote = cloneFlags.Origin } else { - cfg.CurrentRemote = "origin" + remote = "origin" } includeArg, excludeArg := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, includeArg, excludeArg) if cloneFlags.NoCheckout || cloneFlags.Bare { // If --no-checkout or --bare then we shouldn't check out, just fetch instead + cfg.CurrentRemote = remote fetchRef("HEAD", filter) } else { - pull(filter) + pull(remote, filter) err := postCloneSubmodules(args) if err != nil { Exit("Error performing 'git lfs pull' for submodules: %v", err) diff --git a/commands/command_env.go b/commands/command_env.go index 4cb30f6c..20202e63 100644 --- a/commands/command_env.go +++ b/commands/command_env.go @@ -35,7 +35,7 @@ func envCommand(cmd *cobra.Command, args []string) { } } - for _, env := range lfs.Environ(cfg, TransferManifest()) { + for _, env := range lfs.Environ(cfg, getTransferManifest()) { Print(env) } diff --git a/commands/command_fetch.go b/commands/command_fetch.go index 05404fba..0baa357e 100644 --- a/commands/command_fetch.go +++ b/commands/command_fetch.go @@ -279,7 +279,7 @@ func fetchAndReportToChan(allpointers []*lfs.WrappedPointer, filter *filepathfil } ready, pointers, meter := readyAndMissingPointers(allpointers, filter) - q := newDownloadQueue(tq.WithProgress(meter)) + q := newDownloadQueue(getTransferManifest(), cfg.CurrentRemote, tq.WithProgress(meter)) if out != nil { // If we already have it, or it won't be fetched diff --git a/commands/command_lock.go b/commands/command_lock.go index 8ccf63e5..da4ca5dd 100644 --- a/commands/command_lock.go +++ b/commands/command_lock.go @@ -1,13 +1,13 @@ 
package commands import ( + "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/git-lfs/git-lfs/git" - "github.com/git-lfs/git-lfs/locking" "github.com/spf13/cobra" ) @@ -17,7 +17,6 @@ var ( ) func lockCommand(cmd *cobra.Command, args []string) { - if len(args) == 0 { Print("Usage: git lfs lock ") return @@ -28,20 +27,21 @@ func lockCommand(cmd *cobra.Command, args []string) { Exit(err.Error()) } - if len(lockRemote) > 0 { - cfg.CurrentRemote = lockRemote - } - - lockClient, err := locking.NewClient(cfg) - if err != nil { - Exit("Unable to create lock system: %v", err.Error()) - } + lockClient := newLockClient(lockRemote) defer lockClient.Close() + lock, err := lockClient.LockFile(path) if err != nil { Exit("Lock failed: %v", err) } + if locksCmdFlags.JSON { + if err := json.NewEncoder(os.Stdout).Encode(lock); err != nil { + Error(err.Error()) + } + return + } + Print("\n'%s' was locked (%s)", args[0], lock.Id) } @@ -91,5 +91,6 @@ func init() { RegisterCommand("lock", lockCommand, func(cmd *cobra.Command) { cmd.Flags().StringVarP(&lockRemote, "remote", "r", cfg.CurrentRemote, lockRemoteHelp) + cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } diff --git a/commands/command_locks.go b/commands/command_locks.go index 13fc6965..09269d65 100644 --- a/commands/command_locks.go +++ b/commands/command_locks.go @@ -1,7 +1,9 @@ package commands import ( - "github.com/git-lfs/git-lfs/locking" + "encoding/json" + "os" + "github.com/spf13/cobra" ) @@ -10,23 +12,25 @@ var ( ) func locksCommand(cmd *cobra.Command, args []string) { - filters, err := locksCmdFlags.Filters() if err != nil { Exit("Error building filters: %v", err) } - if len(lockRemote) > 0 { - cfg.CurrentRemote = lockRemote - } - lockClient, err := locking.NewClient(cfg) - if err != nil { - Exit("Unable to create lock system: %v", err.Error()) - } + lockClient := newLockClient(lockRemote) defer lockClient.Close() + var lockCount int locks, err := lockClient.SearchLocks(filters, locksCmdFlags.Limit, locksCmdFlags.Local) // Print any we got before exiting + + if locksCmdFlags.JSON { + if err := json.NewEncoder(os.Stdout).Encode(locks); err != nil { + Error(err.Error()) + } + return + } + for _, lock := range locks { Print("%s\t%s <%s>", lock.Path, lock.Name, lock.Email) lockCount++ @@ -35,7 +39,6 @@ func locksCommand(cmd *cobra.Command, args []string) { if err != nil { Exit("Error while retrieving locks: %v", err) } - Print("\n%d lock(s) matched query.", lockCount) } @@ -54,6 +57,8 @@ type locksFlags struct { // local limits the scope of lock reporting to the locally cached record // of locks for the current user & doesn't query the server Local bool + // JSON is an optional parameter to output data in json format. + JSON bool } // Filters produces a filter based on locksFlags instance. 
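The --json flag added to `git lfs locks` here (and carried by `git lfs lock` above and `git lfs unlock` further down) simply streams the slice returned by SearchLocks through encoding/json, so the lock listing can be consumed by scripts. A hypothetical consumer is sketched below; the JSON key names are assumptions, since the tags on locking.Lock are not part of this diff.

```
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// lock mirrors the fields printed by the plain-text output (id, path,
// committer name and email); the actual keys depend on locking.Lock's tags.
type lock struct {
	Id    string `json:"id"`
	Path  string `json:"path"`
	Name  string `json:"name"`
	Email string `json:"email"`
}

func main() {
	// `git lfs locks --json` encodes the whole result set as one JSON
	// document, so a single Unmarshal is sufficient.
	out, err := exec.Command("git", "lfs", "locks", "--json").Output()
	if err != nil {
		panic(err)
	}

	var locks []lock
	if err := json.Unmarshal(out, &locks); err != nil {
		panic(err)
	}

	for _, l := range locks {
		fmt.Printf("%s\t%s <%s>\n", l.Path, l.Name, l.Email)
	}
}
```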
@@ -86,5 +91,6 @@ func init() { cmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID") cmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return") cmd.Flags().BoolVarP(&locksCmdFlags.Local, "local", "", false, "only list cached local record of own locks") + cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } diff --git a/commands/command_pre_push.go b/commands/command_pre_push.go index 4e09b4f4..7ac9b25b 100644 --- a/commands/command_pre_push.go +++ b/commands/command_pre_push.go @@ -2,13 +2,11 @@ package commands import ( "bufio" - "fmt" "os" "strings" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" - "github.com/git-lfs/git-lfs/locking" "github.com/rubyist/tracerx" "github.com/spf13/cobra" ) @@ -53,11 +51,10 @@ func prePushCommand(cmd *cobra.Command, args []string) { Exit("Invalid remote name %q", args[0]) } - cfg.CurrentRemote = args[0] - ctx := newUploadContext(prePushDryRun) + ctx := newUploadContext(args[0], prePushDryRun) gitscanner := lfs.NewGitScanner(nil) - if err := gitscanner.RemoteForPush(cfg.CurrentRemote); err != nil { + if err := gitscanner.RemoteForPush(ctx.Remote); err != nil { ExitWithError(err) } @@ -66,21 +63,6 @@ func prePushCommand(cmd *cobra.Command, args []string) { // We can be passed multiple lines of refs scanner := bufio.NewScanner(os.Stdin) - name, email := cfg.CurrentCommitter() - lc, err := locking.NewClient(cfg) - if err != nil { - Exit("Unable to create lock system: %v", err.Error()) - } - defer lc.Close() - - lockSet, err := findLocks(lc, nil, 0, false) - if err != nil { - ExitWithError(err) - } - - lockConflicts := make([]string, 0, len(lockSet)) - myLocks := make([]string, 0, len(lockSet)) - for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) @@ -95,62 +77,13 @@ func prePushCommand(cmd *cobra.Command, args []string) { continue } - pointers, err := scanLeftOrAll(gitscanner, left) - if err != nil { + if err := uploadLeftOrAll(gitscanner, ctx, left); err != nil { Print("Error scanning for Git LFS files in %q", left) ExitWithError(err) } - - for _, p := range pointers { - if l, ok := lockSet[p.Name]; ok { - if l.Name == name && l.Email == email { - myLocks = append(myLocks, l.Path) - } else { - lockConflicts = append(lockConflicts, p.Name) - } - } - } - - if len(lockConflicts) > 0 { - Error("Some files are locked in %s...%s", left, cfg.CurrentRemote) - for _, file := range lockConflicts { - Error("* %s", file) - } - os.Exit(1) - } - - uploadPointers(ctx, pointers) } - if len(myLocks) > 0 { - Print("Pushing your locked files:") - for _, file := range myLocks { - Print("* %s", file) - } - } -} - -func scanLeft(g *lfs.GitScanner, ref string) ([]*lfs.WrappedPointer, error) { - var pointers []*lfs.WrappedPointer - var multiErr error - cb := func(p *lfs.WrappedPointer, err error) { - if err != nil { - if multiErr != nil { - multiErr = fmt.Errorf("%v\n%v", multiErr, err) - } else { - multiErr = err - } - return - } - - pointers = append(pointers, p) - } - - if err := g.ScanLeftToRemote(ref, cb); err != nil { - return pointers, err - } - - return pointers, multiErr + ctx.Await() } // decodeRefs pulls the sha1s out of the line read from the pre-push diff --git a/commands/command_prune.go b/commands/command_prune.go index b1314c73..90927fe1 100644 --- a/commands/command_prune.go +++ b/commands/command_prune.go @@ -120,9 +120,7 @@ func prune(fetchPruneConfig config.FetchPruneConfig, verifyRemote, dryRun, 
verbo var verifywait sync.WaitGroup if verifyRemote { - cfg.CurrentRemote = fetchPruneConfig.PruneRemoteName - // build queue now, no estimates or progress output - verifyQueue = newDownloadCheckQueue() + verifyQueue = newDownloadCheckQueue(getTransferManifest(), fetchPruneConfig.PruneRemoteName) verifiedObjects = tools.NewStringSetWithCapacity(len(localObjects) / 2) // this channel is filled with oids for which Check() succeeded & Transfer() was called diff --git a/commands/command_pull.go b/commands/command_pull.go index 6bd6e97c..b4a212ea 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -18,27 +18,29 @@ func pullCommand(cmd *cobra.Command, args []string) { requireGitVersion() requireInRepo() + var remote string if len(args) > 0 { // Remote is first arg if err := git.ValidateRemote(args[0]); err != nil { Panic(err, fmt.Sprintf("Invalid remote name '%v'", args[0])) } - cfg.CurrentRemote = args[0] + remote = args[0] } else { // Actively find the default remote, don't just assume origin defaultRemote, err := git.DefaultRemote() if err != nil { Panic(err, "No default remote") } - cfg.CurrentRemote = defaultRemote + remote = defaultRemote } includeArg, excludeArg := getIncludeExcludeArgs(cmd) filter := buildFilepathFilter(cfg, includeArg, excludeArg) - pull(filter) + pull(remote, filter) } -func pull(filter *filepathfilter.Filter) { +func pull(remote string, filter *filepathfilter.Filter) { + cfg.CurrentRemote = remote ref, err := git.CurrentRef() if err != nil { Panic(err, "Could not pull") @@ -47,7 +49,7 @@ func pull(filter *filepathfilter.Filter) { pointers := newPointerMap() meter := progress.NewMeter(progress.WithOSEnv(cfg.Os)) singleCheckout := newSingleCheckout() - q := newDownloadQueue(tq.WithProgress(meter)) + q := newDownloadQueue(singleCheckout.manifest, remote, tq.WithProgress(meter)) gitscanner := lfs.NewGitScanner(func(p *lfs.WrappedPointer, err error) { if err != nil { LoggedError(err, "Scanner error") diff --git a/commands/command_push.go b/commands/command_push.go index 9b51ed24..6798be27 100644 --- a/commands/command_push.go +++ b/commands/command_push.go @@ -4,6 +4,7 @@ import ( "fmt" "os" + "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" "github.com/rubyist/tracerx" @@ -20,10 +21,10 @@ var ( ) func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) { - tracerx.Printf("Upload refs %v to remote %v", refnames, cfg.CurrentRemote) + tracerx.Printf("Upload refs %v to remote %v", refnames, ctx.Remote) gitscanner := lfs.NewGitScanner(nil) - if err := gitscanner.RemoteForPush(cfg.CurrentRemote); err != nil { + if err := gitscanner.RemoteForPush(ctx.Remote); err != nil { ExitWithError(err) } defer gitscanner.Close() @@ -35,17 +36,16 @@ func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) { } for _, ref := range refs { - pointers, err := scanLeftOrAll(gitscanner, ref.Name) - if err != nil { + if err = uploadLeftOrAll(gitscanner, ctx, ref.Name); err != nil { Print("Error scanning for Git LFS files in the %q ref", ref.Name) ExitWithError(err) } - uploadPointers(ctx, pointers) } + + ctx.Await() } -func scanLeftOrAll(g *lfs.GitScanner, ref string) ([]*lfs.WrappedPointer, error) { - var pointers []*lfs.WrappedPointer +func uploadLeftOrAll(g *lfs.GitScanner, ctx *uploadContext, ref string) error { var multiErr error cb := func(p *lfs.WrappedPointer, err error) { if err != nil { @@ -57,26 +57,44 @@ func scanLeftOrAll(g *lfs.GitScanner, ref string) ([]*lfs.WrappedPointer, error) 
return } - pointers = append(pointers, p) + uploadPointers(ctx, p) } if pushAll { if err := g.ScanRefWithDeleted(ref, cb); err != nil { - return pointers, err + return err + } + } else { + if err := g.ScanLeftToRemote(ref, cb); err != nil { + return err } } - if err := g.ScanLeftToRemote(ref, cb); err != nil { - return pointers, err - } - return pointers, multiErr + + return multiErr } func uploadsWithObjectIDs(ctx *uploadContext, oids []string) { - pointers := make([]*lfs.WrappedPointer, len(oids)) - for idx, oid := range oids { - pointers[idx] = &lfs.WrappedPointer{Pointer: &lfs.Pointer{Oid: oid}} + for _, oid := range oids { + mp, err := lfs.LocalMediaPath(oid) + if err != nil { + ExitWithError(errors.Wrap(err, "Unable to find local media path:")) + } + + stat, err := os.Stat(mp) + if err != nil { + ExitWithError(errors.Wrap(err, "Unable to stat local media path")) + } + + uploadPointers(ctx, &lfs.WrappedPointer{ + Name: mp, + Pointer: &lfs.Pointer{ + Oid: oid, + Size: stat.Size(), + }, + }) } - uploadPointers(ctx, pointers) + + ctx.Await() } func refsByNames(refnames []string) ([]*git.Ref, error) { @@ -128,8 +146,7 @@ func pushCommand(cmd *cobra.Command, args []string) { Exit("Invalid remote name %q", args[0]) } - cfg.CurrentRemote = args[0] - ctx := newUploadContext(pushDryRun) + ctx := newUploadContext(args[0], pushDryRun) if pushObjectIDs { if len(args) < 2 { diff --git a/commands/command_smudge.go b/commands/command_smudge.go index 96ac19a0..ca60970c 100644 --- a/commands/command_smudge.go +++ b/commands/command_smudge.go @@ -50,7 +50,7 @@ func smudge(to io.Writer, from io.Reader, filename string, skip bool, filter *fi download = filter.Allows(filename) } - err = ptr.Smudge(to, filename, download, TransferManifest(), cb) + err = ptr.Smudge(to, filename, download, getTransferManifest(), cb) if file != nil { file.Close() } diff --git a/commands/command_unlock.go b/commands/command_unlock.go index 505d57fa..95dc1822 100644 --- a/commands/command_unlock.go +++ b/commands/command_unlock.go @@ -1,7 +1,8 @@ package commands import ( - "github.com/git-lfs/git-lfs/locking" + "encoding/json" + "os" "github.com/spf13/cobra" ) @@ -21,16 +22,9 @@ type unlockFlags struct { } func unlockCommand(cmd *cobra.Command, args []string) { - - if len(lockRemote) > 0 { - cfg.CurrentRemote = lockRemote - } - - lockClient, err := locking.NewClient(cfg) - if err != nil { - Exit("Unable to create lock system: %v", err.Error()) - } + lockClient := newLockClient(lockRemote) defer lockClient.Close() + if len(args) != 0 { path, err := lockPath(args[0]) if err != nil { @@ -50,6 +44,14 @@ func unlockCommand(cmd *cobra.Command, args []string) { Error("Usage: git lfs unlock (--id my-lock-id | )") } + if locksCmdFlags.JSON { + if err := json.NewEncoder(os.Stdout).Encode(struct { + Unlocked bool `json:"unlocked"` + }{true}); err != nil { + Error(err.Error()) + } + return + } Print("'%s' was unlocked", args[0]) } @@ -62,5 +64,6 @@ func init() { cmd.Flags().StringVarP(&lockRemote, "remote", "r", cfg.CurrentRemote, lockRemoteHelp) cmd.Flags().StringVarP(&unlockCmdFlags.Id, "id", "i", "", "unlock a lock by its ID") cmd.Flags().BoolVarP(&unlockCmdFlags.Force, "force", "f", false, "forcibly break another user's lock(s)") + cmd.Flags().BoolVarP(&locksCmdFlags.JSON, "json", "", false, "print output in json") }) } diff --git a/commands/command_version.go b/commands/command_version.go index 62bece48..bde27cd1 100644 --- a/commands/command_version.go +++ b/commands/command_version.go @@ -1,7 +1,7 @@ package commands import ( - 
"github.com/git-lfs/git-lfs/httputil" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/spf13/cobra" ) @@ -10,7 +10,7 @@ var ( ) func versionCommand(cmd *cobra.Command, args []string) { - Print(httputil.UserAgent) + Print(lfsapi.UserAgent) if lovesComics { Print("Nothing may see Gah Lak Tus and survive!") diff --git a/commands/commands.go b/commands/commands.go index 39111678..d61a1060 100644 --- a/commands/commands.go +++ b/commands/commands.go @@ -9,6 +9,7 @@ import ( "os/exec" "path/filepath" "strings" + "sync" "time" "github.com/git-lfs/git-lfs/config" @@ -16,6 +17,7 @@ import ( "github.com/git-lfs/git-lfs/filepathfilter" "github.com/git-lfs/git-lfs/git" "github.com/git-lfs/git-lfs/lfs" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/locking" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" @@ -33,29 +35,72 @@ var ( ManPages = make(map[string]string, 20) cfg = config.Config + tqManifest *tq.Manifest + apiClient *lfsapi.Client + global sync.Mutex + includeArg string excludeArg string ) -// TransferManifest builds a tq.Manifest from the commands package global -// cfg var. -func TransferManifest() *tq.Manifest { - return lfs.TransferManifest(cfg) +// getTransferManifest builds a tq.Manifest from the global os and git +// environments. +func getTransferManifest() *tq.Manifest { + c := getAPIClient() + + global.Lock() + defer global.Unlock() + + if tqManifest == nil { + tqManifest = tq.NewManifestWithClient(c) + } + + return tqManifest +} + +func getAPIClient() *lfsapi.Client { + global.Lock() + defer global.Unlock() + + if apiClient == nil { + c, err := lfsapi.NewClient(cfg.Os, cfg.Git) + if err != nil { + ExitWithError(err) + } + apiClient = c + } + return apiClient +} + +func newLockClient(remote string) *locking.Client { + lockClient, err := locking.NewClient(remote, getAPIClient()) + if err == nil { + err = lockClient.SetupFileCache(filepath.Join(config.LocalGitStorageDir, "lfs")) + } + + if err != nil { + Exit("Unable to create lock system: %v", err.Error()) + } + + return lockClient } // newDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download -func newDownloadCheckQueue(options ...tq.Option) *tq.TransferQueue { - return lfs.NewDownloadCheckQueue(cfg, options...) +func newDownloadCheckQueue(manifest *tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { + allOptions := make([]tq.Option, 0, len(options)+1) + allOptions = append(allOptions, options...) + allOptions = append(allOptions, tq.DryRun(true)) + return newDownloadQueue(manifest, remote, allOptions...) } // newDownloadQueue builds a DownloadQueue, allowing concurrent downloads. -func newDownloadQueue(options ...tq.Option) *tq.TransferQueue { - return lfs.NewDownloadQueue(cfg, options...) +func newDownloadQueue(manifest *tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { + return tq.NewTransferQueue(tq.Download, manifest, remote, options...) } // newUploadQueue builds an UploadQueue, allowing `workers` concurrent uploads. -func newUploadQueue(options ...tq.Option) *tq.TransferQueue { - return lfs.NewUploadQueue(cfg, options...) +func newUploadQueue(manifest *tq.Manifest, remote string, options ...tq.Option) *tq.TransferQueue { + return tq.NewTransferQueue(tq.Upload, manifest, remote, options...) 
} func buildFilepathFilter(config *config.Configuration, includeArg, excludeArg *string) *filepathfilter.Filter { @@ -69,28 +114,26 @@ func downloadTransfer(p *lfs.WrappedPointer) (name, path, oid string, size int64 return p.Name, path, p.Oid, p.Size } -func uploadTransfer(oid, filename string) (*tq.Transfer, error) { +func uploadTransfer(p *lfs.WrappedPointer) (*tq.Transfer, error) { + filename := p.Name + oid := p.Oid + localMediaPath, err := lfs.LocalMediaPath(oid) if err != nil { return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) } if len(filename) > 0 { - if err = ensureFile(filename, localMediaPath); err != nil { + if err = ensureFile(filename, localMediaPath); err != nil && !errors.IsCleanPointerError(err) { return nil, err } } - fi, err := os.Stat(localMediaPath) - if err != nil { - return nil, errors.Wrapf(err, "Error uploading file %s (%s)", filename, oid) - } - return &tq.Transfer{ Name: filename, Path: localMediaPath, Oid: oid, - Size: fi.Size(), + Size: p.Size, }, nil } @@ -101,7 +144,6 @@ func ensureFile(smudgePath, cleanPath string) error { return nil } - expectedOid := filepath.Base(cleanPath) localPath := filepath.Join(config.LocalWorkingDir, smudgePath) file, err := os.Open(localPath) if err != nil { @@ -123,11 +165,6 @@ func ensureFile(smudgePath, cleanPath string) error { if err != nil { return err } - - if expectedOid != cleaned.Oid { - return fmt.Errorf("Trying to push %q with OID %s.\nNot found in %s.", smudgePath, expectedOid, filepath.Dir(cleanPath)) - } - return nil } @@ -321,7 +358,7 @@ func logPanicToWriter(w io.Writer, loggedError error) { fmt.Fprintln(w, "\nENV:") // log the environment - for _, env := range lfs.Environ(cfg, TransferManifest()) { + for _, env := range lfs.Environ(cfg, getTransferManifest()) { fmt.Fprintln(w, env) } } diff --git a/commands/pull.go b/commands/pull.go index ce764982..0d1e6286 100644 --- a/commands/pull.go +++ b/commands/pull.go @@ -26,7 +26,7 @@ func newSingleCheckout() *singleCheckout { return &singleCheckout{ gitIndexer: &gitIndexer{}, pathConverter: pathConverter, - manifest: TransferManifest(), + manifest: getTransferManifest(), } } diff --git a/commands/run.go b/commands/run.go index 45c9026f..de603f56 100644 --- a/commands/run.go +++ b/commands/run.go @@ -3,11 +3,13 @@ package commands import ( "fmt" "os" + "path/filepath" "strings" "sync" + "time" "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/httputil" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/localstorage" "github.com/spf13/cobra" ) @@ -64,7 +66,7 @@ func Run() { } root.Execute() - httputil.LogHttpStats(cfg) + logHTTPStats(getAPIClient()) } func gitlfsCommand(cmd *cobra.Command, args []string) { @@ -103,3 +105,28 @@ func printHelp(commandName string) { fmt.Fprintf(os.Stderr, "Sorry, no usage text found for %q\n", commandName) } } + +func logHTTPStats(c *lfsapi.Client) { + if !c.LoggingStats { + return + } + + file, err := statsLogFile() + if err != nil { + fmt.Fprintf(os.Stderr, "Error logging http stats: %s\n", err) + return + } + + defer file.Close() + c.LogStats(file) +} + +func statsLogFile() (*os.File, error) { + logBase := filepath.Join(config.LocalLogDir, "http") + if err := os.MkdirAll(logBase, 0755); err != nil { + return nil, err + } + + logFile := fmt.Sprintf("http-%d.log", time.Now().Unix()) + return os.Create(filepath.Join(logBase, logFile)) +} diff --git a/commands/uploader.go b/commands/uploader.go index 6634b7cf..b831b819 100644 --- a/commands/uploader.go +++ b/commands/uploader.go @@ 
-2,25 +2,46 @@ package commands import ( "os" + "sync/atomic" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" + "github.com/git-lfs/git-lfs/locking" + "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" "github.com/git-lfs/git-lfs/tq" ) -var uploadMissingErr = "%s does not exist in .git/lfs/objects. Tried %s, which matches %s." - type uploadContext struct { + Remote string DryRun bool + Manifest *tq.Manifest uploadedOids tools.StringSet + + meter progress.Meter + tq *tq.TransferQueue + + lockClient *locking.Client + committerName string + committerEmail string + unownedLocks uint64 } -func newUploadContext(dryRun bool) *uploadContext { - return &uploadContext{ +func newUploadContext(remote string, dryRun bool) *uploadContext { + cfg.CurrentRemote = remote + + ctx := &uploadContext{ + Remote: remote, + Manifest: getTransferManifest(), DryRun: dryRun, uploadedOids: tools.NewStringSet(), + lockClient: newLockClient(remote), } + + ctx.meter = buildProgressMeter(ctx.DryRun) + ctx.tq = newUploadQueue(ctx.Manifest, ctx.Remote, tq.WithProgress(ctx.meter), tq.DryRun(ctx.DryRun)) + + return ctx } // AddUpload adds the given oid to the set of oids that have been uploaded in @@ -35,12 +56,9 @@ func (c *uploadContext) HasUploaded(oid string) bool { return c.uploadedOids.Contains(oid) } -func (c *uploadContext) prepareUpload(unfiltered []*lfs.WrappedPointer) (*tq.TransferQueue, []*lfs.WrappedPointer) { +func (c *uploadContext) prepareUpload(unfiltered ...*lfs.WrappedPointer) (*tq.TransferQueue, []*lfs.WrappedPointer) { numUnfiltered := len(unfiltered) uploadables := make([]*lfs.WrappedPointer, 0, numUnfiltered) - missingLocalObjects := make([]*lfs.WrappedPointer, 0, numUnfiltered) - missingSize := int64(0) - meter := buildProgressMeter(c.DryRun) // XXX(taylor): temporary measure to fix duplicate (broken) results from // scanner @@ -56,75 +74,48 @@ func (c *uploadContext) prepareUpload(unfiltered []*lfs.WrappedPointer) (*tq.Tra } uniqOids.Add(p.Oid) - // estimate in meter early (even if it's not going into uploadables), since - // we will call Skip() based on the results of the download check queue. - meter.Add(p.Size) + // canUpload determines whether the current pointer "p" can be + // uploaded through the TransferQueue below. It is set to false + // only when the file is locked by someone other than the + // current committer. + var canUpload bool = true - if lfs.ObjectExistsOfSize(p.Oid, p.Size) { - uploadables = append(uploadables, p) - } else { - // We think we need to push this but we don't have it - // Store for server checking later - missingLocalObjects = append(missingLocalObjects, p) - missingSize += p.Size + lockQuery := map[string]string{"path": p.Name} + locks, err := c.lockClient.SearchLocks(lockQuery, 1, true) + if err != nil { + ExitWithError(err) } - } - // check to see if the server has the missing objects. - c.checkMissing(missingLocalObjects, missingSize) + if len(locks) > 0 { + lock := locks[0] + + owned := lock.Name == c.committerName && + lock.Email == c.committerEmail + + if owned { + Print("Consider unlocking your locked file: %s", lock.Path) + } else { + Print("Unable to push file %s locked by: %s", lock.Path, lock.Name) + + atomic.AddUint64(&c.unownedLocks, 1) + canUpload = false + } + } + + if canUpload { + // estimate in meter early (even if it's not going into + // uploadables), since we will call Skip() based on the + // results of the download check queue. 
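For context, here is a reduced, self-contained sketch of the per-pointer lock check that `prepareUpload` performs above. The `Lock` fields and the shape of the `SearchLocks` call are taken from the diff; the stub searcher and sample data are illustrative only:

```go
package main

import "fmt"

type Lock struct {
	Path  string
	Name  string
	Email string
}

// lockSearcher abstracts the one call prepareUpload makes on the lock client.
type lockSearcher interface {
	SearchLocks(filter map[string]string, limit int, localOnly bool) ([]Lock, error)
}

// stubLocks is a fake lock store keyed by path, standing in for the server.
type stubLocks map[string]Lock

func (s stubLocks) SearchLocks(filter map[string]string, limit int, localOnly bool) ([]Lock, error) {
	if l, ok := s[filter["path"]]; ok {
		return []Lock{l}, nil
	}
	return nil, nil
}

// canUpload reports whether a path may be pushed: a lock held by someone
// other than the current committer blocks the upload.
func canUpload(ls lockSearcher, path, committerName, committerEmail string) (bool, error) {
	locks, err := ls.SearchLocks(map[string]string{"path": path}, 1, true)
	if err != nil {
		return false, err
	}
	if len(locks) == 0 {
		return true, nil
	}
	lock := locks[0]
	owned := lock.Name == committerName && lock.Email == committerEmail
	return owned, nil
}

func main() {
	locks := stubLocks{"a.bin": {Path: "a.bin", Name: "alice", Email: "alice@example.com"}}
	ok, _ := canUpload(locks, "a.bin", "bob", "bob@example.com")
	fmt.Println(ok) // false: a.bin is locked by alice, so bob's push is refused
}
```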
+ c.meter.Add(p.Size) - // build the TransferQueue, automatically skipping any missing objects that - // the server already has. - uploadQueue := newUploadQueue(tq.WithProgress(meter), tq.DryRun(c.DryRun)) - for _, p := range missingLocalObjects { - if c.HasUploaded(p.Oid) { - // if the server already has this object, call Skip() on - // the progressmeter to decrement the number of files by - // 1 and the number of bytes by `p.Size`. - uploadQueue.Skip(p.Size) - } else { uploadables = append(uploadables, p) } } - return uploadQueue, uploadables + return c.tq, uploadables } -// This checks the given slice of pointers that don't exist in .git/lfs/objects -// against the server. Anything the server already has does not need to be -// uploaded again. -func (c *uploadContext) checkMissing(missing []*lfs.WrappedPointer, missingSize int64) { - numMissing := len(missing) - if numMissing == 0 { - return - } - - checkQueue := newDownloadCheckQueue() - transferCh := checkQueue.Watch() - - done := make(chan int) - go func() { - // this channel is filled with oids for which Check() succeeded - // and Transfer() was called - for oid := range transferCh { - c.SetUploaded(oid) - } - done <- 1 - }() - - for _, p := range missing { - checkQueue.Add(downloadTransfer(p)) - } - - // Currently this is needed to flush the batch but is not enough to sync - // transferc completely. By the time that checkQueue.Wait() returns, the - // transferCh will have been closed, allowing the goroutine above to - // send "1" into the `done` channel. - checkQueue.Wait() - <-done -} - -func uploadPointers(c *uploadContext, unfiltered []*lfs.WrappedPointer) { +func uploadPointers(c *uploadContext, unfiltered ...*lfs.WrappedPointer) { if c.DryRun { for _, p := range unfiltered { if c.HasUploaded(p.Oid) { @@ -138,28 +129,30 @@ func uploadPointers(c *uploadContext, unfiltered []*lfs.WrappedPointer) { return } - q, pointers := c.prepareUpload(unfiltered) + q, pointers := c.prepareUpload(unfiltered...) for _, p := range pointers { - t, err := uploadTransfer(p.Oid, p.Name) - if err != nil { - if errors.IsCleanPointerError(err) { - Exit(uploadMissingErr, p.Oid, p.Name, errors.GetContext(err, "pointer").(*lfs.Pointer).Oid) - } else { - ExitWithError(err) - } + t, err := uploadTransfer(p) + if err != nil && !errors.IsCleanPointerError(err) { + ExitWithError(err) } q.Add(t.Name, t.Path, t.Oid, t.Size) c.SetUploaded(p.Oid) } +} - q.Wait() +func (c *uploadContext) Await() { + c.tq.Wait() - for _, err := range q.Errors() { + for _, err := range c.tq.Errors() { FullError(err) } - if len(q.Errors()) > 0 { + if len(c.tq.Errors()) > 0 { os.Exit(2) } + + if nl := atomic.LoadUint64(&c.unownedLocks); nl > 0 { + ExitWithError(errors.Errorf("lfs: refusing to push %d un-owned lock(s)", nl)) + } } diff --git a/config/config.go b/config/config.go index 756d2fab..b4404dfa 100644 --- a/config/config.go +++ b/config/config.go @@ -5,17 +5,12 @@ package config import ( "errors" "fmt" - "os" "reflect" "strconv" - "strings" "sync" - "github.com/ThomsonReutersEikon/go-ntlm/ntlm" - "github.com/bgentry/go-netrc/netrc" - "github.com/git-lfs/git-lfs/git" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/tools" - "github.com/rubyist/tracerx" ) var ( @@ -57,34 +52,20 @@ type Configuration struct { // configuration. 
Git Environment - CurrentRemote string - NtlmSession ntlm.ClientSession - envVars map[string]string - envVarsMutex sync.Mutex - IsTracingHttp bool - IsDebuggingHttp bool - IsLoggingStats bool + CurrentRemote string loading sync.Mutex // guards initialization of gitConfig and remotes remotes []string extensions map[string]Extension - manualEndpoint *Endpoint - parsedNetrc netrcfinder - urlAliasesMap map[string]string - urlAliasMu sync.Mutex + manualEndpoint *lfsapi.Endpoint + endpointFinder lfsapi.EndpointFinder + endpointMu sync.Mutex } func New() *Configuration { - c := &Configuration{ - Os: EnvironmentOf(NewOsFetcher()), - CurrentRemote: defaultRemote, - envVars: make(map[string]string), - } - + c := &Configuration{Os: EnvironmentOf(NewOsFetcher())} c.Git = &gitEnvironment{config: c} - c.IsTracingHttp = c.Os.Bool("GIT_CURL_VERBOSE", false) - c.IsDebuggingHttp = c.Os.Bool("LFS_DEBUG_HTTP", false) - c.IsLoggingStats = c.Os.Bool("GIT_LOG_STATS", false) + initConfig(c) return c } @@ -103,12 +84,16 @@ type Values struct { // // This method should only be used during testing. func NewFrom(v Values) *Configuration { - return &Configuration{ + c := &Configuration{ Os: EnvironmentOf(mapFetcher(v.Os)), Git: EnvironmentOf(mapFetcher(v.Git)), - - envVars: make(map[string]string, 0), } + initConfig(c) + return c +} + +func initConfig(c *Configuration) { + c.CurrentRemote = defaultRemote } // Unmarshal unmarshals the *Configuration in context into all of `v`'s fields, @@ -202,51 +187,19 @@ func (c *Configuration) parseTag(tag reflect.StructTag) (key string, env Environ // GitRemoteUrl returns the git clone/push url for a given remote (blank if not found) // the forpush argument is to cater for separate remote.name.pushurl settings func (c *Configuration) GitRemoteUrl(remote string, forpush bool) string { - if forpush { - if u, ok := c.Git.Get("remote." + remote + ".pushurl"); ok { - return u - } - } - - if u, ok := c.Git.Get("remote." + remote + ".url"); ok { - return u - } - - if err := git.ValidateRemote(remote); err == nil { - return remote - } - - return "" - + return c.endpointConfig().GitRemoteURL(remote, forpush) } // Manually set an Endpoint to use instead of deriving from Git config -func (c *Configuration) SetManualEndpoint(e Endpoint) { +func (c *Configuration) SetManualEndpoint(e lfsapi.Endpoint) { c.manualEndpoint = &e } -func (c *Configuration) Endpoint(operation string) Endpoint { +func (c *Configuration) Endpoint(operation string) lfsapi.Endpoint { if c.manualEndpoint != nil { return *c.manualEndpoint } - - if operation == "upload" { - if url, ok := c.Git.Get("lfs.pushurl"); ok { - return NewEndpointWithConfig(url, c) - } - } - - if url, ok := c.Git.Get("lfs.url"); ok { - return NewEndpointWithConfig(url, c) - } - - if len(c.CurrentRemote) > 0 && c.CurrentRemote != defaultRemote { - if endpoint := c.RemoteEndpoint(c.CurrentRemote, operation); len(endpoint.Url) > 0 { - return endpoint - } - } - - return c.RemoteEndpoint(defaultRemote, operation) + return c.endpointConfig().Endpoint(operation, c.CurrentRemote) } func (c *Configuration) ConcurrentTransfers() int { @@ -283,74 +236,16 @@ func (c *Configuration) BatchTransfer() bool { } func (c *Configuration) NtlmAccess(operation string) bool { - return c.Access(operation) == "ntlm" -} - -// PrivateAccess will retrieve the access value and return true if -// the value is set to private. 
When a repo is marked as having private -// access, the http requests for the batch api will fetch the credentials -// before running, otherwise the request will run without credentials. -func (c *Configuration) PrivateAccess(operation string) bool { - return c.Access(operation) != "none" + return c.Access(operation) == lfsapi.NTLMAccess } // Access returns the access auth type. -func (c *Configuration) Access(operation string) string { +func (c *Configuration) Access(operation string) lfsapi.Access { return c.EndpointAccess(c.Endpoint(operation)) } -// SetAccess will set the private access flag in .git/config. -func (c *Configuration) SetAccess(operation string, authType string) { - c.SetEndpointAccess(c.Endpoint(operation), authType) -} - -func (c *Configuration) FindNetrcHost(host string) (*netrc.Machine, error) { - c.loading.Lock() - defer c.loading.Unlock() - if c.parsedNetrc == nil { - n, err := c.parseNetrc() - if err != nil { - return nil, err - } - c.parsedNetrc = n - } - - return c.parsedNetrc.FindMachine(host), nil -} - -// Manually override the netrc config -func (c *Configuration) SetNetrc(n netrcfinder) { - c.parsedNetrc = n -} - -func (c *Configuration) EndpointAccess(e Endpoint) string { - key := fmt.Sprintf("lfs.%s.access", e.Url) - if v, ok := c.Git.Get(key); ok && len(v) > 0 { - lower := strings.ToLower(v) - if lower == "private" { - return "basic" - } - return lower - } - return "none" -} - -func (c *Configuration) SetEndpointAccess(e Endpoint, authType string) { - c.loadGitConfig() - - tracerx.Printf("setting repository access to %s", authType) - key := fmt.Sprintf("lfs.%s.access", e.Url) - - // Modify the config cache because it's checked again in this process - // without being reloaded. - switch authType { - case "", "none": - git.Config.UnsetLocalKey("", key) - c.Git.del(key) - default: - git.Config.SetLocal("", key, authType) - c.Git.set(key, authType) - } +func (c *Configuration) EndpointAccess(e lfsapi.Endpoint) lfsapi.Access { + return c.endpointConfig().AccessFor(e.Url) } func (c *Configuration) FetchIncludePaths() []string { @@ -363,27 +258,8 @@ func (c *Configuration) FetchExcludePaths() []string { return tools.CleanPaths(patterns, ",") } -func (c *Configuration) RemoteEndpoint(remote, operation string) Endpoint { - if len(remote) == 0 { - remote = defaultRemote - } - - // Support separate push URL if specified and pushing - if operation == "upload" { - if url, ok := c.Git.Get("remote." + remote + ".lfspushurl"); ok { - return NewEndpointWithConfig(url, c) - } - } - if url, ok := c.Git.Get("remote." + remote + ".lfsurl"); ok { - return NewEndpointWithConfig(url, c) - } - - // finally fall back on git remote url (also supports pushurl) - if url := c.GitRemoteUrl(remote, operation == "upload"); url != "" { - return NewEndpointFromCloneURLWithConfig(url, c) - } - - return Endpoint{} +func (c *Configuration) RemoteEndpoint(remote, operation string) lfsapi.Endpoint { + return c.endpointConfig().RemoteEndpoint(operation, remote) } func (c *Configuration) Remotes() []string { @@ -392,18 +268,23 @@ func (c *Configuration) Remotes() []string { return c.remotes } -// GitProtocol returns the protocol for the LFS API when converting from a -// git:// remote url. 
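The endpoint lookup that `Endpoint` and `RemoteEndpoint` used to perform inline now lives behind `lfsapi.EndpointFinder`. A sketch of the resolution order implied by the removed code follows; it works over a plain map standing in for git config, and the real finder handles more cases (SSH remotes, appending `.git/info/lfs`), so treat it as an approximation rather than the lfsapi implementation:

```go
package main

import "fmt"

// resolveLFSURL approximates the lookup order from the removed code:
// lfs.pushurl (uploads only), then lfs.url, then remote.<r>.lfspushurl
// (uploads only), then remote.<r>.lfsurl, then the remote's git URL.
func resolveLFSURL(git map[string]string, operation, remote string) string {
	if operation == "upload" {
		if u, ok := git["lfs.pushurl"]; ok {
			return u
		}
	}
	if u, ok := git["lfs.url"]; ok {
		return u
	}
	if operation == "upload" {
		if u, ok := git["remote."+remote+".lfspushurl"]; ok {
			return u
		}
	}
	if u, ok := git["remote."+remote+".lfsurl"]; ok {
		return u
	}
	// Fall back to the git remote URL; the real code then derives the
	// LFS endpoint from it, which is omitted here.
	return git["remote."+remote+".url"]
}

func main() {
	git := map[string]string{
		"remote.origin.url":    "https://example.com/foo/bar.git",
		"remote.origin.lfsurl": "https://lfs.example.com/foo/bar",
		"lfs.pushurl":          "https://write.example.com/foo/bar",
	}
	fmt.Println(resolveLFSURL(git, "download", "origin")) // https://lfs.example.com/foo/bar
	fmt.Println(resolveLFSURL(git, "upload", "origin"))   // https://write.example.com/foo/bar
}
```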
func (c *Configuration) GitProtocol() string { - if value, ok := c.Git.Get("lfs.gitprotocol"); ok { - return value + return c.endpointConfig().GitProtocol() +} + +func (c *Configuration) endpointConfig() lfsapi.EndpointFinder { + c.endpointMu.Lock() + defer c.endpointMu.Unlock() + + if c.endpointFinder == nil { + c.endpointFinder = lfsapi.NewEndpointFinder(c.Git) } - return "https" + + return c.endpointFinder } func (c *Configuration) Extensions() map[string]Extension { c.loadGitConfig() - return c.extensions } @@ -412,50 +293,6 @@ func (c *Configuration) SortedExtensions() ([]Extension, error) { return SortExtensions(c.Extensions()) } -func (c *Configuration) urlAliases() map[string]string { - c.urlAliasMu.Lock() - defer c.urlAliasMu.Unlock() - - if c.urlAliasesMap == nil { - c.urlAliasesMap = make(map[string]string) - prefix := "url." - suffix := ".insteadof" - for gitkey, gitval := range c.Git.All() { - if strings.HasPrefix(gitkey, prefix) && strings.HasSuffix(gitkey, suffix) { - if _, ok := c.urlAliasesMap[gitval]; ok { - fmt.Fprintf(os.Stderr, "WARNING: Multiple 'url.*.insteadof' keys with the same alias: %q\n", gitval) - } - c.urlAliasesMap[gitval] = gitkey[len(prefix) : len(gitkey)-len(suffix)] - } - } - } - - return c.urlAliasesMap -} - -// ReplaceUrlAlias returns a url with a prefix from a `url.*.insteadof` git -// config setting. If multiple aliases match, use the longest one. -// See https://git-scm.com/docs/git-config for Git's docs. -func (c *Configuration) ReplaceUrlAlias(rawurl string) string { - var longestalias string - aliases := c.urlAliases() - for alias, _ := range aliases { - if !strings.HasPrefix(rawurl, alias) { - continue - } - - if longestalias < alias { - longestalias = alias - } - } - - if len(longestalias) > 0 { - return aliases[longestalias] + rawurl[len(longestalias):] - } - - return rawurl -} - func (c *Configuration) FetchPruneConfig() FetchPruneConfig { f := &FetchPruneConfig{ FetchRecentRefsDays: 7, diff --git a/config/config_test.go b/config/config_test.go index 29576be6..03a3db02 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -7,266 +7,6 @@ import ( "github.com/stretchr/testify/assert" ) -func TestEndpointDefaultsToOrigin(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.lfsurl": "abc"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "abc", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestEndpointOverridesOrigin(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "lfs.url": "abc", - "remote.origin.lfsurl": "def", - }, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "abc", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestEndpointNoOverrideDefaultRemote(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "remote.origin.lfsurl": "abc", - "remote.other.lfsurl": "def", - }, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "abc", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestEndpointUseAlternateRemote(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "remote.origin.lfsurl": "abc", - "remote.other.lfsurl": "def", - }, - }) - - cfg.CurrentRemote = "other" - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "def", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", 
endpoint.SshPath) -} - -func TestEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "https://example.com/foo/bar"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestBareEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "https://example.com/foo/bar.git"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestEndpointSeparateClonePushUrl(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "remote.origin.url": "https://example.com/foo/bar.git", - "remote.origin.pushurl": "https://readwrite.com/foo/bar.git"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - - endpoint = cfg.Endpoint("upload") - assert.Equal(t, "https://readwrite.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestEndpointOverriddenSeparateClonePushLfsUrl(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "remote.origin.url": "https://example.com/foo/bar.git", - "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", - "remote.origin.lfsurl": "https://examplelfs.com/foo/bar", - "remote.origin.lfspushurl": "https://readwritelfs.com/foo/bar"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://examplelfs.com/foo/bar", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - - endpoint = cfg.Endpoint("upload") - assert.Equal(t, "https://readwritelfs.com/foo/bar", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestEndpointGlobalSeparateLfsPush(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "lfs.url": "https://readonly.com/foo/bar", - "lfs.pushurl": "https://write.com/foo/bar", - }, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://readonly.com/foo/bar", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - - endpoint = cfg.Endpoint("upload") - assert.Equal(t, "https://write.com/foo/bar", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) -} - -func TestSSHEndpointOverridden(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "remote.origin.url": "git@example.com:foo/bar", - "remote.origin.lfsurl": "lfs", - }, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestSSHEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "ssh://git@example.com/foo/bar"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "git@example.com", endpoint.SshUserAndHost) - assert.Equal(t, "foo/bar", endpoint.SshPath) 
- assert.Equal(t, "", endpoint.SshPort) -} - -func TestSSHCustomPortEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "ssh://git@example.com:9000/foo/bar"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "git@example.com", endpoint.SshUserAndHost) - assert.Equal(t, "foo/bar", endpoint.SshPath) - assert.Equal(t, "9000", endpoint.SshPort) -} - -func TestBareSSHEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "git@example.com:foo/bar.git"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "git@example.com", endpoint.SshUserAndHost) - assert.Equal(t, "foo/bar.git", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestSSHEndpointFromGlobalLfsUrl(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"lfs.url": "git@example.com:foo/bar.git"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git", endpoint.Url) - assert.Equal(t, "git@example.com", endpoint.SshUserAndHost) - assert.Equal(t, "foo/bar.git", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestHTTPEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "http://example.com/foo/bar"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestBareHTTPEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "http://example.com/foo/bar.git"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestGitEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "git://example.com/foo/bar"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestGitEndpointAddsLfsSuffixWithCustomProtocol(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{ - "remote.origin.url": "git://example.com/foo/bar", - "lfs.gitprotocol": "http", - }, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - -func TestBareGitEndpointAddsLfsSuffix(t *testing.T) { - cfg := NewFrom(Values{ - Git: map[string]string{"remote.origin.url": "git://example.com/foo/bar.git"}, - }) - - endpoint := cfg.Endpoint("download") - assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", endpoint.Url) - assert.Equal(t, "", endpoint.SshUserAndHost) - assert.Equal(t, "", endpoint.SshPath) - assert.Equal(t, "", endpoint.SshPort) -} - func TestConcurrentTransfersSetValue(t *testing.T) { cfg := NewFrom(Values{ Git: map[string]string{ 
@@ -405,82 +145,6 @@ func TestBatchAbsentIsTrue(t *testing.T) { assert.True(t, v) } -func TestAccessConfig(t *testing.T) { - type accessTest struct { - Access string - PrivateAccess bool - } - - tests := map[string]accessTest{ - "": {"none", false}, - "basic": {"basic", true}, - "BASIC": {"basic", true}, - "private": {"basic", true}, - "PRIVATE": {"basic", true}, - "invalidauth": {"invalidauth", true}, - } - - for value, expected := range tests { - cfg := NewFrom(Values{ - Git: map[string]string{ - "lfs.url": "http://example.com", - "lfs.http://example.com.access": value, - "lfs.https://example.com.access": "bad", - }, - }) - - if access := cfg.Access("download"); access != expected.Access { - t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) - } - if access := cfg.Access("upload"); access != expected.Access { - t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) - } - - if priv := cfg.PrivateAccess("download"); priv != expected.PrivateAccess { - t.Errorf("Expected PrivateAccess() with value %q to be %v, got %v", value, expected.PrivateAccess, priv) - } - if priv := cfg.PrivateAccess("upload"); priv != expected.PrivateAccess { - t.Errorf("Expected PrivateAccess() with value %q to be %v, got %v", value, expected.PrivateAccess, priv) - } - } - - // Test again but with separate push url - for value, expected := range tests { - cfg := NewFrom(Values{ - Git: map[string]string{ - "lfs.url": "http://example.com", - "lfs.pushurl": "http://examplepush.com", - "lfs.http://example.com.access": value, - "lfs.http://examplepush.com.access": value, - "lfs.https://example.com.access": "bad", - }, - }) - - if access := cfg.Access("download"); access != expected.Access { - t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) - } - if access := cfg.Access("upload"); access != expected.Access { - t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) - } - - if priv := cfg.PrivateAccess("download"); priv != expected.PrivateAccess { - t.Errorf("Expected PrivateAccess() with value %q to be %v, got %v", value, expected.PrivateAccess, priv) - } - if priv := cfg.PrivateAccess("upload"); priv != expected.PrivateAccess { - t.Errorf("Expected PrivateAccess() with value %q to be %v, got %v", value, expected.PrivateAccess, priv) - } - } - -} - -func TestAccessAbsentConfig(t *testing.T) { - cfg := NewFrom(Values{}) - assert.Equal(t, "none", cfg.Access("download")) - assert.Equal(t, "none", cfg.Access("upload")) - assert.False(t, cfg.PrivateAccess("download")) - assert.False(t, cfg.PrivateAccess("upload")) -} - func TestLoadValidExtension(t *testing.T) { cfg := NewFrom(Values{ Git: map[string]string{}, diff --git a/config/environment.go b/config/environment.go index a9522d02..01a712d8 100644 --- a/config/environment.go +++ b/config/environment.go @@ -40,10 +40,6 @@ type Environment interface { // All returns a copy of all the key/value pairs for the current environment. 
All() map[string]string - - // deprecated, don't use - set(key, value string) - del(key string) } type environment struct { @@ -94,11 +90,3 @@ func (e *environment) Int(key string, def int) (val int) { func (e *environment) All() map[string]string { return e.Fetcher.All() } - -func (e *environment) set(key, value string) { - e.Fetcher.set(key, value) -} - -func (e *environment) del(key string) { - e.Fetcher.del(key) -} diff --git a/config/fetcher.go b/config/fetcher.go index df3df1e8..24e0dfad 100644 --- a/config/fetcher.go +++ b/config/fetcher.go @@ -10,8 +10,4 @@ type Fetcher interface { // All returns a copy of all the key/value pairs for the current environment. All() map[string]string - - // deprecated, don't use - set(key, value string) - del(key string) } diff --git a/config/git_environment.go b/config/git_environment.go index a8d441df..927c0485 100644 --- a/config/git_environment.go +++ b/config/git_environment.go @@ -44,18 +44,6 @@ func (g *gitEnvironment) All() map[string]string { return g.git.All() } -func (g *gitEnvironment) set(key, value string) { - g.loadGitConfig() - - g.git.set(key, value) -} - -func (g *gitEnvironment) del(key string) { - g.loadGitConfig() - - g.git.del(key) -} - // loadGitConfig reads and parses the .gitconfig by calling ReadGitConfig. It // also sets values on the configuration instance `g.config`. // diff --git a/config/git_fetcher.go b/config/git_fetcher.go index 5eb8ea65..722b4929 100644 --- a/config/git_fetcher.go +++ b/config/git_fetcher.go @@ -139,18 +139,6 @@ func (g *GitFetcher) All() map[string]string { return newmap } -func (g *GitFetcher) set(key, value string) { - g.vmu.Lock() - defer g.vmu.Unlock() - g.vals[strings.ToLower(key)] = value -} - -func (g *GitFetcher) del(key string) { - g.vmu.Lock() - defer g.vmu.Unlock() - delete(g.vals, strings.ToLower(key)) -} - func getGitConfigs() (sources []*GitConfig) { if lfsconfig := getFileGitConfig(".lfsconfig"); lfsconfig != nil { sources = append(sources, lfsconfig) diff --git a/config/map_fetcher.go b/config/map_fetcher.go index 658eef2d..43448006 100644 --- a/config/map_fetcher.go +++ b/config/map_fetcher.go @@ -21,11 +21,3 @@ func (m mapFetcher) All() map[string]string { } return newmap } - -func (m mapFetcher) set(key, value string) { - m[key] = value -} - -func (m mapFetcher) del(key string) { - delete(m, key) -} diff --git a/config/os_fetcher.go b/config/os_fetcher.go index b906bff8..ec585ff1 100644 --- a/config/os_fetcher.go +++ b/config/os_fetcher.go @@ -57,11 +57,3 @@ func (o *OsFetcher) Get(key string) (val string, ok bool) { func (o *OsFetcher) All() map[string]string { return nil } - -func (o *OsFetcher) set(key, value string) { - panic("cannot modify OS ENV keys") -} - -func (o *OsFetcher) del(key string) { - panic("cannot modify OS ENV keys") -} diff --git a/config/version.go b/config/version.go index f60c0b2c..96d2f8e8 100644 --- a/config/version.go +++ b/config/version.go @@ -4,6 +4,8 @@ import ( "fmt" "runtime" "strings" + + "github.com/git-lfs/git-lfs/lfsapi" ) var ( @@ -28,4 +30,5 @@ func init() { gitCommit, ) + lfsapi.UserAgent = VersionDesc } diff --git a/docs/custom-transfers.md b/docs/custom-transfers.md index b9c36c2d..3b2d2824 100644 --- a/docs/custom-transfers.md +++ b/docs/custom-transfers.md @@ -129,8 +129,10 @@ Or if there was an error: #### Stage 2: 0..N Transfers -After the initiation exchange, git-lfs will send any number of transfer -requests to the stdin of the transfer process. 
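The revised wording below makes the sequencing explicit: transfer requests arrive serially, and git-lfs waits for a completion response before issuing the next one. A sketch of what that loop can look like from the transfer process's side follows; the JSON field names here are illustrative placeholders, not the protocol schema, which is described elsewhere in this document:

```go
// Sketch of a custom transfer process's main loop under the serial
// request/response contract: read one JSON request from stdin, do the
// work, write exactly one completion to stdout, then wait for the next.
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

type request struct {
	Event string `json:"event"`
	Oid   string `json:"oid,omitempty"`
	Size  int64  `json:"size,omitempty"`
	Path  string `json:"path,omitempty"`
}

type response struct {
	Event string `json:"event"`
	Oid   string `json:"oid,omitempty"`
	Path  string `json:"path,omitempty"`
}

func main() {
	in := bufio.NewScanner(os.Stdin)
	out := json.NewEncoder(os.Stdout)

	for in.Scan() {
		var req request
		if err := json.Unmarshal(in.Bytes(), &req); err != nil {
			fmt.Fprintln(os.Stderr, "bad request:", err)
			continue
		}
		if req.Event == "terminate" {
			return // git-lfs is finished; exit cleanly
		}

		// ... perform the upload or download for req.Oid here ...

		// One completion per request; git-lfs does not send the next
		// request until it has read this response.
		out.Encode(response{Event: "complete", Oid: req.Oid, Path: req.Path})
	}
}
```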
+After the initiation exchange, git-lfs will send any number of transfer +requests to the stdin of the transfer process, in a serial sequence. Once a +transfer request is sent to the process, it awaits a completion response before +sending the next request. ##### Uploads diff --git a/git/git.go b/git/git.go index 1768073b..ed7795be 100644 --- a/git/git.go +++ b/git/git.go @@ -573,6 +573,27 @@ func GetCommitSummary(commit string) (*CommitSummary, error) { } } +func isCygwin() bool { + cmd := subprocess.ExecCommand("uname") + out, err := cmd.Output() + if err != nil { + return false + } + return bytes.Contains(out, []byte("CYGWIN")) +} + +func translateCygwinPath(path string) (string, error) { + cmd := subprocess.ExecCommand("cygpath", "-w", path) + buf := &bytes.Buffer{} + cmd.Stderr = buf + out, err := cmd.Output() + output := strings.TrimSpace(string(out)) + if err != nil { + return path, fmt.Errorf("Failed to translate path from cygwin to windows: %s", buf.String()) + } + return output, nil +} + func GitAndRootDirs() (string, string, error) { cmd := subprocess.ExecCommand("git", "rev-parse", "--git-dir", "--show-toplevel") buf := &bytes.Buffer{} @@ -587,6 +608,12 @@ func GitAndRootDirs() (string, string, error) { paths := strings.Split(output, "\n") pathLen := len(paths) + if isCygwin() { + for i := 0; i < pathLen; i++ { + paths[i], err = translateCygwinPath(paths[i]) + } + } + if pathLen == 0 { return "", "", fmt.Errorf("Bad git rev-parse output: %q", output) } @@ -612,6 +639,9 @@ func RootDir() (string, error) { } path := strings.TrimSpace(string(out)) + if isCygwin() { + path, err = translateCygwinPath(path) + } if len(path) > 0 { return filepath.Abs(path) } diff --git a/httputil/http.go b/httputil/http.go deleted file mode 100644 index d93f5424..00000000 --- a/httputil/http.go +++ /dev/null @@ -1,351 +0,0 @@ -// Package httputil provides additional helper functions for http services -// NOTE: Subject to change, do not rely on this package from outside git-lfs source -package httputil - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/http/httputil" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "github.com/git-lfs/git-lfs/config" - "github.com/rubyist/tracerx" -) - -type httpTransferStats struct { - HeaderSize int - BodySize int - Start time.Time - Stop time.Time -} - -type httpTransfer struct { - requestStats *httpTransferStats - responseStats *httpTransferStats -} - -var ( - // TODO should use some locks - httpTransfers = make(map[*http.Response]*httpTransfer) - httpTransferBuckets = make(map[string][]*http.Response) - httpTransfersLock sync.Mutex - httpTransferBucketsLock sync.Mutex - httpClients map[string]*HttpClient - httpClientsMutex sync.Mutex - UserAgent string -) - -func LogTransfer(cfg *config.Configuration, key string, res *http.Response) { - if cfg.IsLoggingStats { - httpTransferBucketsLock.Lock() - httpTransferBuckets[key] = append(httpTransferBuckets[key], res) - httpTransferBucketsLock.Unlock() - } -} - -type HttpClient struct { - Config *config.Configuration - *http.Client -} - -func (c *HttpClient) Do(req *http.Request) (*http.Response, error) { - traceHttpRequest(c.Config, req) - - crc := countingRequest(c.Config, req) - if req.Body != nil { - // Only set the body if we have a body, but create the countingRequest - // anyway to make using zeroed stats easier. 
- req.Body = crc - } - - start := time.Now() - res, err := c.Client.Do(req) - if err != nil { - return res, err - } - - traceHttpResponse(c.Config, res) - - cresp := countingResponse(c.Config, res) - res.Body = cresp - - if c.Config.IsLoggingStats { - reqHeaderSize := 0 - resHeaderSize := 0 - - if dump, err := httputil.DumpRequest(req, false); err == nil { - reqHeaderSize = len(dump) - } - - if dump, err := httputil.DumpResponse(res, false); err == nil { - resHeaderSize = len(dump) - } - - reqstats := &httpTransferStats{HeaderSize: reqHeaderSize, BodySize: crc.Count} - - // Response body size cannot be figured until it is read. Do not rely on a Content-Length - // header because it may not exist or be -1 in the case of chunked responses. - resstats := &httpTransferStats{HeaderSize: resHeaderSize, Start: start} - t := &httpTransfer{requestStats: reqstats, responseStats: resstats} - httpTransfersLock.Lock() - httpTransfers[res] = t - httpTransfersLock.Unlock() - } - - return res, err -} - -// NewHttpClient returns a new HttpClient for the given host (which may be "host:port") -func NewHttpClient(c *config.Configuration, host string) *HttpClient { - httpClientsMutex.Lock() - defer httpClientsMutex.Unlock() - - if httpClients == nil { - httpClients = make(map[string]*HttpClient) - } - if client, ok := httpClients[host]; ok { - return client - } - - dialtime := c.Git.Int("lfs.dialtimeout", 30) - keepalivetime := c.Git.Int("lfs.keepalive", 1800) // 30 minutes - tlstime := c.Git.Int("lfs.tlstimeout", 30) - - tr := &http.Transport{ - Proxy: ProxyFromGitConfigOrEnvironment(c), - Dial: (&net.Dialer{ - Timeout: time.Duration(dialtime) * time.Second, - KeepAlive: time.Duration(keepalivetime) * time.Second, - }).Dial, - TLSHandshakeTimeout: time.Duration(tlstime) * time.Second, - MaxIdleConnsPerHost: c.ConcurrentTransfers(), - } - - tr.TLSClientConfig = &tls.Config{} - if isCertVerificationDisabledForHost(c, host) { - tr.TLSClientConfig.InsecureSkipVerify = true - } else { - tr.TLSClientConfig.RootCAs = getRootCAsForHost(c, host) - } - - client := &HttpClient{ - Config: c, - Client: &http.Client{Transport: tr, CheckRedirect: CheckRedirect}, - } - httpClients[host] = client - - return client -} - -func CheckRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 3 { - return errors.New("stopped after 3 redirects") - } - - oldest := via[0] - for key, _ := range oldest.Header { - if key == "Authorization" { - if req.URL.Scheme != oldest.URL.Scheme || req.URL.Host != oldest.URL.Host { - continue - } - } - req.Header.Set(key, oldest.Header.Get(key)) - } - - oldestUrl := strings.SplitN(oldest.URL.String(), "?", 2)[0] - newUrl := strings.SplitN(req.URL.String(), "?", 2)[0] - tracerx.Printf("api: redirect %s %s to %s", oldest.Method, oldestUrl, newUrl) - - return nil -} - -var tracedTypes = []string{"json", "text", "xml", "html"} - -func traceHttpRequest(cfg *config.Configuration, req *http.Request) { - tracerx.Printf("HTTP: %s", TraceHttpReq(req)) - - if cfg.IsTracingHttp == false { - return - } - - dump, err := httputil.DumpRequest(req, false) - if err != nil { - return - } - - traceHttpDump(cfg, ">", dump) -} - -func traceHttpResponse(cfg *config.Configuration, res *http.Response) { - if res == nil { - return - } - - tracerx.Printf("HTTP: %d", res.StatusCode) - - if cfg.IsTracingHttp == false { - return - } - - dump, err := httputil.DumpResponse(res, false) - if err != nil { - return - } - - if isTraceableContent(res.Header) { - fmt.Fprintf(os.Stderr, "\n\n") - } else { - 
fmt.Fprintf(os.Stderr, "\n") - } - - traceHttpDump(cfg, "<", dump) -} - -func traceHttpDump(cfg *config.Configuration, direction string, dump []byte) { - scanner := bufio.NewScanner(bytes.NewBuffer(dump)) - - for scanner.Scan() { - line := scanner.Text() - if !cfg.IsDebuggingHttp && strings.HasPrefix(strings.ToLower(line), "authorization: basic") { - fmt.Fprintf(os.Stderr, "%s Authorization: Basic * * * * *\n", direction) - } else { - fmt.Fprintf(os.Stderr, "%s %s\n", direction, line) - } - } -} - -func isTraceableContent(h http.Header) bool { - ctype := strings.ToLower(strings.SplitN(h.Get("Content-Type"), ";", 2)[0]) - for _, tracedType := range tracedTypes { - if strings.Contains(ctype, tracedType) { - return true - } - } - return false -} - -func countingRequest(cfg *config.Configuration, req *http.Request) *CountingReadCloser { - return &CountingReadCloser{ - request: req, - cfg: cfg, - ReadCloser: req.Body, - isTraceableType: isTraceableContent(req.Header), - useGitTrace: false, - } -} - -func countingResponse(cfg *config.Configuration, res *http.Response) *CountingReadCloser { - return &CountingReadCloser{ - response: res, - cfg: cfg, - ReadCloser: res.Body, - isTraceableType: isTraceableContent(res.Header), - useGitTrace: true, - } -} - -type CountingReadCloser struct { - Count int - request *http.Request - response *http.Response - cfg *config.Configuration - isTraceableType bool - useGitTrace bool - io.ReadCloser -} - -func (c *CountingReadCloser) Read(b []byte) (int, error) { - n, err := c.ReadCloser.Read(b) - if err != nil && err != io.EOF { - return n, err - } - - c.Count += n - - if n > 0 && c.isTraceableType { - chunk := string(b[0:n]) - if c.useGitTrace { - tracerx.Printf("HTTP: %s", chunk) - } - - if c.cfg.IsTracingHttp { - fmt.Fprint(os.Stderr, chunk) - } - } - - if err == io.EOF && c.cfg.IsLoggingStats { - // This httpTransfer is done, we're checking it this way so we can also - // catch httpTransfers where the caller forgets to Close() the Body. - if c.response != nil { - httpTransfersLock.Lock() - if httpTransfer, ok := httpTransfers[c.response]; ok { - httpTransfer.responseStats.BodySize = c.Count - httpTransfer.responseStats.Stop = time.Now() - } - httpTransfersLock.Unlock() - } - } - return n, err -} - -// LogHttpStats is intended to be called after all HTTP operations for the -// commmand have finished. It dumps k/v logs, one line per httpTransfer into -// a log file with the current timestamp. 
-func LogHttpStats(cfg *config.Configuration) { - if !cfg.IsLoggingStats { - return - } - - file, err := statsLogFile() - if err != nil { - fmt.Fprintf(os.Stderr, "Error logging http stats: %s\n", err) - return - } - - fmt.Fprintf(file, "concurrent=%d batch=%v time=%d version=%s\n", cfg.ConcurrentTransfers(), cfg.BatchTransfer(), time.Now().Unix(), config.Version) - - for key, responses := range httpTransferBuckets { - for _, response := range responses { - stats := httpTransfers[response] - fmt.Fprintf(file, "key=%s reqheader=%d reqbody=%d resheader=%d resbody=%d restime=%d status=%d url=%s\n", - key, - stats.requestStats.HeaderSize, - stats.requestStats.BodySize, - stats.responseStats.HeaderSize, - stats.responseStats.BodySize, - stats.responseStats.Stop.Sub(stats.responseStats.Start).Nanoseconds(), - response.StatusCode, - response.Request.URL) - } - } - - fmt.Fprintf(os.Stderr, "HTTP Stats logged to file %s\n", file.Name()) -} - -func statsLogFile() (*os.File, error) { - logBase := filepath.Join(config.LocalLogDir, "http") - if err := os.MkdirAll(logBase, 0755); err != nil { - return nil, err - } - - logFile := fmt.Sprintf("http-%d.log", time.Now().Unix()) - return os.Create(filepath.Join(logBase, logFile)) -} - -func TraceHttpReq(req *http.Request) string { - return fmt.Sprintf("%s %s", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0]) -} - -func init() { - UserAgent = config.VersionDesc -} diff --git a/httputil/ntlm.go b/httputil/ntlm.go deleted file mode 100644 index 5aa52c12..00000000 --- a/httputil/ntlm.go +++ /dev/null @@ -1,317 +0,0 @@ -package httputil - -import ( - "bytes" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "strings" - "sync/atomic" - - "github.com/ThomsonReutersEikon/go-ntlm/ntlm" - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" -) - -func ntlmClientSession(c *config.Configuration, creds auth.Creds) (ntlm.ClientSession, error) { - if c.NtlmSession != nil { - return c.NtlmSession, nil - } - splits := strings.Split(creds["username"], "\\") - - if len(splits) != 2 { - errorMessage := fmt.Sprintf("Your user name must be of the form DOMAIN\\user. 
It is currently %s", creds["username"]) - return nil, errors.New(errorMessage) - } - - session, err := ntlm.CreateClientSession(ntlm.Version2, ntlm.ConnectionOrientedMode) - - if err != nil { - return nil, err - } - - session.SetUserInfo(splits[1], creds["password"], strings.ToUpper(splits[0])) - c.NtlmSession = session - return session, nil -} - -func doNTLMRequest(cfg *config.Configuration, request *http.Request, retry bool) (*http.Response, error) { - handReq, err := cloneRequest(request) - if err != nil { - return nil, err - } - - res, err := NewHttpClient(cfg, handReq.Host).Do(handReq) - if err != nil && res == nil { - return nil, err - } - - //If the status is 401 then we need to re-authenticate, otherwise it was successful - if res.StatusCode == 401 { - creds, err := auth.GetCreds(cfg, request) - if err != nil { - return nil, err - } - - negotiateReq, err := cloneRequest(request) - if err != nil { - return nil, err - } - - challengeMessage, err := negotiate(cfg, negotiateReq, ntlmNegotiateMessage) - if err != nil { - return nil, err - } - - challengeReq, err := cloneRequest(request) - if err != nil { - return nil, err - } - - res, err := challenge(cfg, challengeReq, challengeMessage, creds) - if err != nil { - return nil, err - } - - //If the status is 401 then we need to re-authenticate - if res.StatusCode == 401 && retry == true { - return doNTLMRequest(cfg, challengeReq, false) - } - - auth.SaveCredentials(cfg, creds, res) - - return res, nil - } - return res, nil -} - -func negotiate(cfg *config.Configuration, request *http.Request, message string) ([]byte, error) { - request.Header.Add("Authorization", message) - res, err := NewHttpClient(cfg, request.Host).Do(request) - - if res == nil && err != nil { - return nil, err - } - - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - ret, err := parseChallengeResponse(res) - if err != nil { - return nil, err - } - - return ret, nil -} - -func challenge(cfg *config.Configuration, request *http.Request, challengeBytes []byte, creds auth.Creds) (*http.Response, error) { - challenge, err := ntlm.ParseChallengeMessage(challengeBytes) - if err != nil { - return nil, err - } - - session, err := ntlmClientSession(cfg, creds) - if err != nil { - return nil, err - } - - session.ProcessChallengeMessage(challenge) - authenticate, err := session.GenerateAuthenticateMessage() - if err != nil { - return nil, err - } - - authMsg := base64.StdEncoding.EncodeToString(authenticate.Bytes()) - request.Header.Add("Authorization", "NTLM "+authMsg) - return NewHttpClient(cfg, request.Host).Do(request) -} - -func parseChallengeResponse(response *http.Response) ([]byte, error) { - header := response.Header.Get("Www-Authenticate") - if len(header) < 6 { - return nil, fmt.Errorf("Invalid NTLM challenge response: %q", header) - } - - //parse out the "NTLM " at the beginning of the response - challenge := header[5:] - val, err := base64.StdEncoding.DecodeString(challenge) - - if err != nil { - return nil, err - } - return []byte(val), nil -} - -func cloneRequest(request *http.Request) (*http.Request, error) { - cloneReqBody, err := cloneRequestBody(request) - if err != nil { - return nil, err - } - - clonedReq, err := http.NewRequest(request.Method, request.URL.String(), cloneReqBody) - if err != nil { - return nil, err - } - - for k, _ := range request.Header { - clonedReq.Header.Add(k, request.Header.Get(k)) - } - - clonedReq.TransferEncoding = request.TransferEncoding - clonedReq.ContentLength = request.ContentLength - - return clonedReq, nil -} - -func 
cloneRequestBody(req *http.Request) (io.ReadCloser, error) { - if req.Body == nil { - return nil, nil - } - - var cb *cloneableBody - var err error - isCloneableBody := true - - // check to see if the request body is already a cloneableBody - body := req.Body - if existingCb, ok := body.(*cloneableBody); ok { - isCloneableBody = false - cb, err = existingCb.CloneBody() - } else { - cb, err = newCloneableBody(req.Body, 0) - } - - if err != nil { - return nil, err - } - - if isCloneableBody { - cb2, err := cb.CloneBody() - if err != nil { - return nil, err - } - - req.Body = cb2 - } - - return cb, nil -} - -type cloneableBody struct { - bytes []byte // in-memory buffer of body - file *os.File // file buffer of in-memory overflow - reader io.Reader // internal reader for Read() - closed bool // tracks whether body is closed - dup *dupTracker -} - -func newCloneableBody(r io.Reader, limit int64) (*cloneableBody, error) { - if limit < 1 { - limit = 1048576 // default - } - - b := &cloneableBody{} - buf := &bytes.Buffer{} - w, err := io.CopyN(buf, r, limit) - if err != nil && err != io.EOF { - return nil, err - } - - b.bytes = buf.Bytes() - byReader := bytes.NewBuffer(b.bytes) - - if w >= limit { - tmp, err := ioutil.TempFile("", "git-lfs-clone-reader") - if err != nil { - return nil, err - } - - _, err = io.Copy(tmp, r) - tmp.Close() - if err != nil { - os.RemoveAll(tmp.Name()) - return nil, err - } - - f, err := os.Open(tmp.Name()) - if err != nil { - os.RemoveAll(tmp.Name()) - return nil, err - } - - dups := int32(0) - b.dup = &dupTracker{name: f.Name(), dups: &dups} - b.file = f - b.reader = io.MultiReader(byReader, b.file) - } else { - // no file, so set the reader to just the in-memory buffer - b.reader = byReader - } - - return b, nil -} - -func (b *cloneableBody) Read(p []byte) (int, error) { - if b.closed { - return 0, io.EOF - } - return b.reader.Read(p) -} - -func (b *cloneableBody) Close() error { - if !b.closed { - b.closed = true - if b.file == nil { - return nil - } - - b.file.Close() - b.dup.Rm() - } - return nil -} - -func (b *cloneableBody) CloneBody() (*cloneableBody, error) { - if b.closed { - return &cloneableBody{closed: true}, nil - } - - b2 := &cloneableBody{bytes: b.bytes} - - if b.file == nil { - b2.reader = bytes.NewBuffer(b.bytes) - } else { - f, err := os.Open(b.file.Name()) - if err != nil { - return nil, err - } - b2.file = f - b2.reader = io.MultiReader(bytes.NewBuffer(b.bytes), b2.file) - b2.dup = b.dup - b.dup.Add() - } - - return b2, nil -} - -type dupTracker struct { - name string - dups *int32 -} - -func (t *dupTracker) Add() { - atomic.AddInt32(t.dups, 1) -} - -func (t *dupTracker) Rm() { - newval := atomic.AddInt32(t.dups, -1) - if newval < 0 { - os.RemoveAll(t.name) - } -} - -const ntlmNegotiateMessage = "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB" diff --git a/httputil/ntlm_test.go b/httputil/ntlm_test.go deleted file mode 100644 index 8333c87e..00000000 --- a/httputil/ntlm_test.go +++ /dev/null @@ -1,147 +0,0 @@ -package httputil - -import ( - "bytes" - "encoding/base64" - "io/ioutil" - "net/http" - "strings" - "testing" - - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" - "github.com/stretchr/testify/assert" -) - -func TestNtlmClientSession(t *testing.T) { - cfg := config.New() - creds := auth.Creds{"username": "MOOSEDOMAIN\\canadian", "password": "MooseAntlersYeah"} - _, err := ntlmClientSession(cfg, creds) - assert.Nil(t, err) - - //The second call should ignore creds and give the 
session we just created. - badCreds := auth.Creds{"username": "badusername", "password": "MooseAntlersYeah"} - _, err = ntlmClientSession(cfg, badCreds) - assert.Nil(t, err) -} - -func TestNtlmClientSessionBadCreds(t *testing.T) { - cfg := config.New() - creds := auth.Creds{"username": "badusername", "password": "MooseAntlersYeah"} - _, err := ntlmClientSession(cfg, creds) - assert.NotNil(t, err) -} - -func TestNtlmCloneRequest(t *testing.T) { - req1, _ := http.NewRequest("Method", "url", nil) - cloneOfReq1, err := cloneRequest(req1) - assert.Nil(t, err) - assertRequestsEqual(t, req1, cloneOfReq1) - - req2, _ := http.NewRequest("Method", "url", bytes.NewReader([]byte("Moose can be request bodies"))) - cloneOfReq2, err := cloneRequest(req2) - assert.Nil(t, err) - assertRequestsEqual(t, req2, cloneOfReq2) -} - -func assertRequestsEqual(t *testing.T, req1 *http.Request, req2 *http.Request) { - assert.Equal(t, req1.Method, req2.Method) - - for k, v := range req1.Header { - assert.Equal(t, v, req2.Header[k]) - } - - if req1.Body == nil { - assert.Nil(t, req2.Body) - } else { - bytes1, _ := ioutil.ReadAll(req1.Body) - bytes2, _ := ioutil.ReadAll(req2.Body) - assert.Equal(t, bytes1, bytes2) - } -} - -func TestNtlmHeaderParseValid(t *testing.T) { - res := http.Response{} - res.Header = make(map[string][]string) - res.Header.Add("Www-Authenticate", "NTLM "+base64.StdEncoding.EncodeToString([]byte("I am a moose"))) - bytes, err := parseChallengeResponse(&res) - assert.Nil(t, err) - assert.False(t, strings.HasPrefix(string(bytes), "NTLM")) -} - -func TestNtlmHeaderParseInvalidLength(t *testing.T) { - res := http.Response{} - res.Header = make(map[string][]string) - res.Header.Add("Www-Authenticate", "NTL") - ret, err := parseChallengeResponse(&res) - if ret != nil { - t.Errorf("Unexpected challenge response: %v", ret) - } - - if err == nil { - t.Errorf("Expected error, got none!") - } -} - -func TestNtlmHeaderParseInvalid(t *testing.T) { - res := http.Response{} - res.Header = make(map[string][]string) - res.Header.Add("Www-Authenticate", base64.StdEncoding.EncodeToString([]byte("NTLM I am a moose"))) - _, err := parseChallengeResponse(&res) - assert.NotNil(t, err) -} - -func TestCloneSmallBody(t *testing.T) { - cloneable1, err := newCloneableBody(strings.NewReader("abc"), 5) - if err != nil { - t.Fatal(err) - } - - cloneable2, err := cloneable1.CloneBody() - if err != nil { - t.Fatal(err) - } - - assertCloneableBody(t, cloneable2, "abc", "abc") - assertCloneableBody(t, cloneable1, "abc", "abc") -} - -func TestCloneBigBody(t *testing.T) { - cloneable1, err := newCloneableBody(strings.NewReader("abc"), 2) - if err != nil { - t.Fatal(err) - } - - cloneable2, err := cloneable1.CloneBody() - if err != nil { - t.Fatal(err) - } - - assertCloneableBody(t, cloneable2, "abc", "ab") - assertCloneableBody(t, cloneable1, "abc", "ab") -} - -func assertCloneableBody(t *testing.T, cloneable *cloneableBody, expectedBody, expectedBuffer string) { - buffer := string(cloneable.bytes) - if buffer != expectedBuffer { - t.Errorf("Expected buffer %q, got %q", expectedBody, buffer) - } - - if cloneable.closed { - t.Errorf("already closed?") - } - - by, err := ioutil.ReadAll(cloneable) - if err != nil { - t.Fatal(err) - } - - if err := cloneable.Close(); err != nil { - t.Errorf("Error closing: %v", err) - } - - actual := string(by) - if actual != expectedBody { - t.Errorf("Expected to read %q, got %q", expectedBody, actual) - } -} diff --git a/httputil/proxy_test.go b/httputil/proxy_test.go deleted file mode 100644 index 
54ad3569..00000000 --- a/httputil/proxy_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package httputil - -import ( - "net/http" - "testing" - - "github.com/git-lfs/git-lfs/config" - "github.com/stretchr/testify/assert" -) - -func TestProxyFromGitConfig(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "http.proxy": "https://proxy-from-git-config:8080", - }, - Os: map[string]string{ - "HTTPS_PROXY": "https://proxy-from-env:8080", - }, - }) - - req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) - if err != nil { - t.Fatal(err) - } - - proxyURL, err := ProxyFromGitConfigOrEnvironment(cfg)(req) - - assert.Equal(t, "proxy-from-git-config:8080", proxyURL.Host) - assert.Nil(t, err) -} - -func TestHttpProxyFromGitConfig(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "http.proxy": "http://proxy-from-git-config:8080", - }, - Os: map[string]string{ - "HTTPS_PROXY": "https://proxy-from-env:8080", - }, - }) - - req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) - if err != nil { - t.Fatal(err) - } - - proxyURL, err := ProxyFromGitConfigOrEnvironment(cfg)(req) - - assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) - assert.Nil(t, err) -} - -func TestProxyFromEnvironment(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "HTTPS_PROXY": "https://proxy-from-env:8080", - }, - }) - - req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) - if err != nil { - t.Fatal(err) - } - - proxyURL, err := ProxyFromGitConfigOrEnvironment(cfg)(req) - - assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) - assert.Nil(t, err) -} - -func TestProxyIsNil(t *testing.T) { - cfg := config.New() - - req, err := http.NewRequest("GET", "http://some-host.com:123/foo/bar", nil) - if err != nil { - t.Fatal(err) - } - - proxyURL, err := ProxyFromGitConfigOrEnvironment(cfg)(req) - - assert.Nil(t, proxyURL) - assert.Nil(t, err) -} - -func TestProxyNoProxy(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "http.proxy": "https://proxy-from-git-config:8080", - }, - Os: map[string]string{ - "NO_PROXY": "some-host", - }, - }) - - req, err := http.NewRequest("GET", "https://some-host:8080", nil) - if err != nil { - t.Fatal(err) - } - - proxyUrl, err := ProxyFromGitConfigOrEnvironment(cfg)(req) - - assert.Nil(t, proxyUrl) - assert.Nil(t, err) -} diff --git a/httputil/request.go b/httputil/request.go deleted file mode 100644 index bdc9073a..00000000 --- a/httputil/request.go +++ /dev/null @@ -1,201 +0,0 @@ -package httputil - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" - - "github.com/rubyist/tracerx" -) - -type ClientError struct { - Message string `json:"message"` - DocumentationUrl string `json:"documentation_url,omitempty"` - RequestId string `json:"request_id,omitempty"` -} - -const ( - basicAuthType = "basic" - ntlmAuthType = "ntlm" - negotiateAuthType = "negotiate" -) - -var ( - authenticateHeaders = []string{"Lfs-Authenticate", "Www-Authenticate"} -) - -func (e *ClientError) Error() string { - msg := e.Message - if len(e.DocumentationUrl) > 0 { - msg += "\nDocs: " + e.DocumentationUrl - } - if len(e.RequestId) > 0 { - msg += "\nRequest ID: " + e.RequestId - } - return msg -} - -// Internal http request management -func doHttpRequest(cfg *config.Configuration, req *http.Request, creds 
auth.Creds) (*http.Response, error) { - var ( - res *http.Response - cause string - err error - ) - - if cfg.NtlmAccess(auth.GetOperationForRequest(req)) { - cause = "ntlm" - res, err = doNTLMRequest(cfg, req, true) - } else { - cause = "http" - res, err = NewHttpClient(cfg, req.Host).Do(req) - } - - if res == nil { - res = &http.Response{ - StatusCode: 0, - Header: make(http.Header), - Request: req, - Body: ioutil.NopCloser(bytes.NewBufferString("")), - } - } - - if err != nil { - if errors.IsAuthError(err) { - SetAuthType(cfg, req, res) - doHttpRequest(cfg, req, creds) - } else { - err = errors.Wrap(err, cause) - } - } else { - err = handleResponse(cfg, res, creds) - } - - if err != nil { - if res != nil { - SetErrorResponseContext(cfg, err, res) - } else { - setErrorRequestContext(cfg, err, req) - } - } - - return res, err -} - -// DoHttpRequest performs a single HTTP request -func DoHttpRequest(cfg *config.Configuration, req *http.Request, useCreds bool) (*http.Response, error) { - var creds auth.Creds - if useCreds { - c, err := auth.GetCreds(cfg, req) - if err != nil { - return nil, err - } - creds = c - } - - return doHttpRequest(cfg, req, creds) -} - -// DoHttpRequestWithRedirects runs a HTTP request and responds to redirects -func DoHttpRequestWithRedirects(cfg *config.Configuration, req *http.Request, via []*http.Request, useCreds bool) (*http.Response, error) { - var creds auth.Creds - if useCreds { - c, err := auth.GetCreds(cfg, req) - if err != nil { - return nil, err - } - creds = c - } - - res, err := doHttpRequest(cfg, req, creds) - if err != nil { - return res, err - } - - if res.StatusCode == 307 { - redirectTo := res.Header.Get("Location") - locurl, err := url.Parse(redirectTo) - if err == nil && !locurl.IsAbs() { - locurl = req.URL.ResolveReference(locurl) - redirectTo = locurl.String() - } - - redirectedReq, err := NewHttpRequest(req.Method, redirectTo, nil) - if err != nil { - return res, errors.Wrapf(err, err.Error()) - } - - via = append(via, req) - - // Avoid seeking and re-wrapping the CountingReadCloser, just get the "real" body - realBody := req.Body - if wrappedBody, ok := req.Body.(*CountingReadCloser); ok { - realBody = wrappedBody.ReadCloser - } - - seeker, ok := realBody.(io.Seeker) - if !ok { - return res, errors.Wrapf(nil, "Request body needs to be an io.Seeker to handle redirects.") - } - - if _, err := seeker.Seek(0, 0); err != nil { - return res, errors.Wrap(err, "request retry") - } - redirectedReq.Body = realBody - redirectedReq.ContentLength = req.ContentLength - - if err = CheckRedirect(redirectedReq, via); err != nil { - return res, errors.Wrapf(err, err.Error()) - } - - return DoHttpRequestWithRedirects(cfg, redirectedReq, via, useCreds) - } - - return res, nil -} - -// NewHttpRequest creates a template request, with the given headers & UserAgent supplied -func NewHttpRequest(method, rawurl string, header map[string]string) (*http.Request, error) { - req, err := http.NewRequest(method, rawurl, nil) - if err != nil { - return nil, err - } - - for key, value := range header { - req.Header.Set(key, value) - } - - req.Header.Set("User-Agent", UserAgent) - - return req, nil -} - -func SetAuthType(cfg *config.Configuration, req *http.Request, res *http.Response) { - authType := GetAuthType(res) - operation := auth.GetOperationForRequest(req) - cfg.SetAccess(operation, authType) - tracerx.Printf("api: http response indicates %q authentication. 
Resubmitting...", authType) -} - -func GetAuthType(res *http.Response) string { - for _, headerName := range authenticateHeaders { - for _, auth := range res.Header[headerName] { - - authLower := strings.ToLower(auth) - // When server sends Www-Authentication: Negotiate, it supports both Kerberos and NTLM. - // Since git-lfs current does not support Kerberos, we will return NTLM in this case. - if strings.HasPrefix(authLower, ntlmAuthType) || strings.HasPrefix(authLower, negotiateAuthType) { - return ntlmAuthType - } - } - } - - return basicAuthType -} diff --git a/httputil/request_error_test.go b/httputil/request_error_test.go deleted file mode 100644 index 8b023c19..00000000 --- a/httputil/request_error_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package httputil - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "strings" - "testing" - - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" -) - -func TestSuccessStatus(t *testing.T) { - cfg := config.New() - for _, status := range []int{200, 201, 202} { - res := &http.Response{StatusCode: status} - if err := handleResponse(cfg, res, nil); err != nil { - t.Errorf("Unexpected error for HTTP %d: %s", status, err.Error()) - } - } -} - -func TestErrorStatusWithCustomMessage(t *testing.T) { - cfg := config.New() - u, err := url.Parse("https://lfs-server.com/objects/oid") - if err != nil { - t.Fatal(err) - } - - statuses := map[int]string{ - 400: "not panic", - 401: "not panic", - 403: "not panic", - 404: "not panic", - 405: "not panic", - 406: "not panic", - 429: "not panic", - 500: "panic", - 501: "not panic", - 503: "panic", - 504: "panic", - 507: "not panic", - 509: "not panic", - } - - for status, panicMsg := range statuses { - cliErr := &ClientError{ - Message: fmt.Sprintf("custom error for %d", status), - } - - by, err := json.Marshal(cliErr) - if err != nil { - t.Errorf("Error building json for status %d: %s", status, err) - continue - } - - res := &http.Response{ - StatusCode: status, - Header: make(http.Header), - Body: ioutil.NopCloser(bytes.NewReader(by)), - Request: &http.Request{URL: u}, - } - res.Header.Set("Content-Type", "application/vnd.git-lfs+json; charset=utf-8") - - err = handleResponse(cfg, res, nil) - if err == nil { - t.Errorf("No error from HTTP %d", status) - continue - } - - expected := fmt.Sprintf("custom error for %d", status) - if actual := err.Error(); !strings.HasSuffix(actual, expected) { - t.Errorf("Expected for HTTP %d:\n%s\nACTUAL:\n%s", status, expected, actual) - continue - } - - if errors.IsFatalError(err) == (panicMsg != "panic") { - t.Errorf("Error for HTTP %d should %s", status, panicMsg) - continue - } - } -} - -func TestErrorStatusWithDefaultMessage(t *testing.T) { - cfg := config.New() - rawurl := "https://lfs-server.com/objects/oid" - u, err := url.Parse(rawurl) - if err != nil { - t.Fatal(err) - } - - statuses := map[int][]string{ - 400: {defaultErrors[400], "not panic"}, - 401: {defaultErrors[401], "not panic"}, - 403: {defaultErrors[401], "not panic"}, - 404: {defaultErrors[404], "not panic"}, - 405: {defaultErrors[400] + " from HTTP 405", "not panic"}, - 406: {defaultErrors[400] + " from HTTP 406", "not panic"}, - 429: {defaultErrors[429], "not panic"}, - 500: {defaultErrors[500], "panic"}, - 501: {defaultErrors[500] + " from HTTP 501", "not panic"}, - 503: {defaultErrors[500] + " from HTTP 503", "panic"}, - 504: {defaultErrors[500] + " from HTTP 504", "panic"}, - 507: {defaultErrors[507], "not panic"}, - 509: {defaultErrors[509], "not 
panic"}, - } - - for status, results := range statuses { - cliErr := &ClientError{ - Message: fmt.Sprintf("custom error for %d", status), - } - - by, err := json.Marshal(cliErr) - if err != nil { - t.Errorf("Error building json for status %d: %s", status, err) - continue - } - - res := &http.Response{ - StatusCode: status, - Header: make(http.Header), - Body: ioutil.NopCloser(bytes.NewReader(by)), - Request: &http.Request{URL: u}, - } - - // purposely wrong content type so it falls back to default - res.Header.Set("Content-Type", "application/vnd.git-lfs+json2") - - err = handleResponse(cfg, res, nil) - if err == nil { - t.Errorf("No error from HTTP %d", status) - continue - } - - expected := fmt.Sprintf(results[0], rawurl) - if actual := err.Error(); !strings.HasSuffix(actual, expected) { - t.Errorf("Expected for HTTP %d:\n%s\nACTUAL:\n%s", status, expected, actual) - continue - } - - if errors.IsFatalError(err) == (results[1] != "panic") { - t.Errorf("Error for HTTP %d should %s", status, results[1]) - continue - } - } -} diff --git a/httputil/request_test.go b/httputil/request_test.go deleted file mode 100644 index b139d33f..00000000 --- a/httputil/request_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package httputil - -import ( - "net/http" - "testing" - - "github.com/stretchr/testify/assert" -) - -type AuthenticateHeaderTestCase struct { - ExpectedAuthType string - Headers map[string][]string -} - -func (c *AuthenticateHeaderTestCase) Assert(t *testing.T) { - t.Logf("lfs/httputil: asserting auth type: %q for: %v", c.ExpectedAuthType, c.Headers) - - assert.Equal(t, c.ExpectedAuthType, GetAuthType(c.HttpResponse())) -} - -func (c *AuthenticateHeaderTestCase) HttpResponse() *http.Response { - res := &http.Response{Header: make(http.Header)} - - for k, vv := range c.Headers { - for _, v := range vv { - res.Header.Add(k, v) - } - } - - return res -} - -func TestGetAuthType(t *testing.T) { - for _, c := range []AuthenticateHeaderTestCase{ - {basicAuthType, map[string][]string{}}, - {ntlmAuthType, map[string][]string{"WWW-Authenticate": {"Basic", "NTLM", "Bearer"}}}, - {ntlmAuthType, map[string][]string{"LFS-Authenticate": {"Basic", "NTLM", "Bearer"}}}, - {ntlmAuthType, map[string][]string{"LFS-Authenticate": {"Basic", "Ntlm"}}}, - {ntlmAuthType, map[string][]string{"Www-Authenticate": {"Basic", "Ntlm"}}}, - {ntlmAuthType, map[string][]string{"WWW-Authenticate": {"Basic"}, - "LFS-Authenticate": {"Ntlm"}}}, - } { - c.Assert(t) - } -} diff --git a/httputil/response.go b/httputil/response.go deleted file mode 100644 index 954164f5..00000000 --- a/httputil/response.go +++ /dev/null @@ -1,136 +0,0 @@ -package httputil - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "regexp" - - "github.com/git-lfs/git-lfs/auth" - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/errors" -) - -var ( - lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) - jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) - hiddenHeaders = map[string]bool{ - "Authorization": true, - } - - defaultErrors = map[int]string{ - 400: "Client error: %s", - 401: "Authorization error: %s\nCheck that you have proper access to the repository", - 403: "Authorization error: %s\nCheck that you have proper access to the repository", - 404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it", - 429: "Rate limit exceeded: %s", - 500: "Server error: %s", - 507: "Insufficient server storage: %s", - 509: "Bandwidth limit exceeded: 
%s", - } -) - -// DecodeResponse attempts to decode the contents of the response as a JSON object -func DecodeResponse(res *http.Response, obj interface{}) error { - ctype := res.Header.Get("Content-Type") - if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { - return nil - } - - err := json.NewDecoder(res.Body).Decode(obj) - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - if err != nil { - return errors.Wrapf(err, "Unable to parse HTTP response for %s", TraceHttpReq(res.Request)) - } - - return nil -} - -// GetDefaultError returns the default text for standard error codes (blank if none) -func GetDefaultError(code int) string { - if s, ok := defaultErrors[code]; ok { - return s - } - return "" -} - -// Check the response from a HTTP request for problems -func handleResponse(cfg *config.Configuration, res *http.Response, creds auth.Creds) error { - auth.SaveCredentials(cfg, creds, res) - - if res.StatusCode < 400 { - return nil - } - - defer func() { - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - }() - - cliErr := &ClientError{} - err := DecodeResponse(res, cliErr) - if err == nil { - if len(cliErr.Message) == 0 { - err = defaultError(res) - } else { - err = errors.Wrap(cliErr, "http") - } - } - - if res.StatusCode == 401 { - if err == nil { - err = errors.New("api: received status 401") - } - return errors.NewAuthError(err) - } - - if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 507 && res.StatusCode != 509 { - if err == nil { - err = errors.Errorf("api: received status %d", res.StatusCode) - } - return errors.NewFatalError(err) - } - - return err -} - -func defaultError(res *http.Response) error { - var msgFmt string - - if f, ok := defaultErrors[res.StatusCode]; ok { - msgFmt = f - } else if res.StatusCode < 500 { - msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode) - } else { - msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP %d", res.StatusCode) - } - - return errors.Errorf(msgFmt, res.Request.URL) -} - -func SetErrorResponseContext(cfg *config.Configuration, err error, res *http.Response) { - errors.SetContext(err, "Status", res.Status) - setErrorHeaderContext(err, "Request", res.Header) - setErrorRequestContext(cfg, err, res.Request) -} - -func setErrorRequestContext(cfg *config.Configuration, err error, req *http.Request) { - errors.SetContext(err, "Endpoint", cfg.Endpoint(auth.GetOperationForRequest(req)).Url) - errors.SetContext(err, "URL", TraceHttpReq(req)) - setErrorHeaderContext(err, "Response", req.Header) -} - -func setErrorHeaderContext(err error, prefix string, head http.Header) { - for key, _ := range head { - contextKey := fmt.Sprintf("%s:%s", prefix, key) - if _, skip := hiddenHeaders[key]; skip { - errors.SetContext(err, contextKey, "--") - } else { - errors.SetContext(err, contextKey, head.Get(key)) - } - } -} diff --git a/lfs/download_queue.go b/lfs/download_queue.go deleted file mode 100644 index 7e4b7ae2..00000000 --- a/lfs/download_queue.go +++ /dev/null @@ -1,19 +0,0 @@ -package lfs - -import ( - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/tq" -) - -// NewDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download -func NewDownloadCheckQueue(cfg *config.Configuration, options ...tq.Option) *tq.TransferQueue { - allOptions := make([]tq.Option, len(options), len(options)+1) - allOptions = append(allOptions, options...) 
- allOptions = append(allOptions, tq.DryRun(true)) - return NewDownloadQueue(cfg, allOptions...) -} - -// NewDownloadQueue builds a DownloadQueue, allowing concurrent downloads. -func NewDownloadQueue(cfg *config.Configuration, options ...tq.Option) *tq.TransferQueue { - return tq.NewTransferQueue(tq.Download, TransferManifest(cfg), options...) -} diff --git a/lfs/lfs.go b/lfs/lfs.go index cbf48117..5734cf14 100644 --- a/lfs/lfs.go +++ b/lfs/lfs.go @@ -109,7 +109,7 @@ func Environ(cfg *config.Configuration, manifest *tq.Manifest) []string { } for _, e := range osEnviron { - if !strings.Contains(e, "GIT_") { + if !strings.Contains(strings.SplitN(e, "=", 2)[0], "GIT_") { continue } env = append(env, e) @@ -118,11 +118,6 @@ func Environ(cfg *config.Configuration, manifest *tq.Manifest) []string { return env } -// TransferManifest builds a tq.Manifest using the given cfg. -func TransferManifest(cfg *config.Configuration) *tq.Manifest { - return tq.NewManifestWithGitEnv(cfg.Access("download"), cfg.Git) -} - func InRepo() bool { return config.LocalGitDir != "" } diff --git a/lfs/manifest_test.go b/lfs/manifest_test.go deleted file mode 100644 index 2372c08d..00000000 --- a/lfs/manifest_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package lfs - -import ( - "testing" - - "github.com/git-lfs/git-lfs/config" - "github.com/stretchr/testify/assert" -) - -func TestManifestIsConfigurable(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.transfer.maxretries": "3", - }, - }) - m := TransferManifest(cfg) - assert.Equal(t, 3, m.MaxRetries()) -} - -func TestManifestChecksNTLM(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.url": "http://foo", - "lfs.http://foo.access": "ntlm", - "lfs.concurrenttransfers": "3", - }, - }) - m := TransferManifest(cfg) - assert.Equal(t, 1, m.MaxRetries()) -} - -func TestManifestClampsValidValues(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.transfer.maxretries": "-1", - }, - }) - m := TransferManifest(cfg) - assert.Equal(t, 1, m.MaxRetries()) -} - -func TestManifestIgnoresNonInts(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.transfer.maxretries": "not_an_int", - }, - }) - m := TransferManifest(cfg) - assert.Equal(t, 1, m.MaxRetries()) -} diff --git a/lfs/pointer_clean.go b/lfs/pointer_clean.go index f1fb2c1e..88c5066e 100644 --- a/lfs/pointer_clean.go +++ b/lfs/pointer_clean.go @@ -5,7 +5,6 @@ import ( "crypto/sha256" "encoding/hex" "io" - "io/ioutil" "os" "github.com/git-lfs/git-lfs/config" @@ -78,7 +77,11 @@ func copyToTemp(reader io.Reader, fileSize int64, cb progress.CopyCallback) (oid } ptr, buf, err := DecodeFrom(reader) - by, rerr := ioutil.ReadAll(buf) + + by := make([]byte, blobSizeCutoff) + n, rerr := buf.Read(by) + by = by[:n] + if rerr != nil || (err == nil && len(by) < 512) { err = errors.NewCleanPointerError(ptr, by) return diff --git a/lfs/pointer_smudge.go b/lfs/pointer_smudge.go index df556396..1173383d 100644 --- a/lfs/pointer_smudge.go +++ b/lfs/pointer_smudge.go @@ -74,7 +74,7 @@ func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, manifest *tq.Manifest, cb progress.CopyCallback) error { fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, pb.FormatBytes(ptr.Size)) - q := tq.NewTransferQueue(tq.Download, manifest) + q := tq.NewTransferQueue(tq.Download, manifest, "") 
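	// tq.NewTransferQueue now takes an additional string argument, passed as ""
	// here; it presumably names the remote, but the corresponding tq package
	// change is not part of this hunk.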
q.Add(filepath.Base(workingfile), mediafile, ptr.Oid, ptr.Size) q.Wait() diff --git a/lfs/upload_queue.go b/lfs/upload_queue.go deleted file mode 100644 index f8df98c3..00000000 --- a/lfs/upload_queue.go +++ /dev/null @@ -1,11 +0,0 @@ -package lfs - -import ( - "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/tq" -) - -// NewUploadQueue builds an UploadQueue, allowing `workers` concurrent uploads. -func NewUploadQueue(cfg *config.Configuration, options ...tq.Option) *tq.TransferQueue { - return tq.NewTransferQueue(tq.Upload, TransferManifest(cfg), options...) -} diff --git a/lfs/util_test.go b/lfs/util_test.go index 2697dc3d..ae0c61e8 100644 --- a/lfs/util_test.go +++ b/lfs/util_test.go @@ -8,7 +8,34 @@ import ( "github.com/stretchr/testify/assert" ) -func TestWriterWithCallback(t *testing.T) { +func TestBodyWithCallback(t *testing.T) { + called := 0 + calledRead := make([]int64, 0, 2) + + cb := func(total int64, read int64, current int) error { + called += 1 + calledRead = append(calledRead, read) + assert.Equal(t, 5, int(total)) + return nil + } + reader := progress.NewByteBodyWithCallback([]byte("BOOYA"), 5, cb) + + readBuf := make([]byte, 3) + n, err := reader.Read(readBuf) + assert.Nil(t, err) + assert.Equal(t, "BOO", string(readBuf[0:n])) + + n, err = reader.Read(readBuf) + assert.Nil(t, err) + assert.Equal(t, "YA", string(readBuf[0:n])) + + assert.Equal(t, 2, called) + assert.Len(t, calledRead, 2) + assert.Equal(t, 3, int(calledRead[0])) + assert.Equal(t, 5, int(calledRead[1])) +} + +func TestReadWithCallback(t *testing.T) { called := 0 calledRead := make([]int64, 0, 2) diff --git a/lfsapi/auth.go b/lfsapi/auth.go new file mode 100644 index 00000000..c78451a9 --- /dev/null +++ b/lfsapi/auth.go @@ -0,0 +1,312 @@ +package lfsapi + +import ( + "encoding/base64" + "fmt" + "net" + "net/http" + "net/url" + "os" + "strings" + + "github.com/bgentry/go-netrc/netrc" + "github.com/git-lfs/git-lfs/errors" + "github.com/rubyist/tracerx" +) + +var ( + defaultCredentialHelper = &commandCredentialHelper{} + defaultNetrcFinder = &noFinder{} + defaultEndpointFinder = NewEndpointFinder(nil) +) + +func (c *Client) DoWithAuth(remote string, req *http.Request) (*http.Response, error) { + credHelper := c.Credentials + if credHelper == nil { + credHelper = defaultCredentialHelper + } + + netrcFinder := c.Netrc + if netrcFinder == nil { + netrcFinder = defaultNetrcFinder + } + + ef := c.Endpoints + if ef == nil { + ef = defaultEndpointFinder + } + + apiEndpoint, access, creds, credsURL, err := getCreds(credHelper, netrcFinder, ef, remote, req) + if err != nil { + return nil, err + } + + res, err := c.doWithCreds(req, credHelper, creds, credsURL, access) + if err != nil { + if errors.IsAuthError(err) { + newAccess := getAuthAccess(res) + if newAccess != access { + c.Endpoints.SetAccess(apiEndpoint.Url, newAccess) + } + + if access == NoneAccess || creds != nil { + tracerx.Printf("api: http response indicates %q authentication. 
Resubmitting...", newAccess) + req.Header.Del("Authorization") + if creds != nil { + credHelper.Reject(creds) + } + return c.DoWithAuth(remote, req) + } + } + + err = errors.Wrap(err, "http") + } + + if res == nil { + return nil, err + } + + switch res.StatusCode { + case 401, 403: + credHelper.Reject(creds) + default: + if res.StatusCode < 300 && res.StatusCode > 199 { + credHelper.Approve(creds) + } + } + + return res, err +} + +func (c *Client) doWithCreds(req *http.Request, credHelper CredentialHelper, creds Creds, credsURL *url.URL, access Access) (*http.Response, error) { + if access == NTLMAccess { + return c.doWithNTLM(req, credHelper, creds, credsURL) + } + return c.Do(req) +} + +// getCreds fills the authorization header for the given request if possible, +// from the following sources: +// +// 1. NTLM access is handled elsewhere. +// 2. Existing Authorization or ?token query tells LFS that the request is ready. +// 3. Netrc based on the hostname. +// 4. URL authentication on the Endpoint URL or the Git Remote URL. +// 5. Git Credential Helper, potentially prompting the user. +// +// There are three URLs in play, that make this a little confusing. +// +// 1. The request URL, which should be something like "https://git.com/repo.git/info/lfs/objects/batch" +// 2. The LFS API URL, which should be something like "https://git.com/repo.git/info/lfs" +// This URL used for the "lfs.URL.access" git config key, which determines +// what kind of auth the LFS server expects. Could be BasicAccess, NTLMAccess, +// or NoneAccess, in which the Git Credential Helper step is skipped. We do +// not want to prompt the user for a password to fetch public repository data. +// 3. The Git Remote URL, which should be something like "https://git.com/repo.git" +// This URL is used for the Git Credential Helper. This way existing https +// Git remote credentials can be re-used for LFS. 
+func getCreds(credHelper CredentialHelper, netrcFinder NetrcFinder, ef EndpointFinder, remote string, req *http.Request) (Endpoint, Access, Creds, *url.URL, error) { + operation := getReqOperation(req) + apiEndpoint := ef.Endpoint(operation, remote) + access := ef.AccessFor(apiEndpoint.Url) + + if access != NTLMAccess { + if requestHasAuth(req) || setAuthFromNetrc(netrcFinder, req) || access == NoneAccess { + return apiEndpoint, access, nil, nil, nil + } + + credsURL, err := getCredURLForAPI(ef, operation, remote, apiEndpoint, req) + if err != nil { + return apiEndpoint, access, nil, nil, errors.Wrap(err, "creds") + } + + if credsURL == nil { + return apiEndpoint, access, nil, nil, nil + } + + creds, err := fillGitCreds(credHelper, ef, req, credsURL) + return apiEndpoint, access, creds, credsURL, err + } + + credsURL, err := url.Parse(apiEndpoint.Url) + if err != nil { + return apiEndpoint, access, nil, nil, errors.Wrap(err, "creds") + } + + if netrcMachine := getAuthFromNetrc(netrcFinder, req); netrcMachine != nil { + creds := Creds{ + "protocol": credsURL.Scheme, + "host": credsURL.Host, + "username": netrcMachine.Login, + "password": netrcMachine.Password, + "source": "netrc", + } + + return apiEndpoint, access, creds, credsURL, nil + } + + creds, err := getGitCreds(credHelper, ef, req, credsURL) + return apiEndpoint, access, creds, credsURL, err +} + +func getGitCreds(credHelper CredentialHelper, ef EndpointFinder, req *http.Request, u *url.URL) (Creds, error) { + path := strings.TrimPrefix(u.Path, "/") + input := Creds{"protocol": u.Scheme, "host": u.Host, "path": path} + if u.User != nil && u.User.Username() != "" { + input["username"] = u.User.Username() + } + + creds, err := credHelper.Fill(input) + if creds == nil || len(creds) < 1 { + errmsg := fmt.Sprintf("Git credentials for %s not found", u) + if err != nil { + errmsg = errmsg + ":\n" + err.Error() + } else { + errmsg = errmsg + "." + } + err = errors.New(errmsg) + } + + return creds, err +} + +func fillGitCreds(credHelper CredentialHelper, ef EndpointFinder, req *http.Request, u *url.URL) (Creds, error) { + creds, err := getGitCreds(credHelper, ef, req, u) + if err == nil { + tracerx.Printf("Filled credentials for %s", u) + setRequestAuth(req, creds["username"], creds["password"]) + } + + return creds, err +} + +func getAuthFromNetrc(netrcFinder NetrcFinder, req *http.Request) *netrc.Machine { + hostname := req.URL.Host + var host string + + if strings.Contains(hostname, ":") { + var err error + host, _, err = net.SplitHostPort(hostname) + if err != nil { + tracerx.Printf("netrc: error parsing %q: %s", hostname, err) + return nil + } + } else { + host = hostname + } + + return netrcFinder.FindMachine(host) +} + +func setAuthFromNetrc(netrcFinder NetrcFinder, req *http.Request) bool { + if machine := getAuthFromNetrc(netrcFinder, req); machine != nil { + setRequestAuth(req, machine.Login, machine.Password) + return true + } + + return false +} + +func getCredURLForAPI(ef EndpointFinder, operation, remote string, apiEndpoint Endpoint, req *http.Request) (*url.URL, error) { + apiURL, err := url.Parse(apiEndpoint.Url) + if err != nil { + return nil, err + } + + // if the LFS request doesn't match the current LFS url, don't bother + // attempting to set the Authorization header from the LFS or Git remote URLs. 
+ if req.URL.Scheme != apiURL.Scheme || + req.URL.Host != apiURL.Host { + return req.URL, nil + } + + if setRequestAuthFromURL(req, apiURL) { + return nil, nil + } + + if len(remote) > 0 { + if u := ef.GitRemoteURL(remote, operation == "upload"); u != "" { + gitRemoteURL, err := url.Parse(u) + if err != nil { + return nil, err + } + + if gitRemoteURL.Scheme == apiURL.Scheme && + gitRemoteURL.Host == apiURL.Host { + + if setRequestAuthFromURL(req, gitRemoteURL) { + return nil, nil + } + + return gitRemoteURL, nil + } + } + } + + return apiURL, nil +} + +func requestHasAuth(req *http.Request) bool { + if len(req.Header.Get("Authorization")) > 0 { + return true + } + + return len(req.URL.Query().Get("token")) > 0 +} + +func setRequestAuthFromURL(req *http.Request, u *url.URL) bool { + if u.User == nil { + return false + } + + if pass, ok := u.User.Password(); ok { + fmt.Fprintln(os.Stderr, "warning: current Git remote contains credentials") + setRequestAuth(req, u.User.Username(), pass) + return true + } + + return false +} + +func setRequestAuth(req *http.Request, user, pass string) { + // better not be NTLM! + if len(user) == 0 && len(pass) == 0 { + return + } + + token := fmt.Sprintf("%s:%s", user, pass) + auth := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) + req.Header.Set("Authorization", auth) +} + +func getReqOperation(req *http.Request) string { + operation := "download" + if req.Method == "POST" || req.Method == "PUT" { + operation = "upload" + } + return operation +} + +var ( + authenticateHeaders = []string{"Lfs-Authenticate", "Www-Authenticate"} +) + +func getAuthAccess(res *http.Response) Access { + for _, headerName := range authenticateHeaders { + for _, auth := range res.Header[headerName] { + pieces := strings.SplitN(strings.ToLower(auth), " ", 2) + if len(pieces) == 0 { + continue + } + + switch Access(pieces[0]) { + case NegotiateAccess, NTLMAccess: + // When server sends Www-Authentication: Negotiate, it supports both Kerberos and NTLM. + // Since git-lfs current does not support Kerberos, we will return NTLM in this case. 
+ return NTLMAccess + } + } + } + + return BasicAccess +} diff --git a/lfsapi/auth_test.go b/lfsapi/auth_test.go new file mode 100644 index 00000000..a902b10f --- /dev/null +++ b/lfsapi/auth_test.go @@ -0,0 +1,556 @@ +package lfsapi + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + + "github.com/git-lfs/git-lfs/errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type authRequest struct { + Test string +} + +func TestAuthenticateHeaderAccess(t *testing.T) { + tests := map[string]Access{ + "": BasicAccess, + "basic 123": BasicAccess, + "basic": BasicAccess, + "unknown": BasicAccess, + "NTLM": NTLMAccess, + "ntlm": NTLMAccess, + "NTLM 1 2 3": NTLMAccess, + "ntlm 1 2 3": NTLMAccess, + "NEGOTIATE": NTLMAccess, + "negotiate": NTLMAccess, + "NEGOTIATE 1 2 3": NTLMAccess, + "negotiate 1 2 3": NTLMAccess, + } + + for _, key := range authenticateHeaders { + for value, expected := range tests { + res := &http.Response{Header: make(http.Header)} + res.Header.Set(key, value) + t.Logf("%s: %s", key, value) + assert.Equal(t, expected, getAuthAccess(res)) + } + } +} + +func TestDoWithAuthApprove(t *testing.T) { + var called uint32 + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + atomic.AddUint32(&called, 1) + assert.Equal(t, "POST", req.Method) + + body := &authRequest{} + err := json.NewDecoder(req.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Approve", body.Test) + + w.Header().Set("Lfs-Authenticate", "Basic") + actual := req.Header.Get("Authorization") + if len(actual) == 0 { + w.WriteHeader(http.StatusUnauthorized) + return + } + + expected := "Basic " + strings.TrimSpace( + base64.StdEncoding.EncodeToString([]byte("user:pass")), + ) + assert.Equal(t, expected, actual) + })) + defer srv.Close() + + creds := newMockCredentialHelper() + c, err := NewClient(nil, TestEnv(map[string]string{ + "lfs.url": srv.URL + "/repo/lfs", + })) + require.Nil(t, err) + c.Credentials = creds + + assert.Equal(t, NoneAccess, c.Endpoints.AccessFor(srv.URL+"/repo/lfs")) + + req, err := http.NewRequest("POST", srv.URL+"/repo/lfs/foo", nil) + require.Nil(t, err) + + err = MarshalToRequest(req, &authRequest{Test: "Approve"}) + require.Nil(t, err) + + res, err := c.DoWithAuth("", req) + require.Nil(t, err) + + assert.Equal(t, http.StatusOK, res.StatusCode) + assert.True(t, creds.IsApproved(Creds(map[string]string{ + "username": "user", + "password": "pass", + "path": "repo/lfs", + "protocol": "http", + "host": srv.Listener.Addr().String(), + }))) + assert.Equal(t, BasicAccess, c.Endpoints.AccessFor(srv.URL+"/repo/lfs")) + assert.EqualValues(t, 2, called) +} + +func TestDoWithAuthReject(t *testing.T) { + var called uint32 + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + atomic.AddUint32(&called, 1) + assert.Equal(t, "POST", req.Method) + + body := &authRequest{} + err := json.NewDecoder(req.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Reject", body.Test) + + actual := req.Header.Get("Authorization") + expected := "Basic " + strings.TrimSpace( + base64.StdEncoding.EncodeToString([]byte("user:pass")), + ) + + w.Header().Set("Lfs-Authenticate", "Basic") + if actual != expected { + // Write http.StatusUnauthorized to force the credential + // helper to reject the credentials + w.WriteHeader(http.StatusUnauthorized) + } else { + w.WriteHeader(http.StatusOK) + } + })) + defer srv.Close() + 
+ invalidCreds := Creds(map[string]string{ + "username": "user", + "password": "wrong_pass", + "path": "", + "protocol": "http", + "host": srv.Listener.Addr().String(), + }) + + creds := newMockCredentialHelper() + creds.Approve(invalidCreds) + assert.True(t, creds.IsApproved(invalidCreds)) + + c := &Client{ + Credentials: creds, + Endpoints: NewEndpointFinder(TestEnv(map[string]string{ + "lfs.url": srv.URL, + })), + } + + req, err := http.NewRequest("POST", srv.URL, nil) + require.Nil(t, err) + + err = MarshalToRequest(req, &authRequest{Test: "Reject"}) + require.Nil(t, err) + + res, err := c.DoWithAuth("", req) + require.Nil(t, err) + + assert.Equal(t, http.StatusOK, res.StatusCode) + assert.False(t, creds.IsApproved(invalidCreds)) + assert.True(t, creds.IsApproved(Creds(map[string]string{ + "username": "user", + "password": "pass", + "path": "", + "protocol": "http", + "host": srv.Listener.Addr().String(), + }))) + assert.EqualValues(t, 3, called) +} + +type mockCredentialHelper struct { + Approved map[string]Creds +} + +func newMockCredentialHelper() *mockCredentialHelper { + return &mockCredentialHelper{ + Approved: make(map[string]Creds), + } +} + +func (m *mockCredentialHelper) Fill(input Creds) (Creds, error) { + if found, ok := m.Approved[credsToKey(input)]; ok { + return found, nil + } + + output := make(Creds) + for key, value := range input { + output[key] = value + } + if _, ok := output["username"]; !ok { + output["username"] = "user" + } + output["password"] = "pass" + return output, nil +} + +func (m *mockCredentialHelper) Approve(creds Creds) error { + m.Approved[credsToKey(creds)] = creds + return nil +} + +func (m *mockCredentialHelper) Reject(creds Creds) error { + delete(m.Approved, credsToKey(creds)) + return nil +} + +func (m *mockCredentialHelper) IsApproved(creds Creds) bool { + if found, ok := m.Approved[credsToKey(creds)]; ok { + return found["password"] == creds["password"] + } + return false +} + +func credsToKey(creds Creds) string { + var kvs []string + for _, k := range []string{"protocol", "host", "path"} { + kvs = append(kvs, fmt.Sprintf("%s:%s", k, creds[k])) + } + + return strings.Join(kvs, " ") +} + +func basicAuth(user, pass string) string { + value := fmt.Sprintf("%s:%s", user, pass) + return fmt.Sprintf("Basic %s", strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(value)))) +} + +type getCredsExpected struct { + Endpoint string + Access Access + Creds Creds + CredsURL string + Authorization string +} + +type getCredsTest struct { + Remote string + Method string + Href string + Header map[string]string + Config map[string]string + Expected getCredsExpected +} + +func TestGetCreds(t *testing.T) { + tests := map[string]getCredsTest{ + "no access": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + }, + Expected: getCredsExpected{ + Access: NoneAccess, + Endpoint: "https://git-server.com/repo/lfs", + }, + }, + "basic access": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo/lfs", + Authorization: basicAuth("git-server.com", "monkey"), + CredsURL: "https://git-server.com/repo/lfs", + Creds: map[string]string{ + "protocol": "https", + "host": 
"git-server.com", + "username": "git-server.com", + "password": "monkey", + "path": "repo/lfs", + }, + }, + }, + "ntlm": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "ntlm", + }, + Expected: getCredsExpected{ + Access: NTLMAccess, + Endpoint: "https://git-server.com/repo/lfs", + CredsURL: "https://git-server.com/repo/lfs", + Creds: map[string]string{ + "protocol": "https", + "host": "git-server.com", + "username": "git-server.com", + "password": "monkey", + "path": "repo/lfs", + }, + }, + }, + "ntlm with netrc": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://netrc-host.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://netrc-host.com/repo/lfs", + "lfs.https://netrc-host.com/repo/lfs.access": "ntlm", + }, + Expected: getCredsExpected{ + Access: NTLMAccess, + Endpoint: "https://netrc-host.com/repo/lfs", + CredsURL: "https://netrc-host.com/repo/lfs", + Creds: map[string]string{ + "protocol": "https", + "host": "netrc-host.com", + "username": "abc", + "password": "def", + "source": "netrc", + }, + }, + }, + "custom auth": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/lfs/locks", + Header: map[string]string{ + "Authorization": "custom", + }, + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo/lfs", + Authorization: "custom", + }, + }, + "netrc": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://netrc-host.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://netrc-host.com/repo/lfs", + "lfs.https://netrc-host.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://netrc-host.com/repo/lfs", + Authorization: basicAuth("abc", "def"), + }, + }, + "username in url": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://user@git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://user@git-server.com/repo/lfs", + Authorization: basicAuth("user", "monkey"), + CredsURL: "https://user@git-server.com/repo/lfs", + Creds: map[string]string{ + "protocol": "https", + "host": "git-server.com", + "username": "user", + "password": "monkey", + "path": "repo/lfs", + }, + }, + }, + "different remote url, basic access": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + "remote.origin.url": "https://git-server.com/repo", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo/lfs", + Authorization: basicAuth("git-server.com", "monkey"), + CredsURL: "https://git-server.com/repo", + Creds: map[string]string{ + "protocol": "https", + "host": "git-server.com", + "username": "git-server.com", + "password": "monkey", + "path": "repo", + }, + }, + }, + "api url auth": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/locks", + Config: 
map[string]string{ + "lfs.url": "https://user:pass@git-server.com/repo", + "lfs.https://git-server.com/repo.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://user:pass@git-server.com/repo", + Authorization: basicAuth("user", "pass"), + }, + }, + "git url auth": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com/repo/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo", + "lfs.https://git-server.com/repo.access": "basic", + "remote.origin.url": "https://user:pass@git-server.com/repo", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo", + Authorization: basicAuth("user", "pass"), + }, + }, + "scheme mismatch": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "http://git-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo/lfs", + Authorization: basicAuth("git-server.com", "monkey"), + CredsURL: "http://git-server.com/repo/lfs/locks", + Creds: map[string]string{ + "protocol": "http", + "host": "git-server.com", + "username": "git-server.com", + "password": "monkey", + "path": "repo/lfs/locks", + }, + }, + }, + "host mismatch": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://lfs-server.com/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo/lfs", + Authorization: basicAuth("lfs-server.com", "monkey"), + CredsURL: "https://lfs-server.com/repo/lfs/locks", + Creds: map[string]string{ + "protocol": "https", + "host": "lfs-server.com", + "username": "lfs-server.com", + "password": "monkey", + "path": "repo/lfs/locks", + }, + }, + }, + "port mismatch": getCredsTest{ + Remote: "origin", + Method: "GET", + Href: "https://git-server.com:8080/repo/lfs/locks", + Config: map[string]string{ + "lfs.url": "https://git-server.com/repo/lfs", + "lfs.https://git-server.com/repo/lfs.access": "basic", + }, + Expected: getCredsExpected{ + Access: BasicAccess, + Endpoint: "https://git-server.com/repo/lfs", + Authorization: basicAuth("git-server.com:8080", "monkey"), + CredsURL: "https://git-server.com:8080/repo/lfs/locks", + Creds: map[string]string{ + "protocol": "https", + "host": "git-server.com:8080", + "username": "git-server.com:8080", + "password": "monkey", + "path": "repo/lfs/locks", + }, + }, + }, + } + + credHelper := &fakeCredentialFiller{} + netrcFinder := &fakeNetrc{} + for desc, test := range tests { + t.Log(desc) + req, err := http.NewRequest(test.Method, test.Href, nil) + if err != nil { + t.Errorf("[%s] %s", desc, err) + continue + } + + for key, value := range test.Header { + req.Header.Set(key, value) + } + + ef := NewEndpointFinder(TestEnv(test.Config)) + endpoint, access, creds, credsURL, err := getCreds(credHelper, netrcFinder, ef, test.Remote, req) + if !assert.Nil(t, err) { + continue + } + assert.Equal(t, test.Expected.Endpoint, endpoint.Url, "endpoint") + assert.Equal(t, test.Expected.Access, access, "access") + assert.Equal(t, test.Expected.Authorization, req.Header.Get("Authorization"), "authorization") + + if test.Expected.Creds != nil { + assert.EqualValues(t, test.Expected.Creds, creds) + } else { + 
assert.Nil(t, creds, "creds") + } + + if len(test.Expected.CredsURL) > 0 { + if assert.NotNil(t, credsURL, "credURL") { + assert.Equal(t, test.Expected.CredsURL, credsURL.String(), "credURL") + } + } else { + assert.Nil(t, credsURL) + } + } +} + +type fakeCredentialFiller struct{} + +func (f *fakeCredentialFiller) Fill(input Creds) (Creds, error) { + output := make(Creds) + for key, value := range input { + output[key] = value + } + if _, ok := output["username"]; !ok { + output["username"] = input["host"] + } + output["password"] = "monkey" + return output, nil +} + +func (f *fakeCredentialFiller) Approve(creds Creds) error { + return errors.New("Not implemented") +} + +func (f *fakeCredentialFiller) Reject(creds Creds) error { + return errors.New("Not implemented") +} diff --git a/lfsapi/body.go b/lfsapi/body.go new file mode 100644 index 00000000..3b54c9f2 --- /dev/null +++ b/lfsapi/body.go @@ -0,0 +1,36 @@ +package lfsapi + +import ( + "bytes" + "encoding/json" + "io" + "net/http" +) + +type ReadSeekCloser interface { + io.Seeker + io.ReadCloser +} + +func MarshalToRequest(req *http.Request, obj interface{}) error { + by, err := json.Marshal(obj) + if err != nil { + return err + } + + req.ContentLength = int64(len(by)) + req.Body = NewByteBody(by) + return nil +} + +func NewByteBody(by []byte) ReadSeekCloser { + return &closingByteReader{Reader: bytes.NewReader(by)} +} + +type closingByteReader struct { + *bytes.Reader +} + +func (r *closingByteReader) Close() error { + return nil +} diff --git a/httputil/certs.go b/lfsapi/certs.go similarity index 75% rename from httputil/certs.go rename to lfsapi/certs.go index 95ad1f6c..a7e9ef3f 100644 --- a/httputil/certs.go +++ b/lfsapi/certs.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import ( "crypto/x509" @@ -6,25 +6,18 @@ import ( "io/ioutil" "path/filepath" - "github.com/git-lfs/git-lfs/config" "github.com/rubyist/tracerx" ) // isCertVerificationDisabledForHost returns whether SSL certificate verification // has been disabled for the given host, or globally -func isCertVerificationDisabledForHost(cfg *config.Configuration, host string) bool { - hostSslVerify, _ := cfg.Git.Get(fmt.Sprintf("http.https://%v/.sslverify", host)) +func isCertVerificationDisabledForHost(c *Client, host string) bool { + hostSslVerify, _ := c.gitEnv.Get(fmt.Sprintf("http.https://%v/.sslverify", host)) if hostSslVerify == "false" { return true } - globalSslVerify, _ := cfg.Git.Get("http.sslverify") - if globalSslVerify == "false" || cfg.Os.Bool("GIT_SSL_NO_VERIFY", false) { - return true - } - - return false - + return c.SkipSSLVerify } // getRootCAsForHost returns a certificate pool for that specific host (which may @@ -32,50 +25,47 @@ func isCertVerificationDisabledForHost(cfg *config.Configuration, host string) b // source which is not included by default in the golang certificate search) // May return nil if it doesn't have anything to add, in which case the default // RootCAs will be used if passed to TLSClientConfig.RootCAs -func getRootCAsForHost(cfg *config.Configuration, host string) *x509.CertPool { - +func getRootCAsForHost(c *Client, host string) *x509.CertPool { // don't init pool, want to return nil not empty if none found; init only on successful add cert var pool *x509.CertPool // gitconfig first - pool = appendRootCAsForHostFromGitconfig(cfg, pool, host) + pool = appendRootCAsForHostFromGitconfig(c.osEnv, c.gitEnv, pool, host) // Platform specific return appendRootCAsForHostFromPlatform(pool, host) - } -func 
appendRootCAsForHostFromGitconfig(cfg *config.Configuration, pool *x509.CertPool, host string) *x509.CertPool { +func appendRootCAsForHostFromGitconfig(osEnv Env, gitEnv Env, pool *x509.CertPool, host string) *x509.CertPool { // Accumulate certs from all these locations: // GIT_SSL_CAINFO first - if cafile, _ := cfg.Os.Get("GIT_SSL_CAINFO"); len(cafile) > 0 { + if cafile, _ := osEnv.Get("GIT_SSL_CAINFO"); len(cafile) > 0 { return appendCertsFromFile(pool, cafile) } // http./.sslcainfo or http..sslcainfo // we know we have simply "host" or "host:port" hostKeyWithSlash := fmt.Sprintf("http.https://%v/.sslcainfo", host) - if cafile, ok := cfg.Git.Get(hostKeyWithSlash); ok { + if cafile, ok := gitEnv.Get(hostKeyWithSlash); ok { return appendCertsFromFile(pool, cafile) } hostKeyWithoutSlash := fmt.Sprintf("http.https://%v.sslcainfo", host) - if cafile, ok := cfg.Git.Get(hostKeyWithoutSlash); ok { + if cafile, ok := gitEnv.Get(hostKeyWithoutSlash); ok { return appendCertsFromFile(pool, cafile) } // http.sslcainfo - if cafile, ok := cfg.Git.Get("http.sslcainfo"); ok { + if cafile, ok := gitEnv.Get("http.sslcainfo"); ok { return appendCertsFromFile(pool, cafile) } // GIT_SSL_CAPATH - if cadir, _ := cfg.Os.Get("GIT_SSL_CAPATH"); len(cadir) > 0 { + if cadir, _ := osEnv.Get("GIT_SSL_CAPATH"); len(cadir) > 0 { return appendCertsFromFilesInDir(pool, cadir) } // http.sslcapath - if cadir, ok := cfg.Git.Get("http.sslcapath"); ok { + if cadir, ok := gitEnv.Get("http.sslcapath"); ok { return appendCertsFromFilesInDir(pool, cadir) } return pool - } func appendCertsFromFilesInDir(pool *x509.CertPool, dir string) *x509.CertPool { @@ -120,6 +110,7 @@ func appendCerts(pool *x509.CertPool, certs []*x509.Certificate) *x509.CertPool return pool } + func appendCertsFromPEMData(pool *x509.CertPool, data []byte) *x509.CertPool { if len(data) == 0 { return pool @@ -138,5 +129,4 @@ func appendCertsFromPEMData(pool *x509.CertPool, data []byte) *x509.CertPool { return pool } return ret - } diff --git a/httputil/certs_darwin.go b/lfsapi/certs_darwin.go similarity index 99% rename from httputil/certs_darwin.go rename to lfsapi/certs_darwin.go index 3ff407d7..513678fc 100644 --- a/httputil/certs_darwin.go +++ b/lfsapi/certs_darwin.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import ( "crypto/x509" diff --git a/httputil/certs_freebsd.go b/lfsapi/certs_freebsd.go similarity index 90% rename from httputil/certs_freebsd.go rename to lfsapi/certs_freebsd.go index e665ccfa..989f4ee9 100644 --- a/httputil/certs_freebsd.go +++ b/lfsapi/certs_freebsd.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import "crypto/x509" diff --git a/httputil/certs_linux.go b/lfsapi/certs_linux.go similarity index 90% rename from httputil/certs_linux.go rename to lfsapi/certs_linux.go index e665ccfa..989f4ee9 100644 --- a/httputil/certs_linux.go +++ b/lfsapi/certs_linux.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import "crypto/x509" diff --git a/httputil/certs_openbsd.go b/lfsapi/certs_openbsd.go similarity index 90% rename from httputil/certs_openbsd.go rename to lfsapi/certs_openbsd.go index e665ccfa..989f4ee9 100644 --- a/httputil/certs_openbsd.go +++ b/lfsapi/certs_openbsd.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import "crypto/x509" diff --git a/httputil/certs_test.go b/lfsapi/certs_test.go similarity index 60% rename from httputil/certs_test.go rename to lfsapi/certs_test.go index c8ae1320..a1e8c421 100644 --- a/httputil/certs_test.go +++ b/lfsapi/certs_test.go @@ -1,13 +1,13 @@ -package httputil +package lfsapi 
import ( "fmt" "io/ioutil" + "net/http" "os" "path/filepath" "testing" - "github.com/git-lfs/git-lfs/config" "github.com/stretchr/testify/assert" ) @@ -60,12 +60,13 @@ func TestCertFromSSLCAInfoConfig(t *testing.T) { // Test http..sslcainfo for _, hostName := range sslCAInfoConfigHostNames { hostKey := fmt.Sprintf("http.https://%v.sslcainfo", hostName) - cfg := config.NewFrom(config.Values{ - Git: map[string]string{hostKey: tempfile.Name()}, - }) + c, err := NewClient(nil, TestEnv(map[string]string{ + hostKey: tempfile.Name(), + })) + assert.Nil(t, err) for _, matchedHostTest := range sslCAInfoMatchedHostTests { - pool := getRootCAsForHost(cfg, matchedHostTest.hostName) + pool := getRootCAsForHost(c, matchedHostTest.hostName) var shouldOrShouldnt string if matchedHostTest.shouldMatch { @@ -81,16 +82,16 @@ func TestCertFromSSLCAInfoConfig(t *testing.T) { } // Test http.sslcainfo - cfg := config.NewFrom(config.Values{ - Git: map[string]string{"http.sslcainfo": tempfile.Name()}, - }) + c, err := NewClient(nil, TestEnv(map[string]string{ + "http.sslcainfo": tempfile.Name(), + })) + assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { - pool := getRootCAsForHost(cfg, matchedHostTest.hostName) + pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } - } func TestCertFromSSLCAInfoEnv(t *testing.T) { @@ -102,18 +103,16 @@ func TestCertFromSSLCAInfoEnv(t *testing.T) { assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSL_CAINFO": tempfile.Name(), - }, - }) + c, err := NewClient(TestEnv(map[string]string{ + "GIT_SSL_CAINFO": tempfile.Name(), + }), nil) + assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { - pool := getRootCAsForHost(cfg, matchedHostTest.hostName) + pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } - } func TestCertFromSSLCAPathConfig(t *testing.T) { @@ -124,16 +123,17 @@ func TestCertFromSSLCAPathConfig(t *testing.T) { err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) assert.Nil(t, err, "Error creating cert file") - cfg := config.NewFrom(config.Values{ - Git: map[string]string{"http.sslcapath": tempdir}, - }) + c, err := NewClient(nil, TestEnv(map[string]string{ + "http.sslcapath": tempdir, + })) + + assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { - pool := getRootCAsForHost(cfg, matchedHostTest.hostName) + pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } - } func TestCertFromSSLCAPathEnv(t *testing.T) { @@ -144,52 +144,87 @@ func TestCertFromSSLCAPathEnv(t *testing.T) { err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) assert.Nil(t, err, "Error creating cert file") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSL_CAPATH": tempdir, - }, - }) + c, err := NewClient(TestEnv(map[string]string{ + "GIT_SSL_CAPATH": tempdir, + }), nil) + assert.Nil(t, err) // Should match any host at all for _, matchedHostTest := range sslCAInfoMatchedHostTests { - pool := getRootCAsForHost(cfg, matchedHostTest.hostName) + pool := getRootCAsForHost(c, matchedHostTest.hostName) assert.NotNil(t, pool) } - } func TestCertVerifyDisabledGlobalEnv(t *testing.T) { - empty := config.NewFrom(config.Values{}) - assert.False(t, isCertVerificationDisabledForHost(empty, 
"anyhost.com")) + empty := &Client{} + httpClient := empty.httpClient("anyhost.com") + tr, ok := httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) + } - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSL_NO_VERIFY": "1", - }, - }) - assert.True(t, isCertVerificationDisabledForHost(cfg, "anyhost.com")) + c, err := NewClient(TestEnv(map[string]string{ + "GIT_SSL_NO_VERIFY": "1", + }), nil) + + assert.Nil(t, err) + + httpClient = c.httpClient("anyhost.com") + tr, ok = httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) + } } func TestCertVerifyDisabledGlobalConfig(t *testing.T) { - def := config.New() - assert.False(t, isCertVerificationDisabledForHost(def, "anyhost.com")) + def := &Client{} + httpClient := def.httpClient("anyhost.com") + tr, ok := httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) + } - cfg := config.NewFrom(config.Values{ - Git: map[string]string{"http.sslverify": "false"}, - }) - assert.True(t, isCertVerificationDisabledForHost(cfg, "anyhost.com")) + c, err := NewClient(nil, TestEnv(map[string]string{ + "http.sslverify": "false", + })) + assert.Nil(t, err) + + httpClient = c.httpClient("anyhost.com") + tr, ok = httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) + } } func TestCertVerifyDisabledHostConfig(t *testing.T) { - def := config.New() - assert.False(t, isCertVerificationDisabledForHost(def, "specifichost.com")) - assert.False(t, isCertVerificationDisabledForHost(def, "otherhost.com")) + def := &Client{} + httpClient := def.httpClient("specifichost.com") + tr, ok := httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) + } - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "http.https://specifichost.com/.sslverify": "false", - }, - }) - assert.True(t, isCertVerificationDisabledForHost(cfg, "specifichost.com")) - assert.False(t, isCertVerificationDisabledForHost(cfg, "otherhost.com")) + httpClient = def.httpClient("otherhost.com") + tr, ok = httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) + } + + c, err := NewClient(nil, TestEnv(map[string]string{ + "http.https://specifichost.com/.sslverify": "false", + })) + assert.Nil(t, err) + + httpClient = c.httpClient("specifichost.com") + tr, ok = httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.True(t, tr.TLSClientConfig.InsecureSkipVerify) + } + + httpClient = c.httpClient("otherhost.com") + tr, ok = httpClient.Transport.(*http.Transport) + if assert.True(t, ok) { + assert.False(t, tr.TLSClientConfig.InsecureSkipVerify) + } } diff --git a/httputil/certs_windows.go b/lfsapi/certs_windows.go similarity index 92% rename from httputil/certs_windows.go rename to lfsapi/certs_windows.go index c762dba9..d2f8a890 100644 --- a/httputil/certs_windows.go +++ b/lfsapi/certs_windows.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import "crypto/x509" diff --git a/lfsapi/client.go b/lfsapi/client.go new file mode 100644 index 00000000..2f7213d8 --- /dev/null +++ b/lfsapi/client.go @@ -0,0 +1,217 @@ +package lfsapi + +import ( + "crypto/tls" + "net" + "net/http" + "net/url" + "os" + "strings" + "time" + + "github.com/git-lfs/git-lfs/errors" + 
"github.com/rubyist/tracerx" +) + +var UserAgent = "git-lfs" + +const MediaType = "application/vnd.git-lfs+json; charset=utf-8" + +func (c *Client) NewRequest(method string, e Endpoint, suffix string, body interface{}) (*http.Request, error) { + sshRes, err := c.resolveSSHEndpoint(e, method) + if err != nil { + tracerx.Printf("ssh: %s failed, error: %s, message: %s", + e.SshUserAndHost, err.Error(), sshRes.Message, + ) + + if len(sshRes.Message) > 0 { + return nil, errors.Wrap(err, sshRes.Message) + } + return nil, err + } + + prefix := e.Url + if len(sshRes.Href) > 0 { + prefix = sshRes.Href + } + + req, err := http.NewRequest(method, joinURL(prefix, suffix), nil) + if err != nil { + return req, err + } + + for key, value := range sshRes.Header { + req.Header.Set(key, value) + } + req.Header.Set("Accept", MediaType) + + if body != nil { + if merr := MarshalToRequest(req, body); merr != nil { + return req, merr + } + req.Header.Set("Content-Type", MediaType) + } + + return req, err +} + +const slash = "/" + +func joinURL(prefix, suffix string) string { + if strings.HasSuffix(prefix, slash) { + return prefix + suffix + } + return prefix + slash + suffix +} + +func (c *Client) Do(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", UserAgent) + + res, err := c.doWithRedirects(c.httpClient(req.Host), req, nil) + if err != nil { + return res, err + } + + return res, c.handleResponse(res) +} + +func (c *Client) doWithRedirects(cli *http.Client, req *http.Request, via []*http.Request) (*http.Response, error) { + c.traceRequest(req) + if err := c.prepareRequestBody(req); err != nil { + return nil, err + } + + start := time.Now() + res, err := cli.Do(req) + if err != nil { + return res, err + } + + c.traceResponse(res) + c.startResponseStats(res, start) + + if res.StatusCode != 307 { + return res, err + } + + redirectTo := res.Header.Get("Location") + locurl, err := url.Parse(redirectTo) + if err == nil && !locurl.IsAbs() { + locurl = req.URL.ResolveReference(locurl) + redirectTo = locurl.String() + } + + via = append(via, req) + if len(via) >= 3 { + return res, errors.New("too many redirects") + } + + redirectedReq, err := newRequestForRetry(req, redirectTo) + if err != nil { + return res, err + } + + return c.doWithRedirects(cli, redirectedReq, via) +} + +func (c *Client) httpClient(host string) *http.Client { + c.clientMu.Lock() + defer c.clientMu.Unlock() + + if c.gitEnv == nil { + c.gitEnv = make(TestEnv) + } + + if c.osEnv == nil { + c.osEnv = make(TestEnv) + } + + if c.hostClients == nil { + c.hostClients = make(map[string]*http.Client) + } + + if client, ok := c.hostClients[host]; ok { + return client + } + + concurrentTransfers := c.ConcurrentTransfers + if concurrentTransfers < 1 { + concurrentTransfers = 3 + } + + dialtime := c.DialTimeout + if dialtime < 1 { + dialtime = 30 + } + + keepalivetime := c.KeepaliveTimeout + if keepalivetime < 1 { + keepalivetime = 1800 + } + + tlstime := c.TLSTimeout + if tlstime < 1 { + tlstime = 30 + } + + tr := &http.Transport{ + Proxy: proxyFromClient(c), + Dial: (&net.Dialer{ + Timeout: time.Duration(dialtime) * time.Second, + KeepAlive: time.Duration(keepalivetime) * time.Second, + }).Dial, + TLSHandshakeTimeout: time.Duration(tlstime) * time.Second, + MaxIdleConnsPerHost: concurrentTransfers, + } + + tr.TLSClientConfig = &tls.Config{} + if isCertVerificationDisabledForHost(c, host) { + tr.TLSClientConfig.InsecureSkipVerify = true + } else { + tr.TLSClientConfig.RootCAs = getRootCAsForHost(c, host) + } + + httpClient := 
&http.Client{ + Transport: tr, + CheckRedirect: func(*http.Request, []*http.Request) error { + return http.ErrUseLastResponse + }, + } + + c.hostClients[host] = httpClient + if c.VerboseOut == nil { + c.VerboseOut = os.Stderr + } + + return httpClient +} + +func (c *Client) CurrentUser() (string, string) { + userName, _ := c.gitEnv.Get("user.name") + userEmail, _ := c.gitEnv.Get("user.email") + return userName, userEmail +} + +func newRequestForRetry(req *http.Request, location string) (*http.Request, error) { + newReq, err := http.NewRequest(req.Method, location, nil) + if err != nil { + return nil, err + } + + for key := range req.Header { + if key == "Authorization" { + if req.URL.Scheme != newReq.URL.Scheme || req.URL.Host != newReq.URL.Host { + continue + } + } + newReq.Header.Set(key, req.Header.Get(key)) + } + + oldestURL := strings.SplitN(req.URL.String(), "?", 2)[0] + newURL := strings.SplitN(newReq.URL.String(), "?", 2)[0] + tracerx.Printf("api: redirect %s %s to %s", req.Method, oldestURL, newURL) + + newReq.Body = req.Body + newReq.ContentLength = req.ContentLength + return newReq, nil +} diff --git a/lfsapi/client_test.go b/lfsapi/client_test.go new file mode 100644 index 00000000..4afe3ceb --- /dev/null +++ b/lfsapi/client_test.go @@ -0,0 +1,183 @@ +package lfsapi + +import ( + "encoding/json" + "fmt" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type redirectTest struct { + Test string +} + +func TestClientRedirect(t *testing.T) { + var called1 uint32 + var called2 uint32 + srv2 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called2, 1) + t.Logf("srv2 req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + switch r.URL.Path { + case "/ok": + assert.Equal(t, "", r.Header.Get("Authorization")) + assert.Equal(t, "1", r.Header.Get("A")) + body := &redirectTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "External", body.Test) + + w.WriteHeader(200) + default: + w.WriteHeader(404) + } + })) + + srv1 := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called1, 1) + t.Logf("srv1 req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + switch r.URL.Path { + case "/local": + w.Header().Set("Location", "/ok") + w.WriteHeader(307) + case "/external": + w.Header().Set("Location", srv2.URL+"/ok") + w.WriteHeader(307) + case "/ok": + assert.Equal(t, "auth", r.Header.Get("Authorization")) + assert.Equal(t, "1", r.Header.Get("A")) + body := &redirectTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Local", body.Test) + + w.WriteHeader(200) + default: + w.WriteHeader(404) + } + })) + defer srv1.Close() + defer srv2.Close() + + c := &Client{} + + // local redirect + req, err := http.NewRequest("POST", srv1.URL+"/local", nil) + require.Nil(t, err) + req.Header.Set("Authorization", "auth") + req.Header.Set("A", "1") + + require.Nil(t, MarshalToRequest(req, &redirectTest{Test: "Local"})) + + res, err := c.Do(req) + require.Nil(t, err) + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 2, called1) + assert.EqualValues(t, 0, called2) + + // external redirect + req, err = http.NewRequest("POST", srv1.URL+"/external", nil) + require.Nil(t, err) + req.Header.Set("Authorization", "auth") + req.Header.Set("A", "1") + + require.Nil(t, MarshalToRequest(req, 
&redirectTest{Test: "External"})) + + res, err = c.Do(req) + require.Nil(t, err) + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 3, called1) + assert.EqualValues(t, 1, called2) +} + +func TestNewClient(t *testing.T) { + c, err := NewClient(TestEnv(map[string]string{}), TestEnv(map[string]string{ + "lfs.dialtimeout": "151", + "lfs.keepalive": "152", + "lfs.tlstimeout": "153", + "lfs.concurrenttransfers": "154", + })) + + require.Nil(t, err) + assert.Equal(t, 151, c.DialTimeout) + assert.Equal(t, 152, c.KeepaliveTimeout) + assert.Equal(t, 153, c.TLSTimeout) + assert.Equal(t, 154, c.ConcurrentTransfers) +} + +func TestNewClientWithGitSSLVerify(t *testing.T) { + c, err := NewClient(nil, nil) + assert.Nil(t, err) + assert.False(t, c.SkipSSLVerify) + + for _, value := range []string{"true", "1", "t"} { + c, err = NewClient(TestEnv(map[string]string{}), TestEnv(map[string]string{ + "http.sslverify": value, + })) + t.Logf("http.sslverify: %q", value) + assert.Nil(t, err) + assert.False(t, c.SkipSSLVerify) + } + + for _, value := range []string{"false", "0", "f"} { + c, err = NewClient(TestEnv(map[string]string{}), TestEnv(map[string]string{ + "http.sslverify": value, + })) + t.Logf("http.sslverify: %q", value) + assert.Nil(t, err) + assert.True(t, c.SkipSSLVerify) + } +} + +func TestNewClientWithOSSSLVerify(t *testing.T) { + c, err := NewClient(nil, nil) + assert.Nil(t, err) + assert.False(t, c.SkipSSLVerify) + + for _, value := range []string{"false", "0", "f"} { + c, err = NewClient(TestEnv(map[string]string{ + "GIT_SSL_NO_VERIFY": value, + }), TestEnv(map[string]string{})) + t.Logf("GIT_SSL_NO_VERIFY: %q", value) + assert.Nil(t, err) + assert.False(t, c.SkipSSLVerify) + } + + for _, value := range []string{"true", "1", "t"} { + c, err = NewClient(TestEnv(map[string]string{ + "GIT_SSL_NO_VERIFY": value, + }), TestEnv(map[string]string{})) + t.Logf("GIT_SSL_NO_VERIFY: %q", value) + assert.Nil(t, err) + assert.True(t, c.SkipSSLVerify) + } +} + +func TestNewRequest(t *testing.T) { + tests := [][]string{ + {"https://example.com", "a", "https://example.com/a"}, + {"https://example.com/", "a", "https://example.com/a"}, + {"https://example.com/a", "b", "https://example.com/a/b"}, + {"https://example.com/a/", "b", "https://example.com/a/b"}, + } + + for _, test := range tests { + c, err := NewClient(nil, TestEnv(map[string]string{ + "lfs.url": test[0], + })) + require.Nil(t, err) + + req, err := c.NewRequest("POST", c.Endpoints.Endpoint("", ""), test[1], nil) + require.Nil(t, err) + assert.Equal(t, "POST", req.Method) + assert.Equal(t, test[2], req.URL.String(), fmt.Sprintf("endpoint: %s, suffix: %s, expected: %s", test[0], test[1], test[2])) + } +} diff --git a/lfsapi/creds.go b/lfsapi/creds.go new file mode 100644 index 00000000..b0b2e604 --- /dev/null +++ b/lfsapi/creds.go @@ -0,0 +1,95 @@ +package lfsapi + +import ( + "bytes" + "fmt" + "os/exec" + "strings" +) + +type CredentialHelper interface { + Fill(Creds) (Creds, error) + Reject(Creds) error + Approve(Creds) error +} + +type Creds map[string]string + +func bufferCreds(c Creds) *bytes.Buffer { + buf := new(bytes.Buffer) + + for k, v := range c { + buf.Write([]byte(k)) + buf.Write([]byte("=")) + buf.Write([]byte(v)) + buf.Write([]byte("\n")) + } + + return buf +} + +type commandCredentialHelper struct { + SkipPrompt bool +} + +func (h *commandCredentialHelper) Fill(creds Creds) (Creds, error) { + return h.exec("fill", creds) +} + +func (h *commandCredentialHelper) Reject(creds Creds) error { + _, err := h.exec("reject", creds) + 
return err +} + +func (h *commandCredentialHelper) Approve(creds Creds) error { + _, err := h.exec("approve", creds) + return err +} + +func (h *commandCredentialHelper) exec(subcommand string, input Creds) (Creds, error) { + output := new(bytes.Buffer) + cmd := exec.Command("git", "credential", subcommand) + cmd.Stdin = bufferCreds(input) + cmd.Stdout = output + /* + There is a reason we don't hook up stderr here: + Git's credential cache daemon helper does not close its stderr, so if this + process is the process that fires up the daemon, it will wait forever + (until the daemon exits, really) trying to read from stderr. + + See https://github.com/git-lfs/git-lfs/issues/117 for more details. + */ + + err := cmd.Start() + if err == nil { + err = cmd.Wait() + } + + if _, ok := err.(*exec.ExitError); ok { + if h.SkipPrompt { + return nil, fmt.Errorf("Change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s.", + input["protocol"], input["host"]) + } + + // 'git credential' exits with 128 if the helper doesn't fill the username + // and password values. + if subcommand == "fill" && err.Error() == "exit status 128" { + return nil, nil + } + } + + if err != nil { + return nil, fmt.Errorf("'git credential %s' error: %s\n", subcommand, err.Error()) + } + + creds := make(Creds) + for _, line := range strings.Split(output.String(), "\n") { + pieces := strings.SplitN(line, "=", 2) + if len(pieces) < 2 || len(pieces[1]) < 1 { + continue + } + creds[pieces[0]] = pieces[1] + } + + return creds, nil +} diff --git a/config/endpoint.go b/lfsapi/endpoint.go similarity index 56% rename from config/endpoint.go rename to lfsapi/endpoint.go index d54a79e5..5464d7bb 100644 --- a/config/endpoint.go +++ b/lfsapi/endpoint.go @@ -1,14 +1,13 @@ -package config +package lfsapi import ( "fmt" "net/url" - "path" "regexp" "strings" ) -const EndpointUrlUnknown = "" +const UrlUnknown = "" // An Endpoint describes how to access a Git LFS server. type Endpoint struct { @@ -18,62 +17,6 @@ type Endpoint struct { SshPort string } -// NewEndpointFromCloneURL creates an Endpoint from a git clone URL by appending -// "[.git]/info/lfs". -func NewEndpointFromCloneURL(url string) Endpoint { - return NewEndpointFromCloneURLWithConfig(url, New()) -} - -// NewEndpoint initializes a new Endpoint for a given URL. -func NewEndpoint(rawurl string) Endpoint { - return NewEndpointWithConfig(rawurl, New()) -} - -// NewEndpointFromCloneURLWithConfig creates an Endpoint from a git clone URL by appending -// "[.git]/info/lfs". -func NewEndpointFromCloneURLWithConfig(url string, c *Configuration) Endpoint { - e := NewEndpointWithConfig(url, c) - if e.Url == EndpointUrlUnknown { - return e - } - - if strings.HasSuffix(url, "/") { - e.Url = url[0 : len(url)-1] - } - - // When using main remote URL for HTTP, append info/lfs - if path.Ext(e.Url) == ".git" { - e.Url += "/info/lfs" - } else { - e.Url += ".git/info/lfs" - } - - return e -} - -// NewEndpointWithConfig initializes a new Endpoint for a given URL. 
-func NewEndpointWithConfig(rawurl string, c *Configuration) Endpoint { - rawurl = c.ReplaceUrlAlias(rawurl) - u, err := url.Parse(rawurl) - if err != nil { - return endpointFromBareSshUrl(rawurl) - } - - switch u.Scheme { - case "ssh": - return endpointFromSshUrl(u) - case "http", "https": - return endpointFromHttpUrl(u) - case "git": - return endpointFromGitUrl(u, c) - case "": - return endpointFromBareSshUrl(u.String()) - default: - // Just passthrough to preserve - return Endpoint{Url: rawurl} - } -} - // endpointFromBareSshUrl constructs a new endpoint from a bare SSH URL: // // user@host.com:path/to/repo.git @@ -95,7 +38,7 @@ func endpointFromBareSshUrl(rawurl string) Endpoint { newrawurl := fmt.Sprintf("ssh://%v", newPath) newu, err := url.Parse(newrawurl) if err != nil { - return Endpoint{Url: EndpointUrlUnknown} + return Endpoint{Url: UrlUnknown} } return endpointFromSshUrl(newu) @@ -108,7 +51,7 @@ func endpointFromSshUrl(u *url.URL) Endpoint { regex := regexp.MustCompile(`^([^\:]+)(?:\:(\d+))?$`) match := regex.FindStringSubmatch(u.Host) if match == nil || len(match) < 2 { - endpoint.Url = EndpointUrlUnknown + endpoint.Url = UrlUnknown return endpoint } @@ -145,7 +88,7 @@ func endpointFromHttpUrl(u *url.URL) Endpoint { return Endpoint{Url: u.String()} } -func endpointFromGitUrl(u *url.URL, c *Configuration) Endpoint { - u.Scheme = c.GitProtocol() +func endpointFromGitUrl(u *url.URL, e *endpointGitFinder) Endpoint { + u.Scheme = e.gitProtocol return Endpoint{Url: u.String()} } diff --git a/lfsapi/endpoint_finder.go b/lfsapi/endpoint_finder.go new file mode 100644 index 00000000..1022f763 --- /dev/null +++ b/lfsapi/endpoint_finder.go @@ -0,0 +1,284 @@ +package lfsapi + +import ( + "fmt" + "net/url" + "os" + "path" + "strings" + "sync" + + "github.com/git-lfs/git-lfs/git" + "github.com/rubyist/tracerx" +) + +type Access string + +const ( + NoneAccess Access = "none" + BasicAccess Access = "basic" + PrivateAccess Access = "private" + NegotiateAccess Access = "negotiate" + NTLMAccess Access = "ntlm" + emptyAccess Access = "" + defaultRemote = "origin" +) + +type EndpointFinder interface { + NewEndpointFromCloneURL(rawurl string) Endpoint + NewEndpoint(rawurl string) Endpoint + Endpoint(operation, remote string) Endpoint + RemoteEndpoint(operation, remote string) Endpoint + GitRemoteURL(remote string, forpush bool) string + AccessFor(rawurl string) Access + SetAccess(rawurl string, access Access) + GitProtocol() string +} + +type endpointGitFinder struct { + git Env + gitProtocol string + + aliasMu sync.Mutex + aliases map[string]string + + accessMu sync.Mutex + urlAccess map[string]Access +} + +func NewEndpointFinder(git Env) EndpointFinder { + e := &endpointGitFinder{ + gitProtocol: "https", + aliases: make(map[string]string), + urlAccess: make(map[string]Access), + } + + if git != nil { + e.git = git + if v, ok := git.Get("lfs.gitprotocol"); ok { + e.gitProtocol = v + } + initAliases(e, git) + } + + return e +} + +func (e *endpointGitFinder) Endpoint(operation, remote string) Endpoint { + if e.git == nil { + return Endpoint{} + } + + if operation == "upload" { + if url, ok := e.git.Get("lfs.pushurl"); ok { + return e.NewEndpoint(url) + } + } + + if url, ok := e.git.Get("lfs.url"); ok { + return e.NewEndpoint(url) + } + + if len(remote) > 0 && remote != defaultRemote { + if e := e.RemoteEndpoint(operation, remote); len(e.Url) > 0 { + return e + } + } + + return e.RemoteEndpoint(operation, defaultRemote) +} + +func (e *endpointGitFinder) RemoteEndpoint(operation, remote string) 
Endpoint { + if e.git == nil { + return Endpoint{} + } + + if len(remote) == 0 { + remote = defaultRemote + } + + // Support separate push URL if specified and pushing + if operation == "upload" { + if url, ok := e.git.Get("remote." + remote + ".lfspushurl"); ok { + return e.NewEndpoint(url) + } + } + if url, ok := e.git.Get("remote." + remote + ".lfsurl"); ok { + return e.NewEndpoint(url) + } + + // finally fall back on git remote url (also supports pushurl) + if url := e.GitRemoteURL(remote, operation == "upload"); url != "" { + return e.NewEndpointFromCloneURL(url) + } + + return Endpoint{} +} + +func (e *endpointGitFinder) GitRemoteURL(remote string, forpush bool) string { + if e.git != nil { + if forpush { + if u, ok := e.git.Get("remote." + remote + ".pushurl"); ok { + return u + } + } + + if u, ok := e.git.Get("remote." + remote + ".url"); ok { + return u + } + } + + if err := git.ValidateRemote(remote); err == nil { + return remote + } + + return "" +} + +func (e *endpointGitFinder) NewEndpointFromCloneURL(rawurl string) Endpoint { + ep := e.NewEndpoint(rawurl) + if ep.Url == UrlUnknown { + return ep + } + + if strings.HasSuffix(rawurl, "/") { + ep.Url = rawurl[0 : len(rawurl)-1] + } + + // When using main remote URL for HTTP, append info/lfs + if path.Ext(ep.Url) == ".git" { + ep.Url += "/info/lfs" + } else { + ep.Url += ".git/info/lfs" + } + + return ep +} + +func (e *endpointGitFinder) NewEndpoint(rawurl string) Endpoint { + rawurl = e.ReplaceUrlAlias(rawurl) + u, err := url.Parse(rawurl) + if err != nil { + return endpointFromBareSshUrl(rawurl) + } + + switch u.Scheme { + case "ssh": + return endpointFromSshUrl(u) + case "http", "https": + return endpointFromHttpUrl(u) + case "git": + return endpointFromGitUrl(u, e) + case "": + return endpointFromBareSshUrl(u.String()) + default: + // Just passthrough to preserve + return Endpoint{Url: rawurl} + } +} + +func (e *endpointGitFinder) AccessFor(rawurl string) Access { + if e.git == nil { + return NoneAccess + } + + accessurl := urlWithoutAuth(rawurl) + + e.accessMu.Lock() + defer e.accessMu.Unlock() + + if cached, ok := e.urlAccess[accessurl]; ok { + return cached + } + + key := fmt.Sprintf("lfs.%s.access", accessurl) + e.urlAccess[accessurl] = fetchGitAccess(e.git, key) + return e.urlAccess[accessurl] +} + +func (e *endpointGitFinder) SetAccess(rawurl string, access Access) { + accessurl := urlWithoutAuth(rawurl) + key := fmt.Sprintf("lfs.%s.access", accessurl) + tracerx.Printf("setting repository access to %s", access) + + e.accessMu.Lock() + defer e.accessMu.Unlock() + + switch access { + case emptyAccess, NoneAccess: + git.Config.UnsetLocalKey("", key) + e.urlAccess[accessurl] = NoneAccess + default: + git.Config.SetLocal("", key, string(access)) + e.urlAccess[accessurl] = access + } +} + +func urlWithoutAuth(rawurl string) string { + if !strings.Contains(rawurl, "@") { + return rawurl + } + + u, err := url.Parse(rawurl) + if err != nil { + fmt.Fprintf(os.Stderr, "Error parsing URL %q: %s", rawurl, err) + return rawurl + } + + u.User = nil + return u.String() +} + +func fetchGitAccess(git Env, key string) Access { + if v, _ := git.Get(key); len(v) > 0 { + access := Access(strings.ToLower(v)) + if access == PrivateAccess { + return BasicAccess + } + return access + } + return NoneAccess +} + +func (e *endpointGitFinder) GitProtocol() string { + return e.gitProtocol +} + +// ReplaceUrlAlias returns a url with a prefix from a `url.*.insteadof` git +// config setting. If multiple aliases match, use the longest one. 
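+// For example, with a hypothetical `url."git@github.com:".insteadOf = gh:` setting,
+// a raw URL of "gh:user/repo.git" would be rewritten to "git@github.com:user/repo.git"
+// before the endpoint is resolved (illustrative values).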
+// See https://git-scm.com/docs/git-config for Git's docs. +func (e *endpointGitFinder) ReplaceUrlAlias(rawurl string) string { + e.aliasMu.Lock() + defer e.aliasMu.Unlock() + + var longestalias string + for alias, _ := range e.aliases { + if !strings.HasPrefix(rawurl, alias) { + continue + } + + if longestalias < alias { + longestalias = alias + } + } + + if len(longestalias) > 0 { + return e.aliases[longestalias] + rawurl[len(longestalias):] + } + + return rawurl +} + +func initAliases(e *endpointGitFinder, git Env) { + prefix := "url." + suffix := ".insteadof" + for gitkey, gitval := range git.All() { + if !(strings.HasPrefix(gitkey, prefix) && strings.HasSuffix(gitkey, suffix)) { + continue + } + if _, ok := e.aliases[gitval]; ok { + fmt.Fprintf(os.Stderr, "WARNING: Multiple 'url.*.insteadof' keys with the same alias: %q\n", gitval) + } + e.aliases[gitval] = gitkey[len(prefix) : len(gitkey)-len(suffix)] + } +} diff --git a/lfsapi/endpoint_finder_test.go b/lfsapi/endpoint_finder_test.go new file mode 100644 index 00000000..29ed0c2f --- /dev/null +++ b/lfsapi/endpoint_finder_test.go @@ -0,0 +1,350 @@ +package lfsapi + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEndpointDefaultsToOrigin(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.lfsurl": "abc", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "abc", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointOverridesOrigin(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.url": "abc", + "remote.origin.lfsurl": "def", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "abc", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointNoOverrideDefaultRemote(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.lfsurl": "abc", + "remote.other.lfsurl": "def", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "abc", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointUseAlternateRemote(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.lfsurl": "abc", + "remote.other.lfsurl": "def", + })) + + e := finder.Endpoint("download", "other") + assert.Equal(t, "def", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "https://example.com/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestBareEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "https://example.com/foo/bar.git", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointSeparateClonePushUrl(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "https://example.com/foo/bar.git", + "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", 
e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + + e = finder.Endpoint("upload", "") + assert.Equal(t, "https://readwrite.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointOverriddenSeparateClonePushLfsUrl(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "https://example.com/foo/bar.git", + "remote.origin.pushurl": "https://readwrite.com/foo/bar.git", + "remote.origin.lfsurl": "https://examplelfs.com/foo/bar", + "remote.origin.lfspushurl": "https://readwritelfs.com/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://examplelfs.com/foo/bar", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + + e = finder.Endpoint("upload", "") + assert.Equal(t, "https://readwritelfs.com/foo/bar", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestEndpointGlobalSeparateLfsPush(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.url": "https://readonly.com/foo/bar", + "lfs.pushurl": "https://write.com/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://readonly.com/foo/bar", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + + e = finder.Endpoint("upload", "") + assert.Equal(t, "https://write.com/foo/bar", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) +} + +func TestSSHEndpointOverridden(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "git@example.com:foo/bar", + "remote.origin.lfsurl": "lfs", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestSSHEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "ssh://git@example.com/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "git@example.com", e.SshUserAndHost) + assert.Equal(t, "foo/bar", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestSSHCustomPortEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "ssh://git@example.com:9000/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "git@example.com", e.SshUserAndHost) + assert.Equal(t, "foo/bar", e.SshPath) + assert.Equal(t, "9000", e.SshPort) +} + +func TestBareSSHEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "git@example.com:foo/bar.git", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "git@example.com", e.SshUserAndHost) + assert.Equal(t, "foo/bar.git", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestSSHEndpointFromGlobalLfsUrl(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.url": "git@example.com:foo/bar.git", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git", e.Url) + assert.Equal(t, "git@example.com", e.SshUserAndHost) + assert.Equal(t, "foo/bar.git", e.SshPath) + 
assert.Equal(t, "", e.SshPort) +} + +func TestHTTPEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "http://example.com/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestBareHTTPEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "http://example.com/foo/bar.git", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestGitEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "git://example.com/foo/bar", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestGitEndpointAddsLfsSuffixWithCustomProtocol(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "git://example.com/foo/bar", + "lfs.gitprotocol": "http", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "http://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestBareGitEndpointAddsLfsSuffix(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "remote.origin.url": "git://example.com/foo/bar.git", + })) + + e := finder.Endpoint("download", "") + assert.Equal(t, "https://example.com/foo/bar.git/info/lfs", e.Url) + assert.Equal(t, "", e.SshUserAndHost) + assert.Equal(t, "", e.SshPath) + assert.Equal(t, "", e.SshPort) +} + +func TestAccessConfig(t *testing.T) { + type accessTest struct { + Access string + PrivateAccess bool + } + + tests := map[string]accessTest{ + "": {"none", false}, + "basic": {"basic", true}, + "BASIC": {"basic", true}, + "private": {"basic", true}, + "PRIVATE": {"basic", true}, + "invalidauth": {"invalidauth", true}, + } + + for value, expected := range tests { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.url": "http://example.com", + "lfs.http://example.com.access": value, + "lfs.https://example.com.access": "bad", + })) + + dl := finder.Endpoint("upload", "") + ul := finder.Endpoint("download", "") + + if access := finder.AccessFor(dl.Url); access != Access(expected.Access) { + t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) + } + if access := finder.AccessFor(ul.Url); access != Access(expected.Access) { + t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) + } + } + + // Test again but with separate push url + for value, expected := range tests { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.url": "http://example.com", + "lfs.pushurl": "http://examplepush.com", + "lfs.http://example.com.access": value, + "lfs.http://examplepush.com.access": value, + "lfs.https://example.com.access": "bad", + })) + + dl := finder.Endpoint("upload", "") + ul := finder.Endpoint("download", "") + + if access := finder.AccessFor(dl.Url); access != Access(expected.Access) { + 
t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) + } + if access := finder.AccessFor(ul.Url); access != Access(expected.Access) { + t.Errorf("Expected Access() with value %q to be %v, got %v", value, expected.Access, access) + } + } +} + +func TestAccessAbsentConfig(t *testing.T) { + finder := NewEndpointFinder(nil) + assert.Equal(t, NoneAccess, finder.AccessFor(finder.Endpoint("download", "").Url)) + assert.Equal(t, NoneAccess, finder.AccessFor(finder.Endpoint("upload", "").Url)) +} + +func TestSetAccess(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{})) + + assert.Equal(t, NoneAccess, finder.AccessFor("http://example.com")) + finder.SetAccess("http://example.com", NTLMAccess) + assert.Equal(t, NTLMAccess, finder.AccessFor("http://example.com")) +} + +func TestChangeAccess(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.http://example.com.access": "basic", + })) + + assert.Equal(t, BasicAccess, finder.AccessFor("http://example.com")) + finder.SetAccess("http://example.com", NTLMAccess) + assert.Equal(t, NTLMAccess, finder.AccessFor("http://example.com")) +} + +func TestDeleteAccessWithNone(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.http://example.com.access": "basic", + })) + + assert.Equal(t, BasicAccess, finder.AccessFor("http://example.com")) + finder.SetAccess("http://example.com", NoneAccess) + assert.Equal(t, NoneAccess, finder.AccessFor("http://example.com")) +} + +func TestDeleteAccessWithEmptyString(t *testing.T) { + finder := NewEndpointFinder(TestEnv(map[string]string{ + "lfs.http://example.com.access": "basic", + })) + + assert.Equal(t, BasicAccess, finder.AccessFor("http://example.com")) + finder.SetAccess("http://example.com", Access("")) + assert.Equal(t, NoneAccess, finder.AccessFor("http://example.com")) +} diff --git a/config/endpoint_test.go b/lfsapi/endpoint_test.go similarity index 79% rename from config/endpoint_test.go rename to lfsapi/endpoint_test.go index af4c245d..5bf208bc 100644 --- a/config/endpoint_test.go +++ b/lfsapi/endpoint_test.go @@ -1,4 +1,4 @@ -package config +package lfsapi import ( "testing" @@ -13,9 +13,9 @@ func TestNewEndpointFromCloneURLWithConfig(t *testing.T) { "https://foo/bar.git/", } - cfg := New() + finder := NewEndpointFinder(nil) for _, actual := range tests { - e := NewEndpointFromCloneURLWithConfig(actual, cfg) + e := finder.NewEndpointFromCloneURL(actual) if e.Url != expected { t.Errorf("%s returned bad endpoint url %s", actual, e.Url) } diff --git a/lfsapi/errors.go b/lfsapi/errors.go new file mode 100644 index 00000000..d2c7beae --- /dev/null +++ b/lfsapi/errors.go @@ -0,0 +1,122 @@ +package lfsapi + +import ( + "fmt" + "net/http" + "strings" + + "github.com/git-lfs/git-lfs/errors" +) + +type httpError interface { + Error() string + HTTPResponse() *http.Response +} + +func IsHTTP(err error) (*http.Response, bool) { + if httpErr, ok := err.(httpError); ok { + return httpErr.HTTPResponse(), true + } + return nil, false +} + +type ClientError struct { + Message string `json:"message"` + DocumentationUrl string `json:"documentation_url,omitempty"` + RequestId string `json:"request_id,omitempty"` + response *http.Response +} + +func (e *ClientError) HTTPResponse() *http.Response { + return e.response +} + +func (e *ClientError) Error() string { + msg := e.Message + if len(e.DocumentationUrl) > 0 { + msg += "\nDocs: " + e.DocumentationUrl + } + if len(e.RequestId) > 0 { + msg += "\nRequest ID: " + 
e.RequestId + } + return msg +} + +func (c *Client) handleResponse(res *http.Response) error { + if res.StatusCode < 400 { + return nil + } + + cliErr := &ClientError{response: res} + err := DecodeJSON(res, cliErr) + if IsDecodeTypeError(err) { + err = nil + } + + if err == nil { + if len(cliErr.Message) == 0 { + err = defaultError(res) + } else { + err = errors.Wrap(cliErr, "http") + } + } + + if res.StatusCode == 401 { + return errors.NewAuthError(err) + } + + if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 507 && res.StatusCode != 509 { + return errors.NewFatalError(err) + } + + return err +} + +type statusCodeError struct { + response *http.Response +} + +func NewStatusCodeError(res *http.Response) error { + return &statusCodeError{response: res} +} + +func (e *statusCodeError) Error() string { + req := e.response.Request + return fmt.Sprintf("Invalid HTTP status for %s %s: %d", + req.Method, + strings.SplitN(req.URL.String(), "?", 2)[0], + e.response.StatusCode, + ) +} + +func (e *statusCodeError) HTTPResponse() *http.Response { + return e.response +} + +var ( + defaultErrors = map[int]string{ + 400: "Client error: %s", + 401: "Authorization error: %s\nCheck that you have proper access to the repository", + 403: "Authorization error: %s\nCheck that you have proper access to the repository", + 404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it", + 429: "Rate limit exceeded: %s", + 500: "Server error: %s", + 501: "Not Implemented: %s", + 507: "Insufficient server storage: %s", + 509: "Bandwidth limit exceeded: %s", + } +) + +func defaultError(res *http.Response) error { + var msgFmt string + + if f, ok := defaultErrors[res.StatusCode]; ok { + msgFmt = f + } else if res.StatusCode < 500 { + msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode) + } else { + msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP %d", res.StatusCode) + } + + return errors.Errorf(msgFmt, res.Request.URL) +} diff --git a/lfsapi/lfsapi.go b/lfsapi/lfsapi.go new file mode 100644 index 00000000..ac083c70 --- /dev/null +++ b/lfsapi/lfsapi.go @@ -0,0 +1,186 @@ +package lfsapi + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "regexp" + "strconv" + "strings" + "sync" + + "github.com/ThomsonReutersEikon/go-ntlm/ntlm" + "github.com/git-lfs/git-lfs/errors" +) + +var ( + lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) + jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) +) + +type Client struct { + Endpoints EndpointFinder + Credentials CredentialHelper + Netrc NetrcFinder + + DialTimeout int + KeepaliveTimeout int + TLSTimeout int + ConcurrentTransfers int + HTTPSProxy string + HTTPProxy string + NoProxy string + SkipSSLVerify bool + + Verbose bool + DebuggingVerbose bool + LoggingStats bool + VerboseOut io.Writer + + hostClients map[string]*http.Client + clientMu sync.Mutex + + ntlmSessions map[string]ntlm.ClientSession + ntlmMu sync.Mutex + + transferBuckets map[string][]*http.Response + transferBucketMu sync.Mutex + transfers map[*http.Response]*httpTransfer + transferMu sync.Mutex + + // only used for per-host ssl certs + gitEnv Env + osEnv Env +} + +func NewClient(osEnv Env, gitEnv Env) (*Client, error) { + if osEnv == nil { + osEnv = make(TestEnv) + } + + if gitEnv == nil { + gitEnv = make(TestEnv) + } + + netrc, err := ParseNetrc(osEnv) + if err != nil { + return nil, err + } + + httpsProxy, httpProxy, noProxy := getProxyServers(osEnv, gitEnv) + + c := &Client{ + 
Endpoints: NewEndpointFinder(gitEnv), + Credentials: &commandCredentialHelper{ + SkipPrompt: !osEnv.Bool("GIT_TERMINAL_PROMPT", true), + }, + Netrc: netrc, + DialTimeout: gitEnv.Int("lfs.dialtimeout", 0), + KeepaliveTimeout: gitEnv.Int("lfs.keepalive", 0), + TLSTimeout: gitEnv.Int("lfs.tlstimeout", 0), + ConcurrentTransfers: gitEnv.Int("lfs.concurrenttransfers", 0), + SkipSSLVerify: !gitEnv.Bool("http.sslverify", true) || osEnv.Bool("GIT_SSL_NO_VERIFY", false), + Verbose: osEnv.Bool("GIT_CURL_VERBOSE", false), + DebuggingVerbose: osEnv.Bool("LFS_DEBUG_HTTP", false), + LoggingStats: osEnv.Bool("GIT_LOG_STATS", false), + HTTPSProxy: httpsProxy, + HTTPProxy: httpProxy, + NoProxy: noProxy, + gitEnv: gitEnv, + osEnv: osEnv, + } + + return c, nil +} + +func (c *Client) GitEnv() Env { + return c.gitEnv +} + +func (c *Client) OSEnv() Env { + return c.osEnv +} + +func IsDecodeTypeError(err error) bool { + _, ok := err.(*decodeTypeError) + return ok +} + +type decodeTypeError struct { + Type string +} + +func (e *decodeTypeError) TypeError() {} + +func (e *decodeTypeError) Error() string { + return fmt.Sprintf("Expected json type, got: %q", e.Type) +} + +func DecodeJSON(res *http.Response, obj interface{}) error { + ctype := res.Header.Get("Content-Type") + if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { + return &decodeTypeError{Type: ctype} + } + + err := json.NewDecoder(res.Body).Decode(obj) + res.Body.Close() + + if err != nil { + return errors.Wrapf(err, "Unable to parse HTTP response for %s %s", res.Request.Method, res.Request.URL) + } + + return nil +} + +// Env is an interface for the config.Environment methods that this package +// relies on. +type Env interface { + Get(string) (string, bool) + Int(string, int) int + Bool(string, bool) bool + All() map[string]string +} + +// TestEnv is a basic config.Environment implementation. Only used in tests, or +// as a zero value to NewClient(). 
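+// For example, NewClient(nil, TestEnv(map[string]string{"lfs.url": "https://example.com"}))
+// yields a Client whose Endpoints finder resolves the "download" endpoint to
+// "https://example.com" (hypothetical value, mirroring the tests in this package).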
+type TestEnv map[string]string + +func (e TestEnv) Get(key string) (string, bool) { + v, ok := e[key] + return v, ok +} + +func (e TestEnv) Int(key string, def int) (val int) { + s, _ := e.Get(key) + if len(s) == 0 { + return def + } + + i, err := strconv.Atoi(s) + if err != nil { + return def + } + + return i +} + +func (e TestEnv) Bool(key string, def bool) (val bool) { + s, _ := e.Get(key) + if len(s) == 0 { + return def + } + + switch strings.ToLower(s) { + case "true", "1", "on", "yes", "t": + return true + case "false", "0", "off", "no", "f": + return false + default: + return false + } +} + +func (e TestEnv) All() map[string]string { + return e +} diff --git a/lfsapi/lfsapi_nix.go b/lfsapi/lfsapi_nix.go new file mode 100644 index 00000000..a3eb87cb --- /dev/null +++ b/lfsapi/lfsapi_nix.go @@ -0,0 +1,5 @@ +// +build !windows + +package lfsapi + +var netrcBasename = ".netrc" diff --git a/lfsapi/lfsapi_windows.go b/lfsapi/lfsapi_windows.go new file mode 100644 index 00000000..ff8ea0ba --- /dev/null +++ b/lfsapi/lfsapi_windows.go @@ -0,0 +1,5 @@ +// +build windows + +package lfsapi + +var netrcBasename = "_netrc" diff --git a/lfsapi/netrc.go b/lfsapi/netrc.go new file mode 100644 index 00000000..d02bc257 --- /dev/null +++ b/lfsapi/netrc.go @@ -0,0 +1,32 @@ +package lfsapi + +import ( + "os" + "path/filepath" + + "github.com/bgentry/go-netrc/netrc" +) + +type NetrcFinder interface { + FindMachine(string) *netrc.Machine +} + +func ParseNetrc(osEnv Env) (NetrcFinder, error) { + home, _ := osEnv.Get("HOME") + if len(home) == 0 { + return &noFinder{}, nil + } + + nrcfilename := filepath.Join(home, netrcBasename) + if _, err := os.Stat(nrcfilename); err != nil { + return &noFinder{}, nil + } + + return netrc.ParseFile(nrcfilename) +} + +type noFinder struct{} + +func (f *noFinder) FindMachine(host string) *netrc.Machine { + return nil +} diff --git a/lfsapi/netrc_test.go b/lfsapi/netrc_test.go new file mode 100644 index 00000000..f07e7f01 --- /dev/null +++ b/lfsapi/netrc_test.go @@ -0,0 +1,85 @@ +package lfsapi + +import ( + "net/http" + "net/url" + "strings" + "testing" + + "github.com/bgentry/go-netrc/netrc" +) + +func TestNetrcWithHostAndPort(t *testing.T) { + netrcFinder := &fakeNetrc{} + u, err := url.Parse("http://netrc-host:123/foo/bar") + if err != nil { + t.Fatal(err) + } + + req := &http.Request{ + URL: u, + Header: http.Header{}, + } + + if !setAuthFromNetrc(netrcFinder, req) { + t.Fatal("no netrc match") + } + + auth := req.Header.Get("Authorization") + if auth != "Basic YWJjOmRlZg==" { + t.Fatalf("bad basic auth: %q", auth) + } +} + +func TestNetrcWithHost(t *testing.T) { + netrcFinder := &fakeNetrc{} + u, err := url.Parse("http://netrc-host/foo/bar") + if err != nil { + t.Fatal(err) + } + + req := &http.Request{ + URL: u, + Header: http.Header{}, + } + + if !setAuthFromNetrc(netrcFinder, req) { + t.Fatalf("no netrc match") + } + + auth := req.Header.Get("Authorization") + if auth != "Basic YWJjOmRlZg==" { + t.Fatalf("bad basic auth: %q", auth) + } +} + +func TestNetrcWithBadHost(t *testing.T) { + netrcFinder := &fakeNetrc{} + u, err := url.Parse("http://other-host/foo/bar") + if err != nil { + t.Fatal(err) + } + + req := &http.Request{ + URL: u, + Header: http.Header{}, + } + + if setAuthFromNetrc(netrcFinder, req) { + t.Fatalf("unexpected netrc match") + } + + auth := req.Header.Get("Authorization") + if auth != "" { + t.Fatalf("bad basic auth: %q", auth) + } +} + +type fakeNetrc struct{} + +func (n *fakeNetrc) FindMachine(host string) *netrc.Machine { + if 
strings.Contains(host, "netrc") { + return &netrc.Machine{Login: "abc", Password: "def"} + } + return nil +} diff --git a/lfsapi/ntlm.go b/lfsapi/ntlm.go new file mode 100644 index 00000000..b180b08a --- /dev/null +++ b/lfsapi/ntlm.go @@ -0,0 +1,166 @@ +package lfsapi + +import ( + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/ThomsonReutersEikon/go-ntlm/ntlm" + "github.com/git-lfs/git-lfs/errors" +) + +func (c *Client) doWithNTLM(req *http.Request, credHelper CredentialHelper, creds Creds, credsURL *url.URL) (*http.Response, error) { + res, err := c.Do(req) + if err != nil && !errors.IsAuthError(err) { + return res, err + } + + if res.StatusCode != 401 { + return res, nil + } + + return c.ntlmReAuth(req, credHelper, creds, true) +} + +// If the status is 401 then we need to re-authenticate +func (c *Client) ntlmReAuth(req *http.Request, credHelper CredentialHelper, creds Creds, retry bool) (*http.Response, error) { + body, err := rewoundRequestBody(req) + if err != nil { + return nil, err + } + req.Body = body + + chRes, challengeMsg, err := c.ntlmNegotiate(req, ntlmNegotiateMessage) + if err != nil { + return chRes, err + } + + body, err = rewoundRequestBody(req) + if err != nil { + return nil, err + } + req.Body = body + + res, err := c.ntlmChallenge(req, challengeMsg, creds) + if err != nil { + return res, err + } + + switch res.StatusCode { + case 401: + credHelper.Reject(creds) + if retry { + return c.ntlmReAuth(req, credHelper, creds, false) + } + case 403: + credHelper.Reject(creds) + default: + if res.StatusCode < 300 && res.StatusCode > 199 { + credHelper.Approve(creds) + } + } + + return res, nil +} + +func (c *Client) ntlmNegotiate(req *http.Request, message string) (*http.Response, []byte, error) { + req.Header.Add("Authorization", message) + res, err := c.Do(req) + if err != nil && !errors.IsAuthError(err) { + return res, nil, err + } + + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + by, err := parseChallengeResponse(res) + return res, by, err +} + +func (c *Client) ntlmChallenge(req *http.Request, challengeBytes []byte, creds Creds) (*http.Response, error) { + challenge, err := ntlm.ParseChallengeMessage(challengeBytes) + if err != nil { + return nil, err + } + + session, err := c.ntlmClientSession(creds) + if err != nil { + return nil, err + } + + session.ProcessChallengeMessage(challenge) + authenticate, err := session.GenerateAuthenticateMessage() + if err != nil { + return nil, err + } + + authMsg := base64.StdEncoding.EncodeToString(authenticate.Bytes()) + req.Header.Set("Authorization", "NTLM "+authMsg) + return c.Do(req) +} + +func (c *Client) ntlmClientSession(creds Creds) (ntlm.ClientSession, error) { + c.ntlmMu.Lock() + defer c.ntlmMu.Unlock() + + splits := strings.Split(creds["username"], "\\") + if len(splits) != 2 { + return nil, fmt.Errorf("Your user name must be of the form DOMAIN\\user. 
It is currently %s", creds["username"]) + } + + domain := strings.ToUpper(splits[0]) + username := splits[1] + + if c.ntlmSessions == nil { + c.ntlmSessions = make(map[string]ntlm.ClientSession) + } + + if ses, ok := c.ntlmSessions[domain]; ok { + return ses, nil + } + + session, err := ntlm.CreateClientSession(ntlm.Version2, ntlm.ConnectionOrientedMode) + if err != nil { + return nil, err + } + + session.SetUserInfo(username, creds["password"], strings.ToUpper(splits[0])) + c.ntlmSessions[domain] = session + return session, nil +} + +func parseChallengeResponse(res *http.Response) ([]byte, error) { + header := res.Header.Get("Www-Authenticate") + if len(header) < 6 { + return nil, fmt.Errorf("Invalid NTLM challenge response: %q", header) + } + + //parse out the "NTLM " at the beginning of the response + challenge := header[5:] + val, err := base64.StdEncoding.DecodeString(challenge) + + if err != nil { + return nil, err + } + return []byte(val), nil +} + +func rewoundRequestBody(req *http.Request) (io.ReadCloser, error) { + if req.Body == nil { + return nil, nil + } + + body, ok := req.Body.(ReadSeekCloser) + if !ok { + return nil, fmt.Errorf("Request body must implement io.ReadCloser and io.Seeker. Got: %T", body) + } + + _, err := body.Seek(0, io.SeekStart) + return body, err +} + +const ntlmNegotiateMessage = "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB" diff --git a/lfsapi/ntlm_test.go b/lfsapi/ntlm_test.go new file mode 100644 index 00000000..fa01fd57 --- /dev/null +++ b/lfsapi/ntlm_test.go @@ -0,0 +1,170 @@ +package lfsapi + +import ( + "encoding/base64" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "sync/atomic" + "testing" + + "github.com/ThomsonReutersEikon/go-ntlm/ntlm" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestNTLMAuth(t *testing.T) { + session, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode) + require.Nil(t, err) + session.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN") + + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + reqIndex := atomic.LoadUint32(&called) + atomic.AddUint32(&called, 1) + + authHeader := req.Header.Get("Authorization") + t.Logf("REQUEST %d: %s %s", reqIndex, req.Method, req.URL) + t.Logf("AUTH: %q", authHeader) + + // assert full body is sent each time + by, err := ioutil.ReadAll(req.Body) + req.Body.Close() + if assert.Nil(t, err) { + assert.Equal(t, "ntlm", string(by)) + } + + switch authHeader { + case "": + w.Header().Set("Www-Authenticate", "ntlm") + w.WriteHeader(401) + case ntlmNegotiateMessage: + assert.True(t, strings.HasPrefix(req.Header.Get("Authorization"), "NTLM ")) + ch, err := session.GenerateChallengeMessage() + if !assert.Nil(t, err) { + t.Logf("challenge gen error: %+v", err) + w.WriteHeader(500) + return + } + chMsg := base64.StdEncoding.EncodeToString(ch.Bytes()) + w.Header().Set("Www-Authenticate", "ntlm "+chMsg) + w.WriteHeader(401) + default: // should be an auth msg + authHeader := req.Header.Get("Authorization") + assert.True(t, strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ")) + auth := authHeader[5:] // strip "ntlm " prefix + val, err := base64.StdEncoding.DecodeString(auth) + if !assert.Nil(t, err) { + t.Logf("auth base64 error: %+v", err) + w.WriteHeader(500) + return + } + + _, err = ntlm.ParseAuthenticateMessage(val, 2) + if !assert.Nil(t, err) { + t.Logf("auth parse error: %+v", err) + 
w.WriteHeader(500) + return + } + w.WriteHeader(200) + } + })) + defer srv.Close() + + req, err := http.NewRequest("POST", srv.URL+"/ntlm", NewByteBody([]byte("ntlm"))) + require.Nil(t, err) + + credHelper := newMockCredentialHelper() + cli, err := NewClient(nil, TestEnv(map[string]string{ + "lfs.url": srv.URL + "/ntlm", + "lfs." + srv.URL + "/ntlm.access": "ntlm", + })) + cli.Credentials = credHelper + require.Nil(t, err) + + // ntlm support pulls domain and login info from git credentials + srvURL, err := url.Parse(srv.URL) + require.Nil(t, err) + creds := Creds{ + "protocol": srvURL.Scheme, + "host": srvURL.Host, + "path": "ntlm", + "username": "ntlmdomain\\ntlmuser", + "password": "ntlmpass", + } + credHelper.Approve(creds) + + res, err := cli.DoWithAuth("remote", req) + require.Nil(t, err) + assert.Equal(t, 200, res.StatusCode) + assert.True(t, credHelper.IsApproved(creds)) +} + +func TestNtlmClientSession(t *testing.T) { + cli, err := NewClient(nil, nil) + require.Nil(t, err) + + creds := Creds{"username": "MOOSEDOMAIN\\canadian", "password": "MooseAntlersYeah"} + session1, err := cli.ntlmClientSession(creds) + assert.Nil(t, err) + assert.NotNil(t, session1) + + // The second call should ignore creds and give the session we just created. + badCreds := Creds{"username": "MOOSEDOMAIN\\badusername", "password": "MooseAntlersYeah"} + session2, err := cli.ntlmClientSession(badCreds) + assert.Nil(t, err) + assert.NotNil(t, session2) + assert.EqualValues(t, session1, session2) +} + +func TestNtlmClientSessionBadCreds(t *testing.T) { + cli, err := NewClient(nil, nil) + require.Nil(t, err) + creds := Creds{"username": "badusername", "password": "MooseAntlersYeah"} + _, err = cli.ntlmClientSession(creds) + assert.NotNil(t, err) +} + +func TestNtlmHeaderParseValid(t *testing.T) { + res := http.Response{} + res.Header = make(map[string][]string) + res.Header.Add("Www-Authenticate", "NTLM "+base64.StdEncoding.EncodeToString([]byte("I am a moose"))) + bytes, err := parseChallengeResponse(&res) + assert.Nil(t, err) + assert.False(t, strings.HasPrefix(string(bytes), "NTLM")) +} + +func TestNtlmHeaderParseInvalidLength(t *testing.T) { + res := http.Response{} + res.Header = make(map[string][]string) + res.Header.Add("Www-Authenticate", "NTL") + ret, err := parseChallengeResponse(&res) + assert.NotNil(t, err) + assert.Nil(t, ret) +} + +func TestNtlmHeaderParseInvalid(t *testing.T) { + res := http.Response{} + res.Header = make(map[string][]string) + res.Header.Add("Www-Authenticate", base64.StdEncoding.EncodeToString([]byte("NTLM I am a moose"))) + ret, err := parseChallengeResponse(&res) + assert.NotNil(t, err) + assert.Nil(t, ret) +} + +func assertRequestsEqual(t *testing.T, req1 *http.Request, req2 *http.Request, req1Body []byte) { + assert.Equal(t, req1.Method, req2.Method) + + for k, v := range req1.Header { + assert.Equal(t, v, req2.Header[k]) + } + + if req1.Body == nil { + assert.Nil(t, req2.Body) + } else { + bytes2, _ := ioutil.ReadAll(req2.Body) + assert.Equal(t, req1Body, bytes2) + } +} diff --git a/httputil/proxy.go b/lfsapi/proxy.go similarity index 63% rename from httputil/proxy.go rename to lfsapi/proxy.go index 3d01df14..250d1a62 100644 --- a/httputil/proxy.go +++ b/lfsapi/proxy.go @@ -1,4 +1,4 @@ -package httputil +package lfsapi import ( "net/http" @@ -6,54 +6,29 @@ import ( "strings" "fmt" - - "github.com/git-lfs/git-lfs/config" ) // Logic is copied, with small changes, from "net/http".ProxyFromEnvironment in the go std lib. 
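+// proxyFromClient returns a callback suitable for http.Transport.Proxy: it uses the
+// client's HTTPSProxy for "https" requests, falls back to HTTPProxy, and returns nil
+// when no proxy is configured or when NoProxy exempts the request's host.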
-func ProxyFromGitConfigOrEnvironment(c *config.Configuration) func(req *http.Request) (*url.URL, error) { - var https_proxy string - http_proxy, _ := c.Git.Get("http.proxy") - if strings.HasPrefix(http_proxy, "https://") { - https_proxy = http_proxy - } - - if len(https_proxy) == 0 { - https_proxy, _ = c.Os.Get("HTTPS_PROXY") - } - - if len(https_proxy) == 0 { - https_proxy, _ = c.Os.Get("https_proxy") - } - - if len(http_proxy) == 0 { - http_proxy, _ = c.Os.Get("HTTP_PROXY") - } - - if len(http_proxy) == 0 { - http_proxy, _ = c.Os.Get("http_proxy") - } - - no_proxy, _ := c.Os.Get("NO_PROXY") - if len(no_proxy) == 0 { - no_proxy, _ = c.Os.Get("no_proxy") - } +func proxyFromClient(c *Client) func(req *http.Request) (*url.URL, error) { + httpProxy := c.HTTPProxy + httpsProxy := c.HTTPSProxy + noProxy := c.NoProxy return func(req *http.Request) (*url.URL, error) { var proxy string if req.URL.Scheme == "https" { - proxy = https_proxy + proxy = httpsProxy } if len(proxy) == 0 { - proxy = http_proxy + proxy = httpProxy } if len(proxy) == 0 { return nil, nil } - if !useProxy(no_proxy, canonicalAddr(req.URL)) { + if !useProxy(noProxy, canonicalAddr(req.URL)) { return nil, nil } @@ -62,8 +37,8 @@ func ProxyFromGitConfigOrEnvironment(c *config.Configuration) func(req *http.Req // proxy was bogus. Try prepending "http://" to it and // see if that parses correctly. If not, we fall // through and complain about the original one. - if proxyURL, err := url.Parse("http://" + proxy); err == nil { - return proxyURL, nil + if httpProxyURL, httpErr := url.Parse("http://" + proxy); httpErr == nil { + return httpProxyURL, nil } } if err != nil { @@ -73,6 +48,37 @@ func ProxyFromGitConfigOrEnvironment(c *config.Configuration) func(req *http.Req } } +func getProxyServers(osEnv Env, gitEnv Env) (string, string, string) { + var httpsProxy string + httpProxy, _ := gitEnv.Get("http.proxy") + if strings.HasPrefix(httpProxy, "https://") { + httpsProxy = httpProxy + } + + if len(httpsProxy) == 0 { + httpsProxy, _ = osEnv.Get("HTTPS_PROXY") + } + + if len(httpsProxy) == 0 { + httpsProxy, _ = osEnv.Get("https_proxy") + } + + if len(httpProxy) == 0 { + httpProxy, _ = osEnv.Get("HTTP_PROXY") + } + + if len(httpProxy) == 0 { + httpProxy, _ = osEnv.Get("http_proxy") + } + + noProxy, _ := osEnv.Get("NO_PROXY") + if len(noProxy) == 0 { + noProxy, _ = osEnv.Get("no_proxy") + } + + return httpsProxy, httpProxy, noProxy +} + // canonicalAddr returns url.Host but always with a ":port" suffix // Copied from "net/http".ProxyFromEnvironment in the go std lib. func canonicalAddr(url *url.URL) string { @@ -84,16 +90,16 @@ func canonicalAddr(url *url.URL) string { } // useProxy reports whether requests to addr should use a proxy, -// according to the NO_PROXY or no_proxy environment variable. +// according to the noProxy or noProxy environment variable. // addr is always a canonicalAddr with a host and port. // Copied from "net/http".ProxyFromEnvironment in the go std lib // and adapted to allow proxy usage even for localhost. 
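+// For example, a noProxy entry of ".example.com" exempts both "example.com" and
+// "api.example.com", while a value of "*" disables proxying for every host
+// (illustrative hosts).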
-func useProxy(no_proxy, addr string) bool { +func useProxy(noProxy, addr string) bool { if len(addr) == 0 { return true } - if no_proxy == "*" { + if noProxy == "*" { return false } @@ -102,7 +108,7 @@ func useProxy(no_proxy, addr string) bool { addr = addr[:strings.LastIndex(addr, ":")] } - for _, p := range strings.Split(no_proxy, ",") { + for _, p := range strings.Split(noProxy, ",") { p = strings.ToLower(strings.TrimSpace(p)) if len(p) == 0 { continue @@ -114,11 +120,11 @@ func useProxy(no_proxy, addr string) bool { return false } if p[0] == '.' && (strings.HasSuffix(addr, p) || addr == p[1:]) { - // no_proxy ".foo.com" matches "bar.foo.com" or "foo.com" + // noProxy ".foo.com" matches "bar.foo.com" or "foo.com" return false } if p[0] != '.' && strings.HasSuffix(addr, p) && addr[len(addr)-len(p)-1] == '.' { - // no_proxy "foo.com" matches "bar.foo.com" + // noProxy "foo.com" matches "bar.foo.com" return false } } diff --git a/lfsapi/proxy_test.go b/lfsapi/proxy_test.go new file mode 100644 index 00000000..bd8ed4a3 --- /dev/null +++ b/lfsapi/proxy_test.go @@ -0,0 +1,82 @@ +package lfsapi + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProxyFromGitConfig(t *testing.T) { + c, err := NewClient(TestEnv(map[string]string{ + "HTTPS_PROXY": "https://proxy-from-env:8080", + }), TestEnv(map[string]string{ + "http.proxy": "https://proxy-from-git-config:8080", + })) + require.Nil(t, err) + + req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) + require.Nil(t, err) + + proxyURL, err := proxyFromClient(c)(req) + assert.Equal(t, "proxy-from-git-config:8080", proxyURL.Host) + assert.Nil(t, err) +} + +func TestHttpProxyFromGitConfig(t *testing.T) { + c, err := NewClient(TestEnv(map[string]string{ + "HTTPS_PROXY": "https://proxy-from-env:8080", + }), TestEnv(map[string]string{ + "http.proxy": "http://proxy-from-git-config:8080", + })) + require.Nil(t, err) + + req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) + require.Nil(t, err) + + proxyURL, err := proxyFromClient(c)(req) + assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) + assert.Nil(t, err) +} + +func TestProxyFromEnvironment(t *testing.T) { + c, err := NewClient(TestEnv(map[string]string{ + "HTTPS_PROXY": "https://proxy-from-env:8080", + }), nil) + require.Nil(t, err) + + req, err := http.NewRequest("GET", "https://some-host.com:123/foo/bar", nil) + require.Nil(t, err) + + proxyURL, err := proxyFromClient(c)(req) + assert.Equal(t, "proxy-from-env:8080", proxyURL.Host) + assert.Nil(t, err) +} + +func TestProxyIsNil(t *testing.T) { + c := &Client{} + + req, err := http.NewRequest("GET", "http://some-host.com:123/foo/bar", nil) + require.Nil(t, err) + + proxyURL, err := proxyFromClient(c)(req) + assert.Nil(t, proxyURL) + assert.Nil(t, err) +} + +func TestProxyNoProxy(t *testing.T) { + c, err := NewClient(TestEnv(map[string]string{ + "NO_PROXY": "some-host", + }), TestEnv(map[string]string{ + "http.proxy": "https://proxy-from-git-config:8080", + })) + require.Nil(t, err) + + req, err := http.NewRequest("GET", "https://some-host:8080", nil) + require.Nil(t, err) + + proxyURL, err := proxyFromClient(c)(req) + assert.Nil(t, proxyURL) + assert.Nil(t, err) +} diff --git a/lfsapi/response_test.go b/lfsapi/response_test.go new file mode 100644 index 00000000..cf3a63dd --- /dev/null +++ b/lfsapi/response_test.go @@ -0,0 +1,184 @@ +package lfsapi + +import ( + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" 
+ "testing" + + "github.com/git-lfs/git-lfs/errors" + "github.com/stretchr/testify/assert" +) + +func TestAuthErrWithBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/test" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(401) + w.Write([]byte(`{"message":"custom auth error"}`)) + })) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/test", nil) + assert.Nil(t, err) + + c := &Client{} + _, err = c.Do(req) + assert.NotNil(t, err) + assert.True(t, errors.IsAuthError(err)) + assert.Equal(t, "Authentication required: http: custom auth error", err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestFatalWithBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/test" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(500) + w.Write([]byte(`{"message":"custom fatal error"}`)) + })) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/test", nil) + assert.Nil(t, err) + + c := &Client{} + _, err = c.Do(req) + assert.NotNil(t, err) + assert.True(t, errors.IsFatalError(err)) + assert.Equal(t, "Fatal error: http: custom fatal error", err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestWithNonFatal500WithBody(t *testing.T) { + c := &Client{} + + var called uint32 + + nonFatalCodes := map[int]string{ + 501: "custom 501 error", + 507: "custom 507 error", + 509: "custom 509 error", + } + + for nonFatalCode, expectedErr := range nonFatalCodes { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/test" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(nonFatalCode) + w.Write([]byte(`{"message":"` + expectedErr + `"}`)) + })) + + req, err := http.NewRequest("GET", srv.URL+"/test", nil) + assert.Nil(t, err) + + _, err = c.Do(req) + t.Logf("non fatal code %d", nonFatalCode) + assert.NotNil(t, err) + assert.Equal(t, "http: "+expectedErr, err.Error()) + srv.Close() + } + + assert.EqualValues(t, 3, called) +} + +func TestAuthErrWithoutBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/test" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.WriteHeader(401) + })) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/test", nil) + assert.Nil(t, err) + + c := &Client{} + _, err = c.Do(req) + assert.NotNil(t, err) + assert.True(t, errors.IsAuthError(err)) + assert.True(t, strings.HasPrefix(err.Error(), "Authentication required: Authorization error:"), err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestFatalWithoutBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/test" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.WriteHeader(500) + })) + defer srv.Close() + + req, err := http.NewRequest("GET", srv.URL+"/test", nil) + assert.Nil(t, err) + + c := &Client{} + _, err = 
c.Do(req) + assert.NotNil(t, err) + assert.True(t, errors.IsFatalError(err)) + assert.True(t, strings.HasPrefix(err.Error(), "Fatal error: Server error:"), err.Error()) + assert.EqualValues(t, 1, called) +} + +func TestWithNonFatal500WithoutBody(t *testing.T) { + c := &Client{} + + var called uint32 + + nonFatalCodes := map[int]string{ + 501: "Not Implemented:", + 507: "Insufficient server storage:", + 509: "Bandwidth limit exceeded:", + } + + for nonFatalCode, errPrefix := range nonFatalCodes { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/test" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + w.WriteHeader(nonFatalCode) + })) + + req, err := http.NewRequest("GET", srv.URL+"/test", nil) + assert.Nil(t, err) + + _, err = c.Do(req) + t.Logf("non fatal code %d", nonFatalCode) + assert.NotNil(t, err) + assert.True(t, strings.HasPrefix(err.Error(), errPrefix)) + srv.Close() + } + + assert.EqualValues(t, 3, called) +} diff --git a/lfsapi/schema/fixture/invalid.json b/lfsapi/schema/fixture/invalid.json new file mode 100644 index 00000000..09211088 --- /dev/null +++ b/lfsapi/schema/fixture/invalid.json @@ -0,0 +1,5 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "not-a-type" +} diff --git a/lfsapi/schema/fixture/valid.json b/lfsapi/schema/fixture/valid.json new file mode 100644 index 00000000..2c2915cd --- /dev/null +++ b/lfsapi/schema/fixture/valid.json @@ -0,0 +1,7 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "number", + "minimum": 0, + "exclusiveMinimum": false +} diff --git a/lfsapi/schema/reader.go b/lfsapi/schema/reader.go new file mode 100644 index 00000000..db2645a3 --- /dev/null +++ b/lfsapi/schema/reader.go @@ -0,0 +1,103 @@ +package schema + +import ( + "bytes" + "io" + "strings" + "sync/atomic" + + "github.com/git-lfs/git-lfs/errors" + "github.com/xeipuuv/gojsonschema" +) + +var ( + // errValidationIncomplete is an error returned when `ValidationErr()` + // is called while the reader is still processing data. + errValidationIncomplete = errors.New("lfsapi/schema: validation incomplete") +) + +// state represents the set of valid states a `*Reader` (see below) can be in +type state uint32 + +const ( + // stateNotStarted means the `*Reader` has no processed any data + stateNotStarted state = iota + // stateProcessing means the `*Reader` has received a `Read()` call at + // least once, but has not received an `io.EOF` yet. + stateProcessing + // stateProcessed means the `*Reader` has received a `Read()` call at + // least once and has gotten an `io.EOF`, meaning there is no more data + // to process. + stateProcessed +) + +type Reader struct { + // r is the underlying io.Reader this one is wrapping. + r io.Reader + // buf is the buffer of data read from the underlying reader + buf *bytes.Buffer + // schema is the *gojsonschema.Schema to valid the buffer against + schema *gojsonschema.Schema + + // state is the current state that this `*Reader` is in, and is updated + // atomically through `atomic.SetUint32`, and etc. + state uint32 + + // result stores the result of the schema validation + result *gojsonschema.Result + // resultErr stores the (optional) error returned from the schema + // validation + resultErr error +} + +var _ io.Reader = (*Reader)(nil) + +// Read implements io.Reader.Read, and returns exactly the data received from +// the underlying reader. 
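// A typical flow (a sketch only, mirroring this package's tests; `s` is an
// assumed *Schema obtained from FromJSON and `payload` an assumed io.Reader):
// drain the Reader, then ask for the validation result:
//
//	r := s.Reader(payload)
//	io.Copy(ioutil.Discard, r)
//	if err := r.ValidationErr(); err != nil {
//		// the payload did not conform to the schema
//	}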
+// +// Read also sometimes advances state, as according to the valid instances of +// the `state` from above. If transitioning into the `stateProcessed` state, the +// schema will be validated. +func (r *Reader) Read(p []byte) (n int, err error) { + atomic.CompareAndSwapUint32(&r.state, uint32(stateNotStarted), uint32(stateProcessing)) + + n, err = r.r.Read(p) + if err == io.EOF { + got := gojsonschema.NewStringLoader(r.buf.String()) + r.result, r.resultErr = r.schema.Validate(got) + + atomic.CompareAndSwapUint32(&r.state, uint32(stateProcessing), uint32(stateProcessed)) + } + + return +} + +// ValidationErr returns an error assosciated with validating the data. If +// there was an error performing the validation itself, that error will be +// returned with priority. If the validation has not started, or is incomplete, +// an appropriate error will be returned. +// +// Otherwise, if any validation errors were present, an error will be returned +// containing all of the validation errors. If the data passed validation, a +// value of 'nil' will be returned instead. +func (r *Reader) ValidationErr() error { + if r.resultErr != nil { + return r.resultErr + } else { + switch state(atomic.LoadUint32(&r.state)) { + case stateNotStarted, stateProcessing: + return errValidationIncomplete + } + } + + if r.result.Valid() { + return nil + } + + msg := "Validation errors:\n" + for _, e := range r.result.Errors() { + msg = strings.Join([]string{msg, e.Description()}, "\n") + } + + return errors.New(msg) +} diff --git a/lfsapi/schema/reader_test.go b/lfsapi/schema/reader_test.go new file mode 100644 index 00000000..c06c5842 --- /dev/null +++ b/lfsapi/schema/reader_test.go @@ -0,0 +1,55 @@ +package schema + +import ( + "io" + "io/ioutil" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSchemaReaderWithValidPayload(t *testing.T) { + schema, err := FromJSON(ValidSchemaPath) + require.Nil(t, err) + + r := schema.Reader(strings.NewReader("1")) + io.Copy(ioutil.Discard, r) + + assert.Nil(t, r.ValidationErr()) +} + +func TestSchemaReaderWithInvalidPayload(t *testing.T) { + schema, err := FromJSON(ValidSchemaPath) + require.Nil(t, err) + + r := schema.Reader(strings.NewReader("-1")) + io.Copy(ioutil.Discard, r) + + assert.NotNil(t, r.ValidationErr()) +} + +func TestSchemaReaderBeforeValidation(t *testing.T) { + schema, err := FromJSON(ValidSchemaPath) + require.Nil(t, err) + + r := schema.Reader(strings.NewReader("1")) + + assert.Equal(t, errValidationIncomplete, r.ValidationErr()) +} + +func TestSchemaReaderDuringValidation(t *testing.T) { + schema, err := FromJSON(ValidSchemaPath) + require.Nil(t, err) + + r := schema.Reader(strings.NewReader("12")) + + var b [1]byte + n, err := r.Read(b[:]) + + assert.Equal(t, 1, n) + assert.Nil(t, err) + + assert.Equal(t, errValidationIncomplete, r.ValidationErr()) +} diff --git a/lfsapi/schema/schema.go b/lfsapi/schema/schema.go new file mode 100644 index 00000000..ba9ec7d3 --- /dev/null +++ b/lfsapi/schema/schema.go @@ -0,0 +1,68 @@ +package schema + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/xeipuuv/gojsonschema" +) + +// Schema holds a JSON schema to be used for validation against various +// payloads. +type Schema struct { + // s is the internal handle on the implementation of the JSON schema + // specification. 
+ s *gojsonschema.Schema +} + +// FromJSON constructs a new `*Schema` instance from the JSON schema at +// `schemaPath` relative to the package this code was called from. +// +// If the file could not be accessed, or was unable to be parsed as a valid JSON +// schema, an appropriate error will be returned. Otherwise, the `*Schema` will +// be returned with a nil error. +func FromJSON(schemaPath string) (*Schema, error) { + dir, err := os.Getwd() + if err != nil { + return nil, err + } + + dir = filepath.ToSlash(dir) + schemaPath = filepath.Join(dir, schemaPath) + + if _, err := os.Stat(schemaPath); err != nil { + return nil, err + } + + schema, err := gojsonschema.NewSchema(gojsonschema.NewReferenceLoader( + // Platform compatibility: use "/" separators always for file:// + fmt.Sprintf("file:///%s", filepath.ToSlash(schemaPath)), + )) + if err != nil { + return nil, err + } + + return &Schema{schema}, nil +} + +// Reader wraps the given `io.Reader`, "r" as a `*schema.Reader`, allowing the +// contents passed through the reader to be inspected as conforming to the JSON +// schema or not. +// +// If the reader "r" already _is_ a `*schema.Reader`, it will be returned as-is. +func (s *Schema) Reader(r io.Reader) *Reader { + if sr, ok := r.(*Reader); ok { + return sr + } + + rdr := &Reader{ + buf: new(bytes.Buffer), + schema: s.s, + } + rdr.r = io.TeeReader(r, rdr.buf) + + return rdr +} diff --git a/lfsapi/schema/schema_test.go b/lfsapi/schema/schema_test.go new file mode 100644 index 00000000..a6ebe46b --- /dev/null +++ b/lfsapi/schema/schema_test.go @@ -0,0 +1,46 @@ +package schema + +import ( + "bytes" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + ValidSchemaPath = "fixture/valid.json" + InvalidSchemaPath = "fixture/invalid.json" + MissingSchemaPath = "fixture/missing.json" +) + +func TestCreatingAValidSchema(t *testing.T) { + _, err := FromJSON(ValidSchemaPath) + + assert.Nil(t, err) +} + +func TestCreatingAMissingSchema(t *testing.T) { + _, err := FromJSON(MissingSchemaPath) + + assert.NotNil(t, err) + assert.True(t, os.IsNotExist(err)) +} + +func TestCreatingAnInvalidSchema(t *testing.T) { + _, err := FromJSON(InvalidSchemaPath) + + assert.NotNil(t, err) + assert.Contains(t, err.Error(), "not-a-type is not a valid type") +} + +func TestWrappingASchemaReader(t *testing.T) { + s, err := FromJSON(ValidSchemaPath) + require.Nil(t, err) + + sr := s.Reader(new(bytes.Buffer)) + wrapped := s.Reader(sr) + + assert.Equal(t, sr, wrapped) +} diff --git a/auth/ssh.go b/lfsapi/ssh.go similarity index 59% rename from auth/ssh.go rename to lfsapi/ssh.go index a09e4ceb..13ebefba 100644 --- a/auth/ssh.go +++ b/lfsapi/ssh.go @@ -1,4 +1,4 @@ -package auth +package lfsapi import ( "bytes" @@ -8,33 +8,27 @@ import ( "path/filepath" "strings" - "github.com/git-lfs/git-lfs/config" "github.com/rubyist/tracerx" ) -type SshAuthResponse struct { - Message string `json:"-"` - Href string `json:"href"` - Header map[string]string `json:"header"` - ExpiresAt string `json:"expires_at"` -} - -func SshAuthenticate(cfg *config.Configuration, operation, oid string) (SshAuthResponse, config.Endpoint, error) { - // This is only used as a fallback where the Git URL is SSH but server doesn't support a full SSH binary protocol - // and therefore we derive a HTTPS endpoint for binaries instead; but check authentication here via SSH - - endpoint := cfg.Endpoint(operation) - res := SshAuthResponse{} - if len(endpoint.SshUserAndHost) == 0 { - return res, endpoint, 
nil +func (c *Client) resolveSSHEndpoint(e Endpoint, method string) (sshAuthResponse, error) { + res := sshAuthResponse{} + if len(e.SshUserAndHost) == 0 { + return res, nil } - tracerx.Printf("ssh: %s git-lfs-authenticate %s %s %s", - endpoint.SshUserAndHost, endpoint.SshPath, operation, oid) + operation := "upload" + switch method { + case "GET", "HEAD": + operation = "download" + } - exe, args := sshGetExeAndArgs(cfg, endpoint) + tracerx.Printf("ssh: %s git-lfs-authenticate %s %s", + e.SshUserAndHost, e.SshPath, operation) + + exe, args := sshGetExeAndArgs(c.osEnv, e) args = append(args, - fmt.Sprintf("git-lfs-authenticate %s %s %s", endpoint.SshPath, operation, oid)) + fmt.Sprintf("git-lfs-authenticate %s %s", e.SshPath, operation)) cmd := exec.Command(exe, args...) @@ -56,21 +50,24 @@ func SshAuthenticate(cfg *config.Configuration, operation, oid string) (SshAuthR err = json.Unmarshal(outbuf.Bytes(), &res) } - return res, endpoint, err + return res, err +} + +type sshAuthResponse struct { + Message string `json:"-"` + Href string `json:"href"` + Header map[string]string `json:"header"` + ExpiresAt string `json:"expires_at"` } // Return the executable name for ssh on this machine and the base args // Base args includes port settings, user/host, everything pre the command to execute -func sshGetExeAndArgs(cfg *config.Configuration, endpoint config.Endpoint) (exe string, baseargs []string) { - if len(endpoint.SshUserAndHost) == 0 { - return "", nil - } - +func sshGetExeAndArgs(osEnv Env, e Endpoint) (exe string, baseargs []string) { isPlink := false isTortoise := false - ssh, _ := cfg.Os.Get("GIT_SSH") - sshCmd, _ := cfg.Os.Get("GIT_SSH_COMMAND") + ssh, _ := osEnv.Get("GIT_SSH") + sshCmd, _ := osEnv.Get("GIT_SSH_COMMAND") cmdArgs := strings.Fields(sshCmd) if len(cmdArgs) > 0 { ssh = cmdArgs[0] @@ -99,15 +96,15 @@ func sshGetExeAndArgs(cfg *config.Configuration, endpoint config.Endpoint) (exe args = append(args, "-batch") } - if len(endpoint.SshPort) > 0 { + if len(e.SshPort) > 0 { if isPlink || isTortoise { args = append(args, "-P") } else { args = append(args, "-p") } - args = append(args, endpoint.SshPort) + args = append(args, e.SshPort) } - args = append(args, endpoint.SshUserAndHost) + args = append(args, e.SshUserAndHost) return ssh, args } diff --git a/auth/ssh_test.go b/lfsapi/ssh_test.go similarity index 52% rename from auth/ssh_test.go rename to lfsapi/ssh_test.go index ec706bd9..5035aa6d 100644 --- a/auth/ssh_test.go +++ b/lfsapi/ssh_test.go @@ -1,42 +1,40 @@ -package auth +package lfsapi import ( "path/filepath" "testing" - "github.com/git-lfs/git-lfs/config" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSSHGetExeAndArgsSsh(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "", - "GIT_SSH": "", - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "", + "GIT_SSH": "", + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCustomPort(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "", - "GIT_SSH": "", - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "", + 
"GIT_SSH": "", + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "ssh", exe) assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) } @@ -44,17 +42,16 @@ func TestSSHGetExeAndArgsSshCustomPort(t *testing.T) { func TestSSHGetExeAndArgsPlink(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "", - "GIT_SSH": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "", + "GIT_SSH": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"user@foo.com"}, args) } @@ -62,18 +59,17 @@ func TestSSHGetExeAndArgsPlink(t *testing.T) { func TestSSHGetExeAndArgsPlinkCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "", - "GIT_SSH": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "", + "GIT_SSH": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } @@ -81,17 +77,16 @@ func TestSSHGetExeAndArgsPlinkCustomPort(t *testing.T) { func TestSSHGetExeAndArgsTortoisePlink(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "", - "GIT_SSH": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "", + "GIT_SSH": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "user@foo.com"}, args) } @@ -99,65 +94,61 @@ func TestSSHGetExeAndArgsTortoisePlink(t *testing.T) { func TestSSHGetExeAndArgsTortoisePlinkCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "", - "GIT_SSH": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "", + "GIT_SSH": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) } func 
TestSSHGetExeAndArgsSshCommandPrecedence(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "sshcmd", - "GIT_SSH": "bad", - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "sshcmd", + "GIT_SSH": "bad", + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandArgs(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "sshcmd --args 1", - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "sshcmd --args 1", + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"--args", "1", "user@foo.com"}, args) } func TestSSHGetExeAndArgsSshCommandCustomPort(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": "sshcmd", - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": "sshcmd", + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, "sshcmd", exe) assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) } @@ -165,16 +156,15 @@ func TestSSHGetExeAndArgsSshCommandCustomPort(t *testing.T) { func TestSSHGetExeAndArgsPlinkCommand(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"user@foo.com"}, args) } @@ -182,17 +172,16 @@ func TestSSHGetExeAndArgsPlinkCommand(t *testing.T) { func TestSSHGetExeAndArgsPlinkCommandCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "plink") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) } @@ -200,16 +189,15 @@ func TestSSHGetExeAndArgsPlinkCommandCustomPort(t *testing.T) { func TestSSHGetExeAndArgsTortoisePlinkCommand(t *testing.T) { plink := 
filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "user@foo.com"}, args) } @@ -217,17 +205,16 @@ func TestSSHGetExeAndArgsTortoisePlinkCommand(t *testing.T) { func TestSSHGetExeAndArgsTortoisePlinkCommandCustomPort(t *testing.T) { plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") - cfg := config.NewFrom(config.Values{ - Os: map[string]string{ - "GIT_SSH_COMMAND": plink, - }, - }) + cli, err := NewClient(TestEnv(map[string]string{ + "GIT_SSH_COMMAND": plink, + }), nil) + require.Nil(t, err) - endpoint := cfg.Endpoint("download") + endpoint := cli.Endpoints.Endpoint("download", "") endpoint.SshUserAndHost = "user@foo.com" endpoint.SshPort = "8888" - exe, args := sshGetExeAndArgs(cfg, endpoint) + exe, args := sshGetExeAndArgs(cli.OSEnv(), endpoint) assert.Equal(t, plink, exe) assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) } diff --git a/lfsapi/stats.go b/lfsapi/stats.go new file mode 100644 index 00000000..3c7fe98f --- /dev/null +++ b/lfsapi/stats.go @@ -0,0 +1,111 @@ +package lfsapi + +import ( + "fmt" + "io" + "net/http" + "net/http/httputil" + "time" +) + +type httpTransferStats struct { + HeaderSize int + BodySize int64 + Start time.Time + Stop time.Time +} + +type httpTransfer struct { + requestStats *httpTransferStats + responseStats *httpTransferStats +} + +// LogStats is intended to be called after all HTTP operations for the +// commmand have finished. It dumps k/v logs, one line per httpTransfer into +// a log file with the current timestamp. 
+func (c *Client) LogStats(out io.Writer) { + if !c.LoggingStats { + return + } + + fmt.Fprintf(out, "concurrent=%d time=%d version=%s\n", c.ConcurrentTransfers, time.Now().Unix(), UserAgent) + + for key, responses := range c.transferBuckets { + for _, response := range responses { + stats := c.transfers[response] + fmt.Fprintf(out, "key=%s reqheader=%d reqbody=%d resheader=%d resbody=%d restime=%d status=%d url=%s\n", + key, + stats.requestStats.HeaderSize, + stats.requestStats.BodySize, + stats.responseStats.HeaderSize, + stats.responseStats.BodySize, + stats.responseStats.Stop.Sub(stats.responseStats.Start).Nanoseconds(), + response.StatusCode, + response.Request.URL) + } + } +} + +func (c *Client) LogResponse(key string, res *http.Response) { + if !c.LoggingStats { + return + } + + c.transferBucketMu.Lock() + defer c.transferBucketMu.Unlock() + + if c.transferBuckets == nil { + c.transferBuckets = make(map[string][]*http.Response) + } + + c.transferBuckets[key] = append(c.transferBuckets[key], res) +} + +func (c *Client) startResponseStats(res *http.Response, start time.Time) { + if !c.LoggingStats { + return + } + + reqHeaderSize := 0 + resHeaderSize := 0 + + if dump, err := httputil.DumpRequest(res.Request, false); err == nil { + reqHeaderSize = len(dump) + } + + if dump, err := httputil.DumpResponse(res, false); err == nil { + resHeaderSize = len(dump) + } + + reqstats := &httpTransferStats{HeaderSize: reqHeaderSize, BodySize: res.Request.ContentLength} + + // Response body size cannot be figured until it is read. Do not rely on a Content-Length + // header because it may not exist or be -1 in the case of chunked responses. + resstats := &httpTransferStats{HeaderSize: resHeaderSize, Start: start} + t := &httpTransfer{requestStats: reqstats, responseStats: resstats} + + c.transferMu.Lock() + if c.transfers == nil { + c.transfers = make(map[*http.Response]*httpTransfer) + } + c.transfers[res] = t + c.transferMu.Unlock() +} + +func (c *Client) finishResponseStats(res *http.Response, bodySize int64) { + if !c.LoggingStats || res == nil { + return + } + + c.transferMu.Lock() + defer c.transferMu.Unlock() + + if c.transfers == nil { + return + } + + if transfer, ok := c.transfers[res]; ok { + transfer.responseStats.BodySize = bodySize + transfer.responseStats.Stop = time.Now() + } +} diff --git a/lfsapi/stats_test.go b/lfsapi/stats_test.go new file mode 100644 index 00000000..d33cac0b --- /dev/null +++ b/lfsapi/stats_test.go @@ -0,0 +1,168 @@ +package lfsapi + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStatsWithBucket(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) + body := &verboseTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Verbose", body.Test) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"Status":"Ok"}`)) + })) + defer srv.Close() + + c := &Client{ + ConcurrentTransfers: 5, + LoggingStats: true, + } + + req, err := http.NewRequest("POST", srv.URL, nil) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/json") + require.Nil(t, err) + 
require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) + + res, err := c.Do(req) + require.Nil(t, err) + + c.LogResponse("stats-test", res) + + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + + out := &bytes.Buffer{} + c.LogStats(out) + + stats := strings.TrimSpace(out.String()) + t.Log(stats) + lines := strings.Split(stats, "\n") + assert.Equal(t, 2, len(lines)) + assert.True(t, strings.Contains(lines[0], "concurrent=5")) + expected := []string{ + "key=stats-test", + "reqheader=", + "reqbody=18", + "resheader=", + "resbody=15", + "restime=", + "status=200", + "url=" + srv.URL, + } + + for _, substr := range expected { + assert.True(t, strings.Contains(lines[1], substr), "missing: "+substr) + } +} + +func TestStatsWithoutBucket(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) + body := &verboseTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Verbose", body.Test) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"Status":"Ok"}`)) + })) + defer srv.Close() + + c := &Client{ + ConcurrentTransfers: 5, + LoggingStats: true, + } + + req, err := http.NewRequest("POST", srv.URL, nil) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/json") + require.Nil(t, err) + require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) + + res, err := c.Do(req) + require.Nil(t, err) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + + out := &bytes.Buffer{} + c.LogStats(out) + + stats := strings.TrimSpace(out.String()) + t.Log(stats) + assert.True(t, strings.Contains(stats, "concurrent=5")) + assert.Equal(t, 1, len(strings.Split(stats, "\n"))) +} + +func TestStatsDisabled(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) + body := &verboseTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Verbose", body.Test) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"Status":"Ok"}`)) + })) + defer srv.Close() + + c := &Client{ + ConcurrentTransfers: 5, + LoggingStats: false, + } + + req, err := http.NewRequest("POST", srv.URL, nil) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/json") + require.Nil(t, err) + require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) + + res, err := c.Do(req) + require.Nil(t, err) + + c.LogResponse("stats-test", res) + + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + + out := &bytes.Buffer{} + c.LogStats(out) + assert.Equal(t, 0, out.Len()) +} diff --git a/lfsapi/verbose.go b/lfsapi/verbose.go new file mode 100644 index 00000000..633e4945 --- /dev/null +++ b/lfsapi/verbose.go @@ -0,0 +1,152 @@ +package lfsapi + +import ( + "bufio" + "bytes" + "fmt" + "io" + "net/http" + "net/http/httputil" + "strings" + + 
"github.com/rubyist/tracerx" +) + +func (c *Client) traceRequest(req *http.Request) { + tracerx.Printf("HTTP: %s", traceReq(req)) + + if !c.Verbose { + return + } + + if dump, err := httputil.DumpRequest(req, false); err == nil { + c.traceHTTPDump(">", dump) + } +} + +func (c *Client) prepareRequestBody(req *http.Request) error { + body, ok := req.Body.(ReadSeekCloser) + if body != nil && !ok { + return fmt.Errorf("Request body must implement io.ReadCloser and io.Seeker. Got: %T", body) + } + + if body != nil && ok { + body.Seek(0, io.SeekStart) + req.Body = &tracedRequest{ + verbose: c.Verbose && isTraceableContent(req.Header), + verboseOut: c.VerboseOut, + ReadSeekCloser: body, + } + } + + return nil +} + +type tracedRequest struct { + verbose bool + verboseOut io.Writer + ReadSeekCloser +} + +func (r *tracedRequest) Read(b []byte) (int, error) { + n, err := tracedRead(r.ReadSeekCloser, b, r.verboseOut, false, r.verbose) + return n, err +} + +func (c *Client) traceResponse(res *http.Response) { + if res == nil { + return + } + + tracerx.Printf("HTTP: %d", res.StatusCode) + + verboseBody := isTraceableContent(res.Header) + res.Body = &tracedResponse{ + client: c, + response: res, + gitTrace: verboseBody, + verbose: verboseBody && c.Verbose, + verboseOut: c.VerboseOut, + ReadCloser: res.Body, + } + + if !c.Verbose { + return + } + + if dump, err := httputil.DumpResponse(res, false); err == nil { + if verboseBody { + fmt.Fprintf(c.VerboseOut, "\n\n") + } else { + fmt.Fprintf(c.VerboseOut, "\n") + } + c.traceHTTPDump("<", dump) + } +} + +type tracedResponse struct { + Count int + client *Client + response *http.Response + verbose bool + gitTrace bool + verboseOut io.Writer + io.ReadCloser +} + +func (r *tracedResponse) Read(b []byte) (int, error) { + n, err := tracedRead(r.ReadCloser, b, r.verboseOut, r.gitTrace, r.verbose) + r.Count += n + + if err == io.EOF { + r.client.finishResponseStats(r.response, int64(r.Count)) + } + return n, err +} + +func tracedRead(r io.Reader, b []byte, verboseOut io.Writer, gitTrace, verbose bool) (int, error) { + n, err := r.Read(b) + if err == nil || err == io.EOF { + if n > 0 && (gitTrace || verbose) { + chunk := string(b[0:n]) + if gitTrace { + tracerx.Printf("HTTP: %s", chunk) + } + + if verbose { + fmt.Fprint(verboseOut, chunk) + } + } + } + + return n, err +} + +func (c *Client) traceHTTPDump(direction string, dump []byte) { + scanner := bufio.NewScanner(bytes.NewBuffer(dump)) + + for scanner.Scan() { + line := scanner.Text() + if !c.DebuggingVerbose && strings.HasPrefix(strings.ToLower(line), "authorization: basic") { + fmt.Fprintf(c.VerboseOut, "%s Authorization: Basic * * * * *\n", direction) + } else { + fmt.Fprintf(c.VerboseOut, "%s %s\n", direction, line) + } + } +} + +var tracedTypes = []string{"json", "text", "xml", "html"} + +func isTraceableContent(h http.Header) bool { + ctype := strings.ToLower(strings.SplitN(h.Get("Content-Type"), ";", 2)[0]) + for _, tracedType := range tracedTypes { + if strings.Contains(ctype, tracedType) { + return true + } + } + return false +} + +func traceReq(req *http.Request) string { + return fmt.Sprintf("%s %s", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0]) +} diff --git a/lfsapi/verbose_test.go b/lfsapi/verbose_test.go new file mode 100644 index 00000000..f12b408a --- /dev/null +++ b/lfsapi/verbose_test.go @@ -0,0 +1,234 @@ +package lfsapi + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "strings" + "sync/atomic" + "testing" + + 
"github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type verboseTest struct { + Test string +} + +func TestVerboseEnabled(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) + body := &verboseTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Verbose", body.Test) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"Status":"Ok"}`)) + })) + defer srv.Close() + + out := &bytes.Buffer{} + c := &Client{ + Verbose: true, + VerboseOut: out, + } + + req, err := http.NewRequest("POST", srv.URL, nil) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/json") + require.Nil(t, err) + require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) + + res, err := c.Do(req) + require.Nil(t, err) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + + s := out.String() + t.Log(s) + + expected := []string{ + "> Host: 127.0.0.1:", + "\n> Authorization: Basic * * * * *\n", + "\n> Content-Type: application/json\n", + "\n> \n" + `{"Test":"Verbose"}` + "\n\n", + + "\n< HTTP/1.1 200 OK\n", + "\n< Content-Type: application/json\n", + "\n< \n" + `{"Status":"Ok"}`, + } + + for _, substr := range expected { + if !assert.True(t, strings.Contains(s, substr)) { + t.Logf("missing: %q", substr) + } + } +} + +func TestVerboseWithBinaryBody(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) + by, err := ioutil.ReadAll(r.Body) + assert.Nil(t, err) + assert.Equal(t, "binary-request", string(by)) + w.Header().Set("Content-Type", "application/octet-stream") + w.Write([]byte(`binary-response`)) + })) + defer srv.Close() + + out := &bytes.Buffer{} + c := &Client{ + Verbose: true, + VerboseOut: out, + } + + buf := bytes.NewBufferString("binary-request") + req, err := http.NewRequest("POST", srv.URL, buf) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/octet-stream") + require.Nil(t, err) + + res, err := c.Do(req) + require.Nil(t, err) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + + s := out.String() + t.Log(s) + + expected := []string{ + "> Host: 127.0.0.1:", + "\n> Authorization: Basic * * * * *\n", + "\n> Content-Type: application/octet-stream\n", + + "\n< HTTP/1.1 200 OK\n", + "\n< Content-Type: application/octet-stream\n", + } + + for _, substr := range expected { + if !assert.True(t, strings.Contains(s, substr)) { + t.Logf("missing: %q", substr) + } + } + + assert.False(t, strings.Contains(s, "binary"), "contains binary request or response body") +} + +func TestVerboseEnabledWithDebugging(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", 
r.Header.Get("Authorization")) + body := &verboseTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Verbose", body.Test) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"Status":"Ok"}`)) + })) + defer srv.Close() + + out := &bytes.Buffer{} + c := &Client{ + Verbose: true, + VerboseOut: out, + DebuggingVerbose: true, + } + + req, err := http.NewRequest("POST", srv.URL, nil) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/json") + require.Nil(t, err) + require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) + + res, err := c.Do(req) + require.Nil(t, err) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + + s := out.String() + t.Log(s) + + expected := []string{ + "> Host: 127.0.0.1:", + "\n> Authorization: Basic ABC\n", + "\n> Content-Type: application/json\n", + "\n> \n" + `{"Test":"Verbose"}` + "\n\n", + + "\n< HTTP/1.1 200 OK\n", + "\n< Content-Type: application/json\n", + "\n< \n" + `{"Status":"Ok"}`, + } + + for _, substr := range expected { + if !assert.True(t, strings.Contains(s, substr)) { + t.Logf("missing: %q", substr) + } + } +} + +func TestVerboseDisabled(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + atomic.AddUint32(&called, 1) + t.Logf("srv req %s %s", r.Method, r.URL.Path) + assert.Equal(t, "POST", r.Method) + + assert.Equal(t, "Basic ABC", r.Header.Get("Authorization")) + body := &verboseTest{} + err := json.NewDecoder(r.Body).Decode(body) + assert.Nil(t, err) + assert.Equal(t, "Verbose", body.Test) + w.Header().Set("Content-Type", "application/json") + w.Write([]byte(`{"Status":"Ok"}`)) + })) + defer srv.Close() + + out := &bytes.Buffer{} + c := &Client{ + Verbose: false, + VerboseOut: out, + DebuggingVerbose: true, + } + + req, err := http.NewRequest("POST", srv.URL, nil) + req.Header.Set("Authorization", "Basic ABC") + req.Header.Set("Content-Type", "application/json") + require.Nil(t, err) + require.Nil(t, MarshalToRequest(req, verboseTest{"Verbose"})) + + res, err := c.Do(req) + require.Nil(t, err) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + assert.Equal(t, 200, res.StatusCode) + assert.EqualValues(t, 1, called) + assert.EqualValues(t, 0, out.Len(), out.String()) +} diff --git a/locking/api.go b/locking/api.go new file mode 100644 index 00000000..20dbde2c --- /dev/null +++ b/locking/api.go @@ -0,0 +1,202 @@ +package locking + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/git-lfs/git-lfs/lfsapi" +) + +type lockClient struct { + *lfsapi.Client +} + +// LockRequest encapsulates the payload sent across the API when a client would +// like to obtain a lock against a particular path on a given remote. +type lockRequest struct { + // Path is the path that the client would like to obtain a lock against. + Path string `json:"path"` + // LatestRemoteCommit is the SHA of the last known commit from the + // remote that we are trying to create the lock against, as found in + // `.git/refs/origin/`. + LatestRemoteCommit string `json:"latest_remote_commit"` + // Committer is the individual that wishes to obtain the lock. + Committer committer `json:"committer"` +} + +// LockResponse encapsulates the information sent over the API in response to +// a `LockRequest`. 
+type lockResponse struct { + // Lock is the Lock that was optionally created in response to the + // payload that was sent (see above). If the lock already exists, then + // the existing lock is sent in this field instead, and the author of + // that lock remains the same, meaning that the client failed to obtain + // that lock. An HTTP status of "409 - Conflict" is used here. + // + // If the lock was unable to be created, this field will hold the + // zero-value of Lock and the Err field will provide a more detailed set + // of information. + // + // If an error was experienced in creating this lock, then the + // zero-value of Lock should be sent here instead. + Lock *Lock `json:"lock"` + // CommitNeeded holds the minimum commit SHA that client must have to + // obtain the lock. + CommitNeeded string `json:"commit_needed,omitempty"` + // Err is the optional error that was encountered while trying to create + // the above lock. + Err string `json:"error,omitempty"` +} + +func (c *lockClient) Lock(remote string, lockReq *lockRequest) (*lockResponse, *http.Response, error) { + e := c.Endpoints.Endpoint("upload", remote) + req, err := c.NewRequest("POST", e, "locks", lockReq) + if err != nil { + return nil, nil, err + } + + res, err := c.DoWithAuth(remote, req) + if err != nil { + return nil, res, err + } + + lockRes := &lockResponse{} + return lockRes, res, lfsapi.DecodeJSON(res, lockRes) +} + +// UnlockRequest encapsulates the data sent in an API request to remove a lock. +type unlockRequest struct { + // Id is the Id of the lock that the user wishes to unlock. + Id string `json:"id"` + + // Force determines whether or not the lock should be "forcibly" + // unlocked; that is to say whether or not a given individual should be + // able to break a different individual's lock. + Force bool `json:"force"` +} + +// UnlockResponse is the result sent back from the API when asked to remove a +// lock. +type unlockResponse struct { + // Lock is the lock corresponding to the asked-about lock in the + // `UnlockPayload` (see above). If no matching lock was found, this + // field will take the zero-value of Lock, and Err will be non-nil. + Lock *Lock `json:"lock"` + // Err is an optional field which holds any error that was experienced + // while removing the lock. + Err string `json:"error,omitempty"` +} + +func (c *lockClient) Unlock(remote, id string, force bool) (*unlockResponse, *http.Response, error) { + e := c.Endpoints.Endpoint("upload", remote) + suffix := fmt.Sprintf("locks/%s/unlock", id) + req, err := c.NewRequest("POST", e, suffix, &unlockRequest{Id: id, Force: force}) + if err != nil { + return nil, nil, err + } + + res, err := c.DoWithAuth(remote, req) + if err != nil { + return nil, res, err + } + + unlockRes := &unlockResponse{} + err = lfsapi.DecodeJSON(res, unlockRes) + return unlockRes, res, err +} + +// Filter represents a single qualifier to apply against a set of locks. +type lockFilter struct { + // Property is the property to search against. + // Value is the value that the property must take. + Property, Value string +} + +// LockSearchRequest encapsulates the request sent to the server when the client +// would like a list of locks that match the given criteria. +type lockSearchRequest struct { + // Filters is the set of filters to query against. If the client wishes + // to obtain a list of all locks, an empty array should be passed here. 
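// For example (an illustrative filter; the path value is invented), a search
// for locks on a single path would be expressed as:
//
//	Filters: []lockFilter{{Property: "path", Value: "foo/bar.dat"}}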
+ Filters []lockFilter + // Cursor is an optional field used to tell the server which lock was + // seen last, if scanning through multiple pages of results. + // + // Servers must return a list of locks sorted in reverse chronological + // order, so the Cursor provides a consistent method of viewing all + // locks, even if more were created between two requests. + Cursor string + // Limit is the maximum number of locks to return in a single page. + Limit int +} + +func (r *lockSearchRequest) QueryValues() map[string]string { + q := make(map[string]string) + for _, filter := range r.Filters { + q[filter.Property] = filter.Value + } + + if len(r.Cursor) > 0 { + q["cursor"] = r.Cursor + } + + if r.Limit > 0 { + q["limit"] = strconv.Itoa(r.Limit) + } + + return q +} + +// LockList encapsulates a set of Locks. +type lockList struct { + // Locks is the set of locks returned back, typically matching the query + // parameters sent in the LockListRequest call. If no locks were matched + // from a given query, then `Locks` will be represented as an empty + // array. + Locks []Lock `json:"locks"` + // NextCursor returns the Id of the Lock the client should update its + // cursor to, if there are multiple pages of results for a particular + // `LockListRequest`. + NextCursor string `json:"next_cursor,omitempty"` + // Err populates any error that was encountered during the search. If no + // error was encountered and the operation was succesful, then a value + // of nil will be passed here. + Err string `json:"error,omitempty"` +} + +func (c *lockClient) Search(remote string, searchReq *lockSearchRequest) (*lockList, *http.Response, error) { + e := c.Endpoints.Endpoint("upload", remote) + req, err := c.NewRequest("GET", e, "locks", nil) + if err != nil { + return nil, nil, err + } + + q := req.URL.Query() + for key, value := range searchReq.QueryValues() { + q.Add(key, value) + } + req.URL.RawQuery = q.Encode() + + res, err := c.DoWithAuth(remote, req) + if err != nil { + return nil, res, err + } + + locks := &lockList{} + err = lfsapi.DecodeJSON(res, locks) + return locks, res, err +} + +// Committer represents a "First Last " pair. +type committer struct { + // Name is the name of the individual who would like to obtain the + // lock, for instance: "Rick Olson". + Name string `json:"name"` + // Email is the email assopsicated with the individual who would + // like to obtain the lock, for instance: "rick@github.com". 
+ Email string `json:"email"` +} + +func newCommitter(name, email string) committer { + return committer{Name: name, Email: email} +} diff --git a/locking/api_test.go b/locking/api_test.go new file mode 100644 index 00000000..44f6aa7f --- /dev/null +++ b/locking/api_test.go @@ -0,0 +1,142 @@ +package locking + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/git-lfs/git-lfs/lfsapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAPILock(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/locks" { + w.WriteHeader(404) + return + } + + assert.Equal(t, "POST", r.Method) + assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) + assert.Equal(t, lfsapi.MediaType, r.Header.Get("Content-Type")) + + lockReq := &lockRequest{} + err := json.NewDecoder(r.Body).Decode(lockReq) + r.Body.Close() + assert.Nil(t, err) + assert.Equal(t, "request", lockReq.Path) + + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(&lockResponse{ + Lock: &Lock{ + Id: "1", + Path: "response", + }, + }) + assert.Nil(t, err) + })) + defer srv.Close() + + c, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.url": srv.URL + "/api", + })) + require.Nil(t, err) + + lc := &lockClient{Client: c} + lockRes, res, err := lc.Lock("", &lockRequest{Path: "request"}) + require.Nil(t, err) + assert.Equal(t, 200, res.StatusCode) + assert.Equal(t, "1", lockRes.Lock.Id) + assert.Equal(t, "response", lockRes.Lock.Path) +} + +func TestAPIUnlock(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/locks/123/unlock" { + w.WriteHeader(404) + return + } + + assert.Equal(t, "POST", r.Method) + assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) + assert.Equal(t, lfsapi.MediaType, r.Header.Get("Content-Type")) + + unlockReq := &unlockRequest{} + err := json.NewDecoder(r.Body).Decode(unlockReq) + r.Body.Close() + assert.Nil(t, err) + assert.Equal(t, "123", unlockReq.Id) + assert.True(t, unlockReq.Force) + + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(&unlockResponse{ + Lock: &Lock{ + Id: "123", + Path: "response", + }, + }) + assert.Nil(t, err) + })) + defer srv.Close() + + c, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.url": srv.URL + "/api", + })) + require.Nil(t, err) + + lc := &lockClient{Client: c} + unlockRes, res, err := lc.Unlock("", "123", true) + require.Nil(t, err) + assert.Equal(t, 200, res.StatusCode) + assert.Equal(t, "123", unlockRes.Lock.Id) + assert.Equal(t, "response", unlockRes.Lock.Path) +} + +func TestAPISearch(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/locks" { + w.WriteHeader(404) + return + } + + assert.Equal(t, "GET", r.Method) + assert.Equal(t, lfsapi.MediaType, r.Header.Get("Accept")) + assert.Equal(t, "", r.Header.Get("Content-Type")) + + q := r.URL.Query() + assert.Equal(t, "A", q.Get("a")) + assert.Equal(t, "cursor", q.Get("cursor")) + assert.Equal(t, "5", q.Get("limit")) + + w.Header().Set("Content-Type", "application/json") + err := json.NewEncoder(w).Encode(&lockList{ + Locks: []Lock{ + {Id: "1"}, + {Id: "2"}, + }, + }) + assert.Nil(t, err) + })) + defer srv.Close() + + c, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.url": srv.URL + 
"/api", + })) + require.Nil(t, err) + + lc := &lockClient{Client: c} + locks, res, err := lc.Search("", &lockSearchRequest{ + Filters: []lockFilter{ + {Property: "a", Value: "A"}, + }, + Cursor: "cursor", + Limit: 5, + }) + require.Nil(t, err) + assert.Equal(t, 200, res.StatusCode) + assert.Equal(t, 2, len(locks.Locks)) + assert.Equal(t, "1", locks.Locks[0].Id) + assert.Equal(t, "2", locks.Locks[1].Id) +} diff --git a/locking/cache.go b/locking/cache.go index 765e7298..b169a69b 100644 --- a/locking/cache.go +++ b/locking/cache.go @@ -25,24 +25,6 @@ func NewLockCache(filepath string) (*LockCache, error) { return &LockCache{kv}, nil } -func (c *LockCache) encodeIdKey(id string) string { - // Safety against accidents - if !c.isIdKey(id) { - return idKeyPrefix + id - } - return id -} -func (c *LockCache) decodeIdKey(key string) string { - // Safety against accidents - if c.isIdKey(key) { - return key[len(idKeyPrefix):] - } - return key -} -func (c *LockCache) isIdKey(key string) bool { - return strings.HasPrefix(key, idKeyPrefix) -} - // Cache a successful lock for faster local lookup later func (c *LockCache) Add(l Lock) error { // Store reference in both directions @@ -99,3 +81,23 @@ func (c *LockCache) Clear() { func (c *LockCache) Save() error { return c.kv.Save() } + +func (c *LockCache) encodeIdKey(id string) string { + // Safety against accidents + if !c.isIdKey(id) { + return idKeyPrefix + id + } + return id +} + +func (c *LockCache) decodeIdKey(key string) string { + // Safety against accidents + if c.isIdKey(key) { + return key[len(idKeyPrefix):] + } + return key +} + +func (c *LockCache) isIdKey(key string) bool { + return strings.HasPrefix(key, idKeyPrefix) +} diff --git a/locking/locks.go b/locking/locks.go index e1dd8ac2..f0170534 100644 --- a/locking/locks.go +++ b/locking/locks.go @@ -1,15 +1,14 @@ package locking import ( - "errors" "fmt" "os" "path/filepath" "time" - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" + "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/git" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/tools/kv" ) @@ -22,31 +21,51 @@ var ( ErrLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") ) +type LockCacher interface { + Add(l Lock) error + RemoveByPath(filePath string) error + RemoveById(id string) error + Locks() []Lock + Clear() + Save() error +} + // Client is the main interface object for the locking package type Client struct { - cfg *config.Configuration - apiClient *api.Client - cache *LockCache + Remote string + client *lockClient + cache LockCacher } // NewClient creates a new locking client with the given configuration // You must call the returned object's `Close` method when you are finished with // it -func NewClient(cfg *config.Configuration) (*Client, error) { +func NewClient(remote string, lfsClient *lfsapi.Client) (*Client, error) { + return &Client{ + Remote: remote, + client: &lockClient{Client: lfsClient}, + cache: &nilLockCacher{}, + }, nil +} - apiClient := api.NewClient(api.NewHttpLifecycle(cfg)) - - lockDir := filepath.Join(config.LocalGitStorageDir, "lfs") - err := os.MkdirAll(lockDir, 0755) +func (c *Client) SetupFileCache(path string) error { + stat, err := os.Stat(path) if err != nil { - return nil, err + return errors.Wrap(err, "init lock cache") } - lockFile := filepath.Join(lockDir, "lockcache.db") + + lockFile := path + if stat.IsDir() { + lockFile = filepath.Join(path, "lockcache.db") + } + cache, err := NewLockCache(lockFile) if err != nil { - return 
nil, err + return errors.Wrap(err, "init lock cache") } - return &Client{cfg, apiClient, cache}, nil + + c.cache = cache + return nil } // Close this client instance; must be called to dispose of resources @@ -58,31 +77,30 @@ func (c *Client) Close() error { // path must be relative to the root of the repository // Returns the lock id if successful, or an error func (c *Client) LockFile(path string) (Lock, error) { - // TODO: this is not really the constraint we need to avoid merges, improve as per proposal latest, err := git.CurrentRemoteRef() if err != nil { return Lock{}, err } - s, resp := c.apiClient.Locks.Lock(&api.LockRequest{ + lockReq := &lockRequest{ Path: path, - Committer: api.NewCommitter(c.cfg.CurrentCommitter()), LatestRemoteCommit: latest.Sha, - }) - - if _, err := c.apiClient.Do(s); err != nil { - return Lock{}, fmt.Errorf("Error communicating with LFS API: %v", err) + Committer: newCommitter(c.client.CurrentUser()), } - if len(resp.Err) > 0 { - return Lock{}, fmt.Errorf("Server unable to create lock: %v", resp.Err) + lockRes, _, err := c.client.Lock(c.Remote, lockReq) + if err != nil { + return Lock{}, errors.Wrap(err, "api") } - lock := c.newLockFromApi(*resp.Lock) + if len(lockRes.Err) > 0 { + return Lock{}, fmt.Errorf("Server unable to create lock: %v", lockRes.Err) + } + lock := *lockRes.Lock if err := c.cache.Add(lock); err != nil { - return Lock{}, fmt.Errorf("Error caching lock information: %v", err) + return Lock{}, errors.Wrap(err, "lock cache") } return lock, nil @@ -92,7 +110,6 @@ func (c *Client) LockFile(path string) (Lock, error) { // path must be relative to the root of the repository // Force causes the file to be unlocked from other users as well func (c *Client) UnlockFile(path string, force bool) error { - id, err := c.lockIdFromPath(path) if err != nil { return fmt.Errorf("Unable to get lock id: %v", err) @@ -105,14 +122,13 @@ func (c *Client) UnlockFile(path string, force bool) error { // UnlockFileById attempts to unlock a lock with a given id on the current remote // Force causes the file to be unlocked from other users as well func (c *Client) UnlockFileById(id string, force bool) error { - s, resp := c.apiClient.Locks.Unlock(id, force) - - if _, err := c.apiClient.Do(s); err != nil { - return fmt.Errorf("Error communicating with LFS API: %v", err) + unlockRes, _, err := c.client.Unlock(c.Remote, id, force) + if err != nil { + return errors.Wrap(err, "api") } - if len(resp.Err) > 0 { - return fmt.Errorf("Server unable to unlock lock: %v", resp.Err) + if len(unlockRes.Err) > 0 { + return fmt.Errorf("Server unable to unlock: %s", unlockRes.Err) } if err := c.cache.RemoveById(id); err != nil { @@ -126,33 +142,22 @@ func (c *Client) UnlockFileById(id string, force bool) error { type Lock struct { // Id is the unique identifier corresponding to this particular Lock. It // must be consistent with the local copy, and the server's copy. - Id string + Id string `json:"id"` // Path is an absolute path to the file that is locked as a part of this // lock. - Path string + Path string `json:"path"` // Name is the name of the person holding this lock - Name string + Name string `json:"name"` // Email address of the person holding this lock - Email string + Email string `json:"email"` // LockedAt is the time at which this lock was acquired. 
- LockedAt time.Time -} - -func (c *Client) newLockFromApi(a api.Lock) Lock { - return Lock{ - Id: a.Id, - Path: a.Path, - Name: a.Committer.Name, - Email: a.Committer.Email, - LockedAt: a.LockedAt, - } + LockedAt time.Time `json:"locked_at"` } // SearchLocks returns a channel of locks which match the given name/value filter // If limit > 0 then search stops at that number of locks // If localOnly = true, don't query the server & report only own local locks func (c *Client) SearchLocks(filter map[string]string, limit int, localOnly bool) (locks []Lock, err error) { - if localOnly { return c.searchCachedLocks(filter, limit) } else { @@ -184,38 +189,37 @@ func (c *Client) searchCachedLocks(filter map[string]string, limit int) ([]Lock, func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, error) { locks := make([]Lock, 0, limit) - apifilters := make([]api.Filter, 0, len(filter)) + apifilters := make([]lockFilter, 0, len(filter)) for k, v := range filter { - apifilters = append(apifilters, api.Filter{k, v}) + apifilters = append(apifilters, lockFilter{Property: k, Value: v}) } - query := &api.LockSearchRequest{Filters: apifilters} + query := &lockSearchRequest{Filters: apifilters} for { - s, resp := c.apiClient.Locks.Search(query) - if _, err := c.apiClient.Do(s); err != nil { - return locks, fmt.Errorf("Error communicating with LFS API: %v", err) + list, _, err := c.client.Search(c.Remote, query) + if err != nil { + return locks, errors.Wrap(err, "locking") } - if resp.Err != "" { - return locks, fmt.Errorf("Error response from LFS API: %v", resp.Err) + if list.Err != "" { + return locks, fmt.Errorf("Error response from LFS API: %v", list.Err) } - for _, l := range resp.Locks { - locks = append(locks, c.newLockFromApi(l)) + for _, l := range list.Locks { + locks = append(locks, l) if limit > 0 && len(locks) >= limit { // Exit outer loop too return locks, nil } } - if resp.NextCursor != "" { - query.Cursor = resp.NextCursor + if list.NextCursor != "" { + query.Cursor = list.NextCursor } else { break } } return locks, nil - } // lockIdFromPath makes a call to the LFS API and resolves the ID for the locked @@ -228,21 +232,21 @@ func (c *Client) searchRemoteLocks(filter map[string]string, limit int) ([]Lock, // If the API call is successful, and only one lock matches the given filepath, // then its ID will be returned, along with a value of "nil" for the error.
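For orientation, a minimal sketch of how the reworked locking.Client above is meant to be driven end to end. The remote name, cache location, and file paths below are illustrative assumptions (a real caller runs inside a Git repository and supplies its own configured lfsapi.Client); they are not part of this change.

```go
package main

import (
	"fmt"
	"log"

	"github.com/git-lfs/git-lfs/lfsapi"
	"github.com/git-lfs/git-lfs/locking"
)

func main() {
	// lfsapi.NewClient(nil, nil) mirrors the construction used by
	// tq.NewManifest in this change; real callers pass their OS/Git envs.
	api, err := lfsapi.NewClient(nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	locker, err := locking.NewClient("origin", api)
	if err != nil {
		log.Fatal(err)
	}
	defer locker.Close()

	// Optional on-disk cache; the path must already exist, and a directory
	// gets "lockcache.db" appended (see SetupFileCache above).
	if err := locker.SetupFileCache(".git/lfs"); err != nil {
		log.Fatal(err)
	}

	// Take a lock, then search for it again on the server.
	lock, err := locker.LockFile("folder/test1.dat")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("locked:", lock.Id, lock.Path)

	locks, err := locker.SearchLocks(map[string]string{"path": "folder/test1.dat"}, 10, false)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matching locks:", len(locks))
}
```

Note that the file cache is now opt-in: without SetupFileCache the client falls back to the no-op nilLockCacher added later in this file.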
func (c *Client) lockIdFromPath(path string) (string, error) { - s, resp := c.apiClient.Locks.Search(&api.LockSearchRequest{ - Filters: []api.Filter{ - {"path", path}, + list, _, err := c.client.Search(c.Remote, &lockSearchRequest{ + Filters: []lockFilter{ + {Property: "path", Value: path}, }, }) - if _, err := c.apiClient.Do(s); err != nil { + if err != nil { return "", err } - switch len(resp.Locks) { + switch len(list.Locks) { case 0: return "", ErrNoMatchingLocks case 1: - return resp.Locks[0].Id, nil + return list.Locks[0].Id, nil default: return "", ErrLockAmbiguous } @@ -261,7 +265,7 @@ func (c *Client) refreshLockCache() error { // We're going to overwrite the entire local cache c.cache.Clear() - _, email := c.cfg.CurrentCommitter() + _, email := c.client.CurrentUser() for _, l := range locks { if l.Email == email { c.cache.Add(l) @@ -274,3 +278,22 @@ func (c *Client) refreshLockCache() error { func init() { kv.RegisterTypeForStorage(&Lock{}) } + +type nilLockCacher struct{} + +func (c *nilLockCacher) Add(l Lock) error { + return nil +} +func (c *nilLockCacher) RemoveByPath(filePath string) error { + return nil +} +func (c *nilLockCacher) RemoveById(id string) error { + return nil +} +func (c *nilLockCacher) Locks() []Lock { + return nil +} +func (c *nilLockCacher) Clear() {} +func (c *nilLockCacher) Save() error { + return nil +} diff --git a/locking/locks_test.go b/locking/locks_test.go index a5f8d7f2..da9f5fc6 100644 --- a/locking/locks_test.go +++ b/locking/locks_test.go @@ -1,55 +1,19 @@ package locking import ( - "bytes" "encoding/json" "io/ioutil" "net/http" - "os" + "net/http/httptest" "sort" "testing" "time" - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -type TestLifecycle struct { -} - -func (l *TestLifecycle) Build(schema *api.RequestSchema) (*http.Request, error) { - return http.NewRequest("GET", "http://dummy", nil) -} - -func (l *TestLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) { - // Return test data including other users - locks := api.LockList{Locks: []api.Lock{ - api.Lock{Id: "99", Path: "folder/test3.dat", Committer: api.Committer{Name: "Alice", Email: "alice@wonderland.com"}}, - api.Lock{Id: "101", Path: "folder/test1.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}}, - api.Lock{Id: "102", Path: "folder/test2.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}}, - api.Lock{Id: "103", Path: "root.dat", Committer: api.Committer{Name: "Fred", Email: "fred@bloggs.com"}}, - api.Lock{Id: "199", Path: "other/test1.dat", Committer: api.Committer{Name: "Charles", Email: "charles@incharge.com"}}, - }} - locksJson, _ := json.Marshal(locks) - r := &http.Response{ - Status: "200 OK", - StatusCode: 200, - Proto: "HTTP/1.0", - Body: ioutil.NopCloser(bytes.NewReader(locksJson)), - } - if into != nil { - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(into); err != nil { - return nil, err - } - } - return api.WrapHttpResponse(r), nil -} -func (l *TestLifecycle) Cleanup(resp api.Response) error { - return resp.Body().Close() -} - type LocksById []Lock func (a LocksById) Len() int { return len(a) } @@ -58,20 +22,40 @@ func (a LocksById) Less(i, j int) bool { return a[i].Id < a[j].Id } func TestRefreshCache(t *testing.T) { var err error - oldStore := config.LocalGitStorageDir - config.LocalGitStorageDir, err = ioutil.TempDir("", "testCacheLock") + 
tempDir, err := ioutil.TempDir("", "testCacheLock") assert.Nil(t, err) + + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "GET", r.Method) + assert.Equal(t, "/api/locks", r.URL.Path) + + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(lockList{ + Locks: []Lock{ + Lock{Id: "99", Path: "folder/test3.dat", Name: "Alice", Email: "alice@wonderland.com"}, + Lock{Id: "101", Path: "folder/test1.dat", Name: "Fred", Email: "fred@bloggs.com"}, + Lock{Id: "102", Path: "folder/test2.dat", Name: "Fred", Email: "fred@bloggs.com"}, + Lock{Id: "103", Path: "root.dat", Name: "Fred", Email: "fred@bloggs.com"}, + Lock{Id: "199", Path: "other/test1.dat", Name: "Charles", Email: "charles@incharge.com"}, + }, + }) + assert.Nil(t, err) + })) + defer func() { - os.RemoveAll(config.LocalGitStorageDir) - config.LocalGitStorageDir = oldStore + srv.Close() }() - cfg := config.NewFrom(config.Values{ - Git: map[string]string{"user.name": "Fred", "user.email": "fred@bloggs.com"}}) - client, err := NewClient(cfg) + lfsclient, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.url": srv.URL + "/api", + "user.name": "Fred", + "user.email": "fred@bloggs.com", + })) + require.Nil(t, err) + + client, err := NewClient("", lfsclient) assert.Nil(t, err) - // Override api client for testing - client.apiClient = api.NewClient(&TestLifecycle{}) + assert.Nil(t, client.SetupFileCache(tempDir)) // Should start with no cached items locks, err := client.SearchLocks(nil, 0, true) diff --git a/progress/copycallback.go b/progress/copycallback.go index b9075fd2..f291abe8 100644 --- a/progress/copycallback.go +++ b/progress/copycallback.go @@ -1,9 +1,45 @@ package progress -import "io" +import ( + "bytes" + "io" +) type CopyCallback func(totalSize int64, readSoFar int64, readSinceLast int) error +type bodyWithCallback struct { + c CopyCallback + totalSize int64 + readSize int64 + ReadSeekCloser +} + +func NewByteBodyWithCallback(by []byte, totalSize int64, cb CopyCallback) ReadSeekCloser { + return NewBodyWithCallback(NewByteBody(by), totalSize, cb) +} + +func NewBodyWithCallback(body ReadSeekCloser, totalSize int64, cb CopyCallback) ReadSeekCloser { + return &bodyWithCallback{ + c: cb, + totalSize: totalSize, + ReadSeekCloser: body, + } +} + +func (r *bodyWithCallback) Read(p []byte) (int, error) { + n, err := r.ReadSeekCloser.Read(p) + + if n > 0 { + r.readSize += int64(n) + } + + if err == nil && r.c != nil { + err = r.c(r.totalSize, r.readSize, n) + } + + return n, err +} + type CallbackReader struct { C CopyCallback TotalSize int64 @@ -24,3 +60,21 @@ func (w *CallbackReader) Read(p []byte) (int, error) { return n, err } + +// prevent import cycle +type ReadSeekCloser interface { + io.Seeker + io.ReadCloser +} + +func NewByteBody(by []byte) ReadSeekCloser { + return &closingByteReader{Reader: bytes.NewReader(by)} +} + +type closingByteReader struct { + *bytes.Reader +} + +func (r *closingByteReader) Close() error { + return nil +} diff --git a/test/cmd/lfstest-customadapter.go b/test/cmd/lfstest-customadapter.go index c126c398..65668581 100644 --- a/test/cmd/lfstest-customadapter.go +++ b/test/cmd/lfstest-customadapter.go @@ -8,13 +8,14 @@ import ( "fmt" "io" "io/ioutil" + "net/http" "os" "strconv" "strings" "time" "github.com/git-lfs/git-lfs/config" - "github.com/git-lfs/git-lfs/httputil" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/tools" ) @@ -26,10 +27,14 @@ var 
cfg = config.New() // All we actually do is relay the requests back to the normal storage URLs // of our test server for simplicity, but this proves the principle func main() { - scanner := bufio.NewScanner(os.Stdin) writer := bufio.NewWriter(os.Stdout) errWriter := bufio.NewWriter(os.Stderr) + apiClient, err := lfsapi.NewClient(cfg.Os, cfg.Git) + if err != nil { + writeToStderr("Error creating api client: "+err.Error(), errWriter) + os.Exit(1) + } for scanner.Scan() { line := scanner.Text() @@ -46,10 +51,10 @@ func main() { sendResponse(resp, writer, errWriter) case "download": writeToStderr(fmt.Sprintf("Received download request for %s\n", req.Oid), errWriter) - performDownload(req.Oid, req.Size, req.Action, writer, errWriter) + performDownload(apiClient, req.Oid, req.Size, req.Action, writer, errWriter) case "upload": writeToStderr(fmt.Sprintf("Received upload request for %s\n", req.Oid), errWriter) - performUpload(req.Oid, req.Size, req.Action, req.Path, writer, errWriter) + performUpload(apiClient, req.Oid, req.Size, req.Action, req.Path, writer, errWriter) case "terminate": writeToStderr("Terminating test custom adapter gracefully.\n", errWriter) break @@ -98,15 +103,20 @@ func sendProgress(oid string, bytesSoFar int64, bytesSinceLast int, writer, errW } } -func performDownload(oid string, size int64, a *action, writer, errWriter *bufio.Writer) { +func performDownload(apiClient *lfsapi.Client, oid string, size int64, a *action, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters - req, err := httputil.NewHttpRequest("GET", a.Href, a.Header) + req, err := http.NewRequest("GET", a.Href, nil) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } - res, err := httputil.DoHttpRequest(cfg, req, true) + + for k := range a.Header { + req.Header.Set(k, a.Header[k]) + } + + res, err := apiClient.DoWithAuth("origin", req) if err != nil { sendTransferError(oid, res.StatusCode, err.Error(), writer, errWriter) return @@ -145,15 +155,19 @@ func performDownload(oid string, size int64, a *action, writer, errWriter *bufio } } -func performUpload(oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) { +func performUpload(apiClient *lfsapi.Client, oid string, size int64, a *action, fromPath string, writer, errWriter *bufio.Writer) { // We just use the URLs we're given, so we're just a proxy for the direct method // but this is enough to test intermediate custom adapters - req, err := httputil.NewHttpRequest("PUT", a.Href, a.Header) + req, err := http.NewRequest("PUT", a.Href, nil) if err != nil { sendTransferError(oid, 2, err.Error(), writer, errWriter) return } + for k := range a.Header { + req.Header.Set(k, a.Header[k]) + } + if len(req.Header.Get("Content-Type")) == 0 { req.Header.Set("Content-Type", "application/octet-stream") } @@ -178,23 +192,18 @@ func performUpload(oid string, size int64, a *action, fromPath string, writer, e sendProgress(oid, readSoFar, readSinceLast, writer, errWriter) return nil } - var reader io.Reader - reader = &progress.CallbackReader{ - C: cb, - TotalSize: size, - Reader: f, - } + req.Body = progress.NewBodyWithCallback(f, size, cb) - req.Body = ioutil.NopCloser(reader) - - res, err := httputil.DoHttpRequest(cfg, req, true) + res, err := apiClient.DoWithAuth("origin", req) if err != nil { sendTransferError(oid, res.StatusCode, fmt.Sprintf("Error uploading data for %s: %v", oid, err), writer, 
errWriter) return } if res.StatusCode > 299 { - sendTransferError(oid, res.StatusCode, fmt.Sprintf("Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode), writer, errWriter) + msg := fmt.Sprintf("Invalid status for %s %s: %d", + req.Method, strings.SplitN(req.URL.String(), "?", 2)[0], res.StatusCode) + sendTransferError(oid, res.StatusCode, msg, writer, errWriter) return } diff --git a/test/cmd/lfstest-gitserver.go b/test/cmd/lfstest-gitserver.go index 8d15c4c1..a39515fd 100644 --- a/test/cmd/lfstest-gitserver.go +++ b/test/cmd/lfstest-gitserver.go @@ -27,6 +27,8 @@ import ( "strings" "sync" "time" + + "github.com/ThomsonReutersEikon/go-ntlm/ntlm" ) var ( @@ -59,6 +61,12 @@ func main() { mux := http.NewServeMux() server = httptest.NewServer(mux) serverTLS = httptest.NewTLSServer(mux) + ntlmSession, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionOrientedMode) + if err != nil { + fmt.Println("Error creating ntlm session:", err) + os.Exit(1) + } + ntlmSession.SetUserInfo("ntlmuser", "ntlmpass", "NTLMDOMAIN") stopch := make(chan bool) @@ -68,16 +76,22 @@ func main() { mux.HandleFunc("/storage/", storageHandler) mux.HandleFunc("/redirect307/", redirect307Handler) - mux.HandleFunc("/locks", locksHandler) - mux.HandleFunc("/locks/", locksHandler) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { id, ok := reqId(w) if !ok { return } + if strings.Contains(r.URL.Path, "/info/lfs/locks") { + if !skipIfBadAuth(w, r, id, ntlmSession) { + locksHandler(w, r) + } + + return + } + if strings.Contains(r.URL.Path, "/info/lfs") { - if !skipIfBadAuth(w, r, id) { + if !skipIfBadAuth(w, r, id, ntlmSession) { lfsHandler(w, r, id) } @@ -140,10 +154,22 @@ type lfsLink struct { } type lfsError struct { - Code int `json:"code"` + Code int `json:"code,omitempty"` Message string `json:"message"` } +func writeLFSError(w http.ResponseWriter, code int, msg string) { + by, err := json.Marshal(&lfsError{Message: msg}) + if err != nil { + http.Error(w, "json encoding error: "+err.Error(), 500) + return + } + + w.Header().Set("Content-Type", "application/vnd.git-lfs+json") + w.WriteHeader(code) + w.Write(by) +} + // handles any requests with "{name}.server.git/info/lfs" in the path func lfsHandler(w http.ResponseWriter, r *http.Request, id string) { repo, err := repoFromLfsUrl(r.URL.Path) @@ -159,13 +185,19 @@ func lfsHandler(w http.ResponseWriter, r *http.Request, id string) { case "POST": if strings.HasSuffix(r.URL.String(), "batch") { lfsBatchHandler(w, r, id, repo) + } else if strings.HasSuffix(r.URL.String(), "locks") || strings.HasSuffix(r.URL.String(), "unlock") { + locksHandler(w, r) } else { w.WriteHeader(404) } case "DELETE": lfsDeleteHandler(w, r, id, repo) case "GET": - w.WriteHeader(404) + if strings.Contains(r.URL.String(), "/locks") { + locksHandler(w, r) + } else { + w.WriteHeader(404) + } default: w.WriteHeader(405) } @@ -435,13 +468,7 @@ func storageHandler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(500) return case "status-storage-503": - w.Header().Set("Content-Type", "application/vnd.git-lfs+json") - w.WriteHeader(503) - - json.NewEncoder(w).Encode(&struct { - Message string `json:"message"` - }{"LFS is temporarily unavailable"}) - + writeLFSError(w, 503, "LFS is temporarily unavailable") return case "object-authenticated": if len(r.Header.Get("Authorization")) > 0 { @@ -784,7 +811,9 @@ func locksHandler(w http.ResponseWriter, r *http.Request) { switch r.Method { case "GET": if !lockRe.MatchString(r.URL.Path) { -
http.NotFound(w, r) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(`{"message":"unknown path: ` + r.URL.Path + `"}`)) } else { if err := r.ParseForm(); err != nil { http.Error(w, "could not parse form values", http.StatusInternalServerError) @@ -793,6 +822,7 @@ func locksHandler(w http.ResponseWriter, r *http.Request) { ll := &LockList{} locks := getLocks() + w.Header().Set("Content-Type", "application/json") if cursor := r.FormValue("cursor"); cursor != "" { lastSeen := -1 @@ -848,6 +878,7 @@ func locksHandler(w http.ResponseWriter, r *http.Request) { enc.Encode(ll) } case "POST": + w.Header().Set("Content-Type", "application/json") if strings.HasSuffix(r.URL.Path, "unlock") { var unlockRequest UnlockRequest if err := dec.Decode(&unlockRequest); err != nil { @@ -925,15 +956,12 @@ func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) b auth := r.Header.Get("Authorization") user, pass, err := extractAuth(auth) if err != nil { - w.WriteHeader(403) - w.Write([]byte(`{"message":"` + err.Error() + `"}`)) + writeLFSError(w, 403, err.Error()) return true } if user != "requirecreds" || pass != "pass" { - errmsg := fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass) - w.WriteHeader(403) - w.Write([]byte(`{"message":"` + errmsg + `"}`)) + writeLFSError(w, 403, fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass)) return true } @@ -1078,8 +1106,12 @@ func extractAuth(auth string) (string, string, error) { return "", "", nil } -func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string) bool { +func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string, ntlmSession ntlm.ServerSession) bool { auth := r.Header.Get("Authorization") + if strings.Contains(r.URL.Path, "ntlm") { + return false + } + if auth == "" { w.WriteHeader(401) return true @@ -1111,6 +1143,49 @@ func skipIfBadAuth(w http.ResponseWriter, r *http.Request, id string) bool { return true } +func handleNTLM(w http.ResponseWriter, r *http.Request, authHeader string, session ntlm.ServerSession) { + if strings.HasPrefix(strings.ToUpper(authHeader), "BASIC ") { + authHeader = "" + } + + switch authHeader { + case "": + w.Header().Set("Www-Authenticate", "ntlm") + w.WriteHeader(401) + + // ntlmNegotiateMessage from httputil pkg + case "NTLM TlRMTVNTUAABAAAAB7IIogwADAAzAAAACwALACgAAAAKAAAoAAAAD1dJTExISS1NQUlOTk9SVEhBTUVSSUNB": + ch, err := session.GenerateChallengeMessage() + if err != nil { + writeLFSError(w, 500, err.Error()) + return + } + + chMsg := base64.StdEncoding.EncodeToString(ch.Bytes()) + w.Header().Set("Www-Authenticate", "ntlm "+chMsg) + w.WriteHeader(401) + + default: + if !strings.HasPrefix(strings.ToUpper(authHeader), "NTLM ") { + writeLFSError(w, 500, "bad authorization header: "+authHeader) + return + } + + auth := authHeader[5:] // strip "ntlm " prefix + val, err := base64.StdEncoding.DecodeString(auth) + if err != nil { + writeLFSError(w, 500, "base64 decode error: "+err.Error()) + return + } + + _, err = ntlm.ParseAuthenticateMessage(val, 2) + if err != nil { + writeLFSError(w, 500, "auth parse error: "+err.Error()) + return + } + } +} + func init() { oidHandlers = make(map[string]string) for _, content := range contentHandlers { diff --git a/test/cmd/ssh-echo.go b/test/cmd/ssh-echo.go new file mode 100644 index 00000000..1092be1d --- /dev/null +++ b/test/cmd/ssh-echo.go @@ -0,0 +1,34 @@ +// +build testtools + +package main + +import ( + "encoding/json" + "fmt" + "os" + "strings" +) + +type sshResponse struct { 
+ Href string `json:"href"` + Header map[string]string `json:"header"` +} + +func main() { + // expect args: + // ssh-echo -p PORT git@127.0.0.1 git-lfs-authenticate REPO OPERATION + if len(os.Args) != 5 { + fmt.Fprintf(os.Stderr, "got %d args: %v", len(os.Args), os.Args) + os.Exit(1) + } + + // just "git-lfs-authenticate REPO OPERATION" + authLine := strings.Split(os.Args[4], " ") + if len(authLine) < 13 { + fmt.Fprintf(os.Stderr, "bad git-lfs-authenticate line: %s\nargs: %v", authLine, os.Args) + } + + json.NewEncoder(os.Stdout).Encode(sshResponse{ + Href: fmt.Sprintf("http://127.0.0.1:%s/%s.git/info/lfs", os.Args[2], authLine[1]), + }) +} diff --git a/test/git-lfs-test-server-api/main.go b/test/git-lfs-test-server-api/main.go index c211cce7..249f9e24 100644 --- a/test/git-lfs-test-server-api/main.go +++ b/test/git-lfs-test-server-api/main.go @@ -10,10 +10,10 @@ import ( "strconv" "strings" - "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" "github.com/git-lfs/git-lfs/lfs" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/git-lfs/git-lfs/test" "github.com/git-lfs/git-lfs/tq" @@ -27,7 +27,7 @@ type TestObject struct { type ServerTest struct { Name string - F func(oidsExist, oidsMissing []TestObject) error + F func(m *tq.Manifest, oidsExist, oidsMissing []TestObject) error } var ( @@ -48,7 +48,6 @@ func main() { } func testServerApi(cmd *cobra.Command, args []string) { - if (len(apiUrl) == 0 && len(cloneUrl) == 0) || (len(apiUrl) != 0 && len(cloneUrl) != 0) { exit("Must supply either --url or --clone (and not both)") @@ -65,12 +64,18 @@ func testServerApi(cmd *cobra.Command, args []string) { // Force loading of config before we alter it config.Config.Git.All() + manifest, err := buildManifest() + if err != nil { + exit("error building tq.Manifest: " + err.Error()) + } + // Configure the endpoint manually - var endp config.Endpoint + var endp lfsapi.Endpoint + finder := lfsapi.NewEndpointFinder(config.Config.Git) if len(cloneUrl) > 0 { - endp = config.NewEndpointFromCloneURL(cloneUrl) + endp = finder.NewEndpointFromCloneURL(cloneUrl) } else { - endp = config.NewEndpoint(apiUrl) + endp = finder.NewEndpoint(apiUrl) } config.Config.SetManualEndpoint(endp) @@ -82,7 +87,7 @@ func testServerApi(cmd *cobra.Command, args []string) { } else { fmt.Printf("Creating test data (will upload to server)\n") var err error - oidsExist, oidsMissing, err = buildTestData() + oidsExist, oidsMissing, err = buildTestData(manifest) if err != nil { exit("Failed to set up test data, aborting") } @@ -96,7 +101,7 @@ func testServerApi(cmd *cobra.Command, args []string) { } - ok := runTests(oidsExist, oidsMissing) + ok := runTests(manifest, oidsExist, oidsMissing) if !ok { exit("One or more tests failed, see above") } @@ -135,7 +140,16 @@ func (*testDataCallback) Errorf(format string, args ...interface{}) { fmt.Printf(format, args...) 
} -func buildTestData() (oidsExist, oidsMissing []TestObject, err error) { +func buildManifest() (*tq.Manifest, error) { + cfg := config.Config + apiClient, err := lfsapi.NewClient(cfg.Os, cfg.Git) + if err != nil { + return nil, err + } + return tq.NewManifestWithClient(apiClient), nil +} + +func buildTestData(manifest *tq.Manifest) (oidsExist, oidsMissing []TestObject, err error) { const oidCount = 50 oidsExist = make([]TestObject, 0, oidCount) oidsMissing = make([]TestObject, 0, oidCount) @@ -159,7 +173,7 @@ func buildTestData() (oidsExist, oidsMissing []TestObject, err error) { outputs := repo.AddCommits([]*test.CommitInput{&commit}) // now upload - uploadQueue := lfs.NewUploadQueue(config.Config, tq.WithProgress(meter)) + uploadQueue := tq.NewTransferQueue(tq.Upload, manifest, "origin", tq.WithProgress(meter)) for _, f := range outputs[0].Files { oidsExist = append(oidsExist, TestObject{Oid: f.Oid, Size: f.Size}) @@ -203,12 +217,11 @@ func saveTestOids(filename string, objs []TestObject) { } -func runTests(oidsExist, oidsMissing []TestObject) bool { - +func runTests(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) bool { ok := true fmt.Printf("Running %d tests...\n", len(tests)) for _, t := range tests { - err := runTest(t, oidsExist, oidsMissing) + err := runTest(t, manifest, oidsExist, oidsMissing) if err != nil { ok = false } @@ -216,7 +229,7 @@ func runTests(oidsExist, oidsMissing []TestObject) bool { return ok } -func runTest(t ServerTest, oidsExist, oidsMissing []TestObject) error { +func runTest(t ServerTest, manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { const linelen = 70 line := t.Name if len(line) > linelen { @@ -226,7 +239,7 @@ func runTest(t ServerTest, oidsExist, oidsMissing []TestObject) error { } fmt.Printf("%s...\r", line) - err := t.F(oidsExist, oidsMissing) + err := t.F(manifest, oidsExist, oidsMissing) if err != nil { fmt.Printf("%s FAILED\n", line) fmt.Println(err.Error()) @@ -242,21 +255,21 @@ func exit(format string, args ...interface{}) { os.Exit(2) } -func addTest(name string, f func(oidsExist, oidsMissing []TestObject) error) { +func addTest(name string, f func(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error) { tests = append(tests, ServerTest{Name: name, F: f}) } -func callBatchApi(op string, objs []TestObject) ([]*api.ObjectResource, error) { - - apiobjs := make([]*api.ObjectResource, 0, len(objs)) +func callBatchApi(manifest *tq.Manifest, dir tq.Direction, objs []TestObject) ([]*tq.Transfer, error) { + apiobjs := make([]*tq.Transfer, 0, len(objs)) for _, o := range objs { - apiobjs = append(apiobjs, &api.ObjectResource{Oid: o.Oid, Size: o.Size}) + apiobjs = append(apiobjs, &tq.Transfer{Oid: o.Oid, Size: o.Size}) } - o, _, err := api.Batch(config.Config, apiobjs, op, []string{"basic"}) + + bres, err := tq.Batch(manifest, dir, "origin", apiobjs) if err != nil { return nil, err } - return o, nil + return bres.Objects, nil } // Combine 2 slices into one by "randomly" interleaving diff --git a/test/git-lfs-test-server-api/testdownload.go b/test/git-lfs-test-server-api/testdownload.go index 25e03e25..74d579a4 100644 --- a/test/git-lfs-test-server-api/testdownload.go +++ b/test/git-lfs-test-server-api/testdownload.go @@ -6,11 +6,12 @@ import ( "fmt" "github.com/git-lfs/git-lfs/tools" + "github.com/git-lfs/git-lfs/tq" ) // "download" - all present -func downloadAllExist(oidsExist, oidsMissing []TestObject) error { - retobjs, err := callBatchApi("download", oidsExist) +func downloadAllExist(manifest *tq.Manifest, 
oidsExist, oidsMissing []TestObject) error { + retobjs, err := callBatchApi(manifest, tq.Download, oidsExist) if err != nil { return err @@ -36,8 +37,8 @@ func downloadAllExist(oidsExist, oidsMissing []TestObject) error { } // "download" - all missing (test includes 404 error entry) -func downloadAllMissing(oidsExist, oidsMissing []TestObject) error { - retobjs, err := callBatchApi("download", oidsMissing) +func downloadAllMissing(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { + retobjs, err := callBatchApi(manifest, tq.Download, oidsMissing) if err != nil { return err @@ -54,7 +55,7 @@ func downloadAllMissing(oidsExist, oidsMissing []TestObject) error { errbuf.WriteString(fmt.Sprintf("Download link should not exist for %s, was %s\n", o.Oid, link)) } if o.Error == nil { - errbuf.WriteString(fmt.Sprintf("Download should include an error for missing object %s, was %s\n", o.Oid)) + errbuf.WriteString(fmt.Sprintf("Download should include an error for missing object %s\n", o.Oid)) } else if o.Error.Code != 404 { errbuf.WriteString(fmt.Sprintf("Download error code for missing object %s should be 404, got %d\n", o.Oid, o.Error.Code)) } @@ -68,8 +69,7 @@ func downloadAllMissing(oidsExist, oidsMissing []TestObject) error { } // "download" - mixture -func downloadMixed(oidsExist, oidsMissing []TestObject) error { - +func downloadMixed(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { existSet := tools.NewStringSetWithCapacity(len(oidsExist)) for _, o := range oidsExist { existSet.Add(o.Oid) @@ -80,7 +80,7 @@ func downloadMixed(oidsExist, oidsMissing []TestObject) error { } calloids := interleaveTestData(oidsExist, oidsMissing) - retobjs, err := callBatchApi("download", calloids) + retobjs, err := callBatchApi(manifest, tq.Download, calloids) if err != nil { return err diff --git a/test/git-lfs-test-server-api/testupload.go b/test/git-lfs-test-server-api/testupload.go index f72a40f1..ecefb10a 100644 --- a/test/git-lfs-test-server-api/testupload.go +++ b/test/git-lfs-test-server-api/testupload.go @@ -6,11 +6,12 @@ import ( "fmt" "github.com/git-lfs/git-lfs/tools" + "github.com/git-lfs/git-lfs/tq" ) // "upload" - all missing -func uploadAllMissing(oidsExist, oidsMissing []TestObject) error { - retobjs, err := callBatchApi("upload", oidsMissing) +func uploadAllMissing(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { + retobjs, err := callBatchApi(manifest, tq.Upload, oidsMissing) if err != nil { return err @@ -37,8 +38,8 @@ func uploadAllMissing(oidsExist, oidsMissing []TestObject) error { } // "upload" - all present -func uploadAllExists(oidsExist, oidsMissing []TestObject) error { - retobjs, err := callBatchApi("upload", oidsExist) +func uploadAllExists(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { + retobjs, err := callBatchApi(manifest, tq.Upload, oidsExist) if err != nil { return err @@ -64,8 +65,7 @@ func uploadAllExists(oidsExist, oidsMissing []TestObject) error { } // "upload" - mix of missing & present -func uploadMixed(oidsExist, oidsMissing []TestObject) error { - +func uploadMixed(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { existSet := tools.NewStringSetWithCapacity(len(oidsExist)) for _, o := range oidsExist { existSet.Add(o.Oid) @@ -76,7 +76,7 @@ func uploadMixed(oidsExist, oidsMissing []TestObject) error { } calloids := interleaveTestData(oidsExist, oidsMissing) - retobjs, err := callBatchApi("upload", calloids) + retobjs, err := callBatchApi(manifest, tq.Upload, calloids) if err != nil 
{ return err @@ -109,7 +109,7 @@ func uploadMixed(oidsExist, oidsMissing []TestObject) error { } -func uploadEdgeCases(oidsExist, oidsMissing []TestObject) error { +func uploadEdgeCases(manifest *tq.Manifest, oidsExist, oidsMissing []TestObject) error { errorCases := make([]TestObject, 0, 5) errorCodeMap := make(map[string]int, 5) errorReasonMap := make(map[string]string, 5) @@ -149,7 +149,7 @@ func uploadEdgeCases(oidsExist, oidsMissing []TestObject) error { validReasonMap[sha] = "Zero size should be allowed" calloids := interleaveTestData(errorCases, validCases) - retobjs, err := callBatchApi("upload", calloids) + retobjs, err := callBatchApi(manifest, tq.Upload, calloids) if err != nil { return err diff --git a/test/test-batch-transfer.sh b/test/test-batch-transfer.sh index 52d1455a..50120a28 100755 --- a/test/test-batch-transfer.sh +++ b/test/test-batch-transfer.sh @@ -101,3 +101,47 @@ begin_test "batch transfers occur in reverse order by size" ) end_test +begin_test "batch transfers with ssh endpoint" +( + set -e + + reponame="batch-ssh" + setup_remote_repo "$reponame" + clone_repo "$reponame" "$reponame" + + sshurl="${GITSERVER/http:\/\//ssh://git@}/$reponame" + git config lfs.url "$sshurl" + git lfs env + + contents="test" + oid="$(calc_oid "$contents")" + git lfs track "*.dat" + printf "$contents" > test.dat + git add .gitattributes test.dat + git commit -m "initial commit" + + git push origin master 2>&1 +) +end_test + +begin_test "batch transfers with ntlm server" +( + set -e + + reponame="ntlmtest" + setup_remote_repo "$reponame" + + printf "ntlmdomain\\\ntlmuser:ntlmpass" > "$CREDSDIR/127.0.0.1--$reponame" + + clone_repo "$reponame" "$reponame" + + contents="test" + oid="$(calc_oid "$contents")" + git lfs track "*.dat" + printf "$contents" > test.dat + git add .gitattributes test.dat + git commit -m "initial commit" + + GIT_CURL_VERBOSE=1 git push origin master 2>&1 +) +end_test diff --git a/test/test-clone.sh b/test/test-clone.sh index 92c6cac3..9100be38 100755 --- a/test/test-clone.sh +++ b/test/test-clone.sh @@ -329,7 +329,18 @@ begin_test "clone (with .lfsconfig)" echo "test: clone with lfs.fetchinclude in .lfsconfig" local_reponame="clone_with_config_include" + set +x git lfs clone "$GITSERVER/$reponame" "$local_reponame" + ok="$?" + set -x + if [ "0" -ne "$ok" ]; then + # TEMP: used to catch transient failure from above `clone` command, as in: + # https://github.com/git-lfs/git-lfs/pull/1782#issuecomment-267678319 + echo >&2 "[!] 
\`git lfs clone $GITSERVER/$reponame $local_reponame\` failed" + git lfs logs last + + exit 1 + fi pushd "$local_reponame" assert_local_object "$contents_a_oid" 1 refute_local_object "$contents_b_oid" diff --git a/test/test-custom-transfers.sh b/test/test-custom-transfers.sh index e1ebd079..85fe7e26 100755 --- a/test/test-custom-transfers.sh +++ b/test/test-custom-transfers.sh @@ -102,7 +102,10 @@ begin_test "custom-transfer-upload-download" grep "xfer: started custom adapter process" fetchcustom.log grep "xfer\[lfstest-customadapter\]:" fetchcustom.log grep "11 of 11 files" fetchcustom.log - [ `find .git/lfs/objects -type f | wc -l` = 11 ] + grep "Terminating test custom adapter gracefully" fetchcustom.log + + objectlist=`find .git/lfs/objects -type f` + [ "$(echo "$objectlist" | wc -l)" -eq 11 ] ) end_test diff --git a/test/test-env.sh b/test/test-env.sh index e005ca01..8d2f6ad9 100755 --- a/test/test-env.sh +++ b/test/test-env.sh @@ -686,7 +686,8 @@ begin_test "env with multiple ssh remotes" expected='Endpoint=https://git-server.com/user/repo.git/info/lfs (auth=none) SSH=git@git-server.com:user/repo.git Endpoint (other)=https://other-git-server.com/user/repo.git/info/lfs (auth=none) - SSH=git@other-git-server.com:user/repo.git' + SSH=git@other-git-server.com:user/repo.git +GIT_SSH=ssh-echo' contains_same_elements "$expected" "$(git lfs env | grep -e "Endpoint" -e "SSH=")" ) diff --git a/test/test-lock.sh b/test/test-lock.sh index e56d1d12..c5bf135e 100755 --- a/test/test-lock.sh +++ b/test/test-lock.sh @@ -6,13 +6,29 @@ begin_test "creating a lock" ( set -e - setup_remote_repo_with_file "lock_create_simple" "a.dat" + reponame="lock_create_simple" + setup_remote_repo_with_file "$reponame" "a.dat" GITLFSLOCKSENABLED=1 git lfs lock "a.dat" | tee lock.log grep "'a.dat' was locked" lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") - assert_server_lock $id + assert_server_lock "$reponame" "$id" +) +end_test + +begin_test "creating a lock (--json)" +( + set -e + + reponame="lock_create_simple_json" + setup_remote_repo_with_file "$reponame" "a_json.dat" + + GITLFSLOCKSENABLED=1 git lfs lock --json "a_json.dat" | tee lock.log + grep "\"path\":\"a_json.dat\"" lock.log + + id=$(grep -o "\"id\":\".*\"" lock.log | cut -d \" -f 4) + assert_server_lock "$reponame" "$id" ) end_test @@ -20,13 +36,14 @@ begin_test "locking a previously locked file" ( set -e - setup_remote_repo_with_file "lock_create_previously_created" "b.dat" + reponame="lock_create_previously_created" + setup_remote_repo_with_file "$reponame" "b.dat" GITLFSLOCKSENABLED=1 git lfs lock "b.dat" | tee lock.log grep "'b.dat' was locked" lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") - assert_server_lock $id + assert_server_lock "$reponame" "$id" grep "lock already created" <(GITLFSLOCKSENABLED=1 git lfs lock "b.dat" 2>&1) ) diff --git a/test/test-locks.sh b/test/test-locks.sh index 43f1a94a..52d7afe2 100755 --- a/test/test-locks.sh +++ b/test/test-locks.sh @@ -6,12 +6,13 @@ begin_test "list a single lock" ( set -e - setup_remote_repo_with_file "locks_list_single" "f.dat" + reponame="locks_list_single" + setup_remote_repo_with_file "$reponame" "f.dat" GITLFSLOCKSENABLED=1 git lfs lock "f.dat" | tee lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") - assert_server_lock $id + assert_server_lock "$reponame" "$id" GITLFSLOCKSENABLED=1 git lfs locks --path "f.dat" | tee locks.log grep "1 lock(s) matched query" locks.log @@ -19,6 +20,23 @@ begin_test "list a single lock" ) end_test +begin_test "list a single lock (--json)" 
+( + set -e + + reponame="locks_list_single_json" + setup_remote_repo_with_file "$reponame" "f_json.dat" + + GITLFSLOCKSENABLED=1 git lfs lock "f_json.dat" | tee lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock "$reponame" "$id" + + GITLFSLOCKSENABLED=1 git lfs locks --json --path "f_json.dat" | tee locks.log + grep "\"path\":\"f_json.dat\"" locks.log +) +end_test + begin_test "list locks with a limit" ( set -e @@ -43,10 +61,10 @@ begin_test "list locks with a limit" grep "master -> master" push.log GITLFSLOCKSENABLED=1 git lfs lock "g_1.dat" | tee lock.log - assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + assert_server_lock "$reponame" "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" GITLFSLOCKSENABLED=1 git lfs lock "g_2.dat" | tee lock.log - assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + assert_server_lock "$reponame" "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" GITLFSLOCKSENABLED=1 git lfs locks --limit 1 | tee locks.log grep "1 lock(s) matched query" locks.log @@ -80,7 +98,7 @@ begin_test "list locks with pagination" for i in $(seq 1 5); do GITLFSLOCKSENABLED=1 git lfs lock "h_$i.dat" | tee lock.log - assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + assert_server_lock "$reponame" "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" done # The server will return, at most, three locks at a time diff --git a/test/test-pre-push.sh b/test/test-pre-push.sh index cd07302b..19c41a49 100755 --- a/test/test-pre-push.sh +++ b/test/test-pre-push.sh @@ -189,7 +189,7 @@ begin_test "pre-push with missing pointer not on server" git lfs pre-push origin "$GITSERVER/$reponame" 2>&1 | tee push.log set -e - grep "7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c does not exist in .git/lfs/objects. Tried new.dat, which matches 7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c." push.log + grep "Unable to find object (7aa7a5359173d05b63cfd682e3c38487f3cb4f7f1d60659fe59fab1505977d4c) locally." 
push.log ) end_test diff --git a/test/test-push.sh b/test/test-push.sh index d161c915..45f8ba5e 100755 --- a/test/test-push.sh +++ b/test/test-push.sh @@ -132,7 +132,7 @@ begin_test "push --all (no ref args)" [ $(grep -c "push" < push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log - [ $(grep -c "(3 of 3 files)" push.log) -eq 2 ] + [ $(grep -c "(6 of 6 files)" push.log) -eq 1 ] assert_server_object "$reponame-$suffix" "$oid1" assert_server_object "$reponame-$suffix" "$oid2" assert_server_object "$reponame-$suffix" "$oid3" @@ -162,10 +162,9 @@ begin_test "push --all (no ref args)" [ $(grep -c "push" push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log - grep "(2 of 2 files, 1 skipped)" push.log - grep "(3 of 3 files)" push.log - [ $(grep -c "files)" push.log) -eq 1 ] - [ $(grep -c "skipped)" push.log) -eq 1 ] + grep "(5 of 5 files, 1 skipped)" push.log + [ $(grep -c "files" push.log) -eq 1 ] + [ $(grep -c "skipped" push.log) -eq 1 ] assert_server_object "$reponame-$suffix-2" "$oid2" assert_server_object "$reponame-$suffix-2" "$oid3" assert_server_object "$reponame-$suffix-2" "$oid4" diff --git a/test/test-unlock.sh b/test/test-unlock.sh index e6641ca1..d7418aa5 100755 --- a/test/test-unlock.sh +++ b/test/test-unlock.sh @@ -6,15 +6,35 @@ begin_test "unlocking a lock by path" ( set -e + reponame="unlock_by_path" setup_remote_repo_with_file "unlock_by_path" "c.dat" GITLFSLOCKSENABLED=1 git lfs lock "c.dat" | tee lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") - assert_server_lock $id + assert_server_lock "$reponame" "$id" GITLFSLOCKSENABLED=1 git lfs unlock "c.dat" 2>&1 | tee unlock.log - refute_server_lock $id + refute_server_lock "$reponame" "$id" +) +end_test + +begin_test "unlocking a lock (--json)" +( + set -e + + reponame="unlock_by_path_json" + setup_remote_repo_with_file "$reponame" "c_json.dat" + + GITLFSLOCKSENABLED=1 git lfs lock "c_json.dat" | tee lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock "$reponame" "$id" + + GITLFSLOCKSENABLED=1 git lfs unlock --json "c_json.dat" 2>&1 | tee unlock.log + grep "\"unlocked\":true" unlock.log + + refute_server_lock "$reponame" "$id" ) end_test @@ -22,15 +42,16 @@ begin_test "unlocking a lock by id" ( set -e + reponame="unlock_by_id" setup_remote_repo_with_file "unlock_by_id" "d.dat" GITLFSLOCKSENABLED=1 git lfs lock "d.dat" | tee lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") - assert_server_lock $id + assert_server_lock "$reponame" "$id" GITLFSLOCKSENABLED=1 git lfs unlock --id="$id" 2>&1 | tee unlock.log - refute_server_lock $id + refute_server_lock "$reponame" "$id" ) end_test @@ -38,15 +59,16 @@ begin_test "unlocking a lock without sufficient info" ( set -e - setup_remote_repo_with_file "unlock_ambiguous" "e.dat" + reponame="unlock_ambiguous" + setup_remote_repo_with_file "$reponame" "e.dat" GITLFSLOCKSENABLED=1 git lfs lock "e.dat" | tee lock.log id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") - assert_server_lock $id + assert_server_lock "$reponame" "$id" GITLFSLOCKSENABLED=1 git lfs unlock 2>&1 | tee unlock.log grep "Usage: git lfs unlock" unlock.log - assert_server_lock $id + assert_server_lock "$reponame" "$id" ) end_test diff --git a/test/testenv.sh b/test/testenv.sh index 3c041e92..1e591d35 100644 --- a/test/testenv.sh +++ b/test/testenv.sh @@ -110,9 +110,13 @@ TESTHOME="$REMOTEDIR/home" GIT_CONFIG_NOSYSTEM=1 GIT_TERMINAL_PROMPT=0 +GIT_SSH=ssh-echo +APPVEYOR_REPO_COMMIT_MESSAGE="test: env test should look for GIT_SSH too" export CREDSDIR export GIT_CONFIG_NOSYSTEM 
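The GIT_SSH=ssh-echo assignment above points the whole test suite at the ssh-echo stub added earlier in this change, so SSH-style lfs.url values can be exercised without a real SSH server. Roughly, git-lfs ends up invoking the stub as sketched below and reading a JSON endpoint description back from stdout; the port and repository name are placeholders, and the argument layout is simply what the stub itself expects.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

func main() {
	// Shape of the call the suite relies on:
	//   ssh-echo -p <port> git@127.0.0.1 "git-lfs-authenticate <repo> <operation>"
	out, err := exec.Command("ssh-echo",
		"-p", "8080", "git@127.0.0.1", "git-lfs-authenticate repo1 download").Output()
	if err != nil {
		log.Fatal(err)
	}

	// Same field layout as the sshResponse struct in ssh-echo.go above.
	var resp struct {
		Href   string            `json:"href"`
		Header map[string]string `json:"header"`
	}
	if err := json.Unmarshal(out, &resp); err != nil {
		log.Fatal(err)
	}

	// With the stub above this prints: http://127.0.0.1:8080/repo1.git/info/lfs
	fmt.Println(resp.Href)
}
```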
+export GIT_SSH +export APPVEYOR_REPO_COMMIT_MESSAGE mkdir -p "$TMPDIR" mkdir -p "$TRASHDIR" diff --git a/test/testhelpers.sh b/test/testhelpers.sh index fc0d702d..0878e89f 100644 --- a/test/testhelpers.sh +++ b/test/testhelpers.sh @@ -122,9 +122,10 @@ assert_server_object() { # assert that a lock with the given ID exists on the test server assert_server_lock() { - local id="$1" + local reponame="$1" + local id="$2" - curl -v "$GITSERVER/locks/" \ + curl -v "$GITSERVER/$reponame.git/info/lfs/locks" \ -u "user:pass" \ -o http.json \ -H "Accept:application/vnd.git-lfs+json" 2>&1 | @@ -139,9 +140,10 @@ assert_server_lock() { # refute that a lock with the given ID exists on the test server refute_server_lock() { - local id="$1" + local reponame="$1" + local id="$2" - curl -v "$GITSERVER/locks/" \ + curl -v "$GITSERVER/$reponame.git/info/lfs/locks" \ -u "user:pass" \ -o http.json \ -H "Accept:application/vnd.git-lfs+json" 2>&1 | tee http.log diff --git a/tools/iotools.go b/tools/iotools.go index 8c1077fa..13b5f7ed 100644 --- a/tools/iotools.go +++ b/tools/iotools.go @@ -10,28 +10,6 @@ import ( "github.com/git-lfs/git-lfs/progress" ) -type readSeekCloserWrapper struct { - readSeeker io.ReadSeeker -} - -func (r *readSeekCloserWrapper) Read(p []byte) (n int, err error) { - return r.readSeeker.Read(p) -} - -func (r *readSeekCloserWrapper) Seek(offset int64, whence int) (int64, error) { - return r.readSeeker.Seek(offset, whence) -} - -func (r *readSeekCloserWrapper) Close() error { - return nil -} - -// NewReadSeekCloserWrapper wraps an io.ReadSeeker and implements a no-op Close() function -// to make it an io.ReadCloser -func NewReadSeekCloserWrapper(r io.ReadSeeker) io.ReadCloser { - return &readSeekCloserWrapper{r} -} - // CopyWithCallback copies reader to writer while performing a progress callback func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb progress.CopyCallback) (int64, error) { if success, _ := CloneFile(writer, reader); success { diff --git a/tq/adapterbase.go b/tq/adapterbase.go index 85262e51..e1bf9fba 100644 --- a/tq/adapterbase.go +++ b/tq/adapterbase.go @@ -2,8 +2,10 @@ package tq import ( "fmt" + "net/http" "sync" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/rubyist/tracerx" ) @@ -14,6 +16,8 @@ type adapterBase struct { name string direction Direction transferImpl transferImplementation + apiClient *lfsapi.Client + remote string jobChan chan *job cb ProgressCallback // WaitGroup to sync the completion of all workers @@ -47,8 +51,7 @@ func newAdapterBase(name string, dir Direction, ti transferImplementation) *adap name: name, direction: dir, transferImpl: ti, - - jobWait: new(sync.WaitGroup), + jobWait: new(sync.WaitGroup), } } @@ -61,6 +64,8 @@ func (a *adapterBase) Direction() Direction { } func (a *adapterBase) Begin(cfg AdapterConfig, cb ProgressCallback) error { + a.apiClient = cfg.APIClient() + a.remote = cfg.Remote() a.cb = cb a.jobChan = make(chan *job, 100) maxConcurrency := cfg.ConcurrentTransfers() @@ -123,7 +128,6 @@ func (a *adapterBase) End() { // worker function, many of these run per adapter func (a *adapterBase) worker(workerNum int, ctx interface{}) { - tracerx.Printf("xfer: adapter %q worker %d starting", a.Name(), workerNum) waitForAuth := workerNum > 0 signalAuthOnResponse := workerNum == 0 @@ -172,6 +176,26 @@ func (a *adapterBase) worker(workerNum int, ctx interface{}) { a.workerWait.Done() } +func (a *adapterBase) newHTTPRequest(method string, rel *Action) (*http.Request, error) { + req, err := http.NewRequest(method, 
rel.Href, nil) + if err != nil { + return nil, err + } + + for key, value := range rel.Header { + req.Header.Set(key, value) + } + + return req, nil +} + +func (a *adapterBase) doHTTP(t *Transfer, req *http.Request) (*http.Response, error) { + if t.Authenticated { + return a.apiClient.Do(req) + } + return a.apiClient.DoWithAuth(a.remote, req) +} + func advanceCallbackProgress(cb ProgressCallback, t *Transfer, numBytes int64) { if cb != nil { // Must split into max int sizes since read count is int diff --git a/tq/api.go b/tq/api.go new file mode 100644 index 00000000..0452355e --- /dev/null +++ b/tq/api.go @@ -0,0 +1,71 @@ +package tq + +import ( + "github.com/git-lfs/git-lfs/errors" + "github.com/git-lfs/git-lfs/lfsapi" + "github.com/rubyist/tracerx" +) + +type tqClient struct { + *lfsapi.Client +} + +type batchRequest struct { + Operation string `json:"operation"` + Objects []*Transfer `json:"objects"` + TransferAdapterNames []string `json:"transfers,omitempty"` +} + +type BatchResponse struct { + Objects []*Transfer `json:"objects"` + TransferAdapterName string `json:"transfer"` + endpoint lfsapi.Endpoint +} + +func Batch(m *Manifest, dir Direction, remote string, objects []*Transfer) (*BatchResponse, error) { + if len(objects) == 0 { + return &BatchResponse{}, nil + } + + return m.batchClient().Batch(remote, &batchRequest{ + Operation: dir.String(), + Objects: objects, + TransferAdapterNames: m.GetAdapterNames(dir), + }) +} + +func (c *tqClient) Batch(remote string, bReq *batchRequest) (*BatchResponse, error) { + bRes := &BatchResponse{} + if len(bReq.Objects) == 0 { + return bRes, nil + } + + if len(bReq.TransferAdapterNames) == 1 && bReq.TransferAdapterNames[0] == "basic" { + bReq.TransferAdapterNames = nil + } + + bRes.endpoint = c.Endpoints.Endpoint(bReq.Operation, remote) + req, err := c.NewRequest("POST", bRes.endpoint, "objects/batch", bReq) + if err != nil { + return nil, errors.Wrap(err, "batch request") + } + + tracerx.Printf("api: batch %d files", len(bReq.Objects)) + + res, err := c.DoWithAuth(remote, req) + if err != nil { + tracerx.Printf("api error: %s", err) + return nil, errors.Wrap(err, "batch response") + } + c.LogResponse("lfs.batch", res) + + if err := lfsapi.DecodeJSON(res, bRes); err != nil { + return bRes, errors.Wrap(err, "batch response") + } + + if res.StatusCode != 200 { + return nil, lfsapi.NewStatusCodeError(res) + } + + return bRes, nil +} diff --git a/tq/api_test.go b/tq/api_test.go new file mode 100644 index 00000000..538262f0 --- /dev/null +++ b/tq/api_test.go @@ -0,0 +1,114 @@ +package tq + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + + "github.com/git-lfs/git-lfs/lfsapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAPIBatch(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/objects/batch" { + w.WriteHeader(404) + return + } + + assert.Equal(t, "POST", r.Method) + + bReq := &batchRequest{} + err := json.NewDecoder(r.Body).Decode(bReq) + r.Body.Close() + assert.Nil(t, err) + assert.EqualValues(t, []string{"basic", "whatev"}, bReq.TransferAdapterNames) + if assert.Equal(t, 1, len(bReq.Objects)) { + assert.Equal(t, "a", bReq.Objects[0].Oid) + } + + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(&BatchResponse{ + TransferAdapterName: "basic", + Objects: bReq.Objects, + }) + })) + defer srv.Close() + + c, err := lfsapi.NewClient(nil, 
lfsapi.TestEnv(map[string]string{ + "lfs.url": srv.URL + "/api", + })) + require.Nil(t, err) + + tqc := &tqClient{Client: c} + bReq := &batchRequest{ + TransferAdapterNames: []string{"basic", "whatev"}, + Objects: []*Transfer{ + &Transfer{Oid: "a", Size: 1}, + }, + } + bRes, err := tqc.Batch("remote", bReq) + require.Nil(t, err) + assert.Equal(t, "basic", bRes.TransferAdapterName) + if assert.Equal(t, 1, len(bRes.Objects)) { + assert.Equal(t, "a", bRes.Objects[0].Oid) + } +} + +func TestAPIBatchOnlyBasic(t *testing.T) { + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/api/objects/batch" { + w.WriteHeader(404) + return + } + + assert.Equal(t, "POST", r.Method) + + bReq := &batchRequest{} + err := json.NewDecoder(r.Body).Decode(bReq) + r.Body.Close() + assert.Nil(t, err) + assert.Equal(t, 0, len(bReq.TransferAdapterNames)) + if assert.Equal(t, 1, len(bReq.Objects)) { + assert.Equal(t, "a", bReq.Objects[0].Oid) + } + + w.Header().Set("Content-Type", "application/json") + err = json.NewEncoder(w).Encode(&BatchResponse{ + TransferAdapterName: "basic", + }) + })) + defer srv.Close() + + c, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.url": srv.URL + "/api", + })) + require.Nil(t, err) + + tqc := &tqClient{Client: c} + bReq := &batchRequest{ + TransferAdapterNames: []string{"basic"}, + Objects: []*Transfer{ + &Transfer{Oid: "a", Size: 1}, + }, + } + bRes, err := tqc.Batch("remote", bReq) + require.Nil(t, err) + assert.Equal(t, "basic", bRes.TransferAdapterName) +} + +func TestAPIBatchEmptyObjects(t *testing.T) { + c, err := lfsapi.NewClient(nil, nil) + require.Nil(t, err) + + tqc := &tqClient{Client: c} + bReq := &batchRequest{ + TransferAdapterNames: []string{"basic", "whatev"}, + } + bRes, err := tqc.Batch("remote", bReq) + require.Nil(t, err) + assert.Equal(t, "", bRes.TransferAdapterName) + assert.Equal(t, 0, len(bRes.Objects)) +} diff --git a/tq/basic_download.go b/tq/basic_download.go index 766bca93..b77b3298 100644 --- a/tq/basic_download.go +++ b/tq/basic_download.go @@ -9,9 +9,7 @@ import ( "regexp" "strconv" - "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" "github.com/git-lfs/git-lfs/localstorage" "github.com/git-lfs/git-lfs/tools" "github.com/rubyist/tracerx" @@ -44,7 +42,6 @@ func (a *basicDownloadAdapter) WorkerEnding(workerNum int, ctx interface{}) { } func (a *basicDownloadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCallback, authOkFunc func()) error { - f, fromByte, hashSoFar, err := a.checkResumeDownload(t) if err != nil { return err @@ -97,7 +94,7 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk // return errors.New("Object not found on the server.") } - req, err := httputil.NewHttpRequest("GET", rel.Href, rel.Header) + req, err := a.newHTTPRequest("GET", rel) if err != nil { return err } @@ -110,7 +107,7 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Size-1)) } - res, err := httputil.DoHttpRequest(config.Config, req, !t.Authenticated) + res, err := a.doHTTP(t, req) if err != nil { // Special-case status code 416 () - fall back if fromByte > 0 && dlFile != nil && res.StatusCode == 416 { @@ -121,7 +118,8 @@ func (a *basicDownloadAdapter) download(t *Transfer, cb ProgressCallback, authOk } return errors.NewRetriableError(err) } - httputil.LogTransfer(config.Config, 
"lfs.data.download", res) + + a.apiClient.LogResponse("lfs.data.download", res) defer res.Body.Close() // Range request must return 206 & content range to confirm diff --git a/tq/basic_upload.go b/tq/basic_upload.go index 6175c50d..41b9d082 100644 --- a/tq/basic_upload.go +++ b/tq/basic_upload.go @@ -6,11 +6,10 @@ import ( "os" "path/filepath" "strconv" + "strings" - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" ) @@ -50,7 +49,7 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres // return fmt.Errorf("No upload action for this object.") } - req, err := httputil.NewHttpRequest("PUT", rel.Href, rel.Header) + req, err := a.newHTTPRequest("PUT", rel) if err != nil { return err } @@ -81,27 +80,24 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres } return nil } - var reader io.Reader - reader = &progress.CallbackReader{ - C: ccb, - TotalSize: t.Size, - Reader: f, - } + var reader lfsapi.ReadSeekCloser = progress.NewBodyWithCallback(f, t.Size, ccb) // Signal auth was ok on first read; this frees up other workers to start if authOkFunc != nil { - reader = newStartCallbackReader(reader, func(*startCallbackReader) { + reader = newStartCallbackReader(reader, func() error { authOkFunc() + return nil }) } - req.Body = ioutil.NopCloser(reader) + req.Body = reader - res, err := httputil.DoHttpRequest(config.Config, req, !t.Authenticated) + res, err := a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } - httputil.LogTransfer(config.Config, "lfs.data.upload", res) + + a.apiClient.LogResponse("lfs.data.upload", res) // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. @@ -111,32 +107,41 @@ func (a *basicUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb Progres } if res.StatusCode > 299 { - return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode) + return errors.Wrapf(nil, "Invalid status for %s %s: %d", + req.Method, + strings.SplitN(req.URL.String(), "?", 2)[0], + res.StatusCode, + ) } io.Copy(ioutil.Discard, res.Body) res.Body.Close() - return api.VerifyUpload(config.Config, toApiObject(t)) + return verifyUpload(a.apiClient, t) } // startCallbackReader is a reader wrapper which calls a function as soon as the // first Read() call is made. 
This callback is only made once type startCallbackReader struct { - r io.Reader - cb func(*startCallbackReader) + cb func() error cbDone bool + lfsapi.ReadSeekCloser } func (s *startCallbackReader) Read(p []byte) (n int, err error) { if !s.cbDone && s.cb != nil { - s.cb(s) + if err := s.cb(); err != nil { + return 0, err + } s.cbDone = true } - return s.r.Read(p) + return s.ReadSeekCloser.Read(p) } -func newStartCallbackReader(r io.Reader, cb func(*startCallbackReader)) *startCallbackReader { - return &startCallbackReader{r, cb, false} +func newStartCallbackReader(r lfsapi.ReadSeekCloser, cb func() error) *startCallbackReader { + return &startCallbackReader{ + ReadSeekCloser: r, + cb: cb, + } } func configureBasicUploadAdapter(m *Manifest) { diff --git a/tq/custom.go b/tq/custom.go index bacd46e5..722d3a81 100644 --- a/tq/custom.go +++ b/tq/custom.go @@ -12,13 +12,11 @@ import ( "strings" "time" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/tools" - "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/subprocess" "github.com/rubyist/tracerx" - - "github.com/git-lfs/git-lfs/config" ) // Adapter for custom transfer via external process @@ -88,7 +86,7 @@ func NewCustomAdapterDownloadRequest(oid string, size int64, action *Action) *cu } type customAdapterTerminateRequest struct { - MessageType string `json:"type"` + Event string `json:"event"` } func NewCustomAdapterTerminateRequest() *customAdapterTerminateRequest { @@ -113,8 +111,7 @@ func (a *customAdapter) Begin(cfg AdapterConfig, cb ProgressCallback) error { } // If config says not to launch multiple processes, downgrade incoming value - newCfg := &Manifest{concurrentTransfers: 1} - return a.adapterBase.Begin(newCfg, cb) + return a.adapterBase.Begin(&customAdapterConfig{AdapterConfig: cfg}, cb) } func (a *customAdapter) ClearTempStorage() error { @@ -316,7 +313,8 @@ func (a *customAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressCall return fmt.Errorf("Failed to copy downloaded file: %v", err) } } else if a.direction == Upload { - if err = api.VerifyUpload(config.Config, toApiObject(t)); err != nil { + cli := &lfsapi.Client{} + if err = verifyUpload(cli, t); err != nil { return err } } @@ -377,3 +375,11 @@ func configureCustomAdapters(git Env, m *Manifest) { } } } + +type customAdapterConfig struct { + AdapterConfig +} + +func (c *customAdapterConfig) ConcurrentTransfers() int { + return 1 +} diff --git a/tq/custom_test.go b/tq/custom_test.go index d17adf68..f07bcaa8 100644 --- a/tq/custom_test.go +++ b/tq/custom_test.go @@ -3,17 +3,19 @@ package tq import ( "testing" - "github.com/git-lfs/git-lfs/config" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCustomTransferBasicConfig(t *testing.T) { path := "/path/to/binary" - cfg := config.NewFrom(config.Values{ - Git: map[string]string{"lfs.customtransfer.testsimple.path": path}, - }) + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.customtransfer.testsimple.path": path, + })) + require.Nil(t, err) - m := NewManifestWithGitEnv("", cfg.Git) + m := NewManifestWithClient(cli) u := m.NewUploadAdapter("testsimple") assert.NotNil(t, u, "Upload adapter should be present") cu, _ := u.(*customAdapter) @@ -34,16 +36,15 @@ func TestCustomTransferBasicConfig(t *testing.T) { func TestCustomTransferDownloadConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever" - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - 
"lfs.customtransfer.testdownload.path": path, - "lfs.customtransfer.testdownload.args": args, - "lfs.customtransfer.testdownload.concurrent": "false", - "lfs.customtransfer.testdownload.direction": "download", - }, - }) + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.customtransfer.testdownload.path": path, + "lfs.customtransfer.testdownload.args": args, + "lfs.customtransfer.testdownload.concurrent": "false", + "lfs.customtransfer.testdownload.direction": "download", + })) + require.Nil(t, err) - m := NewManifestWithGitEnv("", cfg.Git) + m := NewManifestWithClient(cli) u := m.NewUploadAdapter("testdownload") assert.NotNil(t, u, "Upload adapter should always be created") cu, _ := u.(*customAdapter) @@ -61,16 +62,15 @@ func TestCustomTransferDownloadConfig(t *testing.T) { func TestCustomTransferUploadConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever" - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.customtransfer.testupload.path": path, - "lfs.customtransfer.testupload.args": args, - "lfs.customtransfer.testupload.concurrent": "false", - "lfs.customtransfer.testupload.direction": "upload", - }, - }) + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.customtransfer.testupload.path": path, + "lfs.customtransfer.testupload.args": args, + "lfs.customtransfer.testupload.concurrent": "false", + "lfs.customtransfer.testupload.direction": "upload", + })) + require.Nil(t, err) - m := NewManifestWithGitEnv("", cfg.Git) + m := NewManifestWithClient(cli) d := m.NewDownloadAdapter("testupload") assert.NotNil(t, d, "Download adapter should always be created") cd, _ := d.(*customAdapter) @@ -88,16 +88,15 @@ func TestCustomTransferUploadConfig(t *testing.T) { func TestCustomTransferBothConfig(t *testing.T) { path := "/path/to/binary" args := "-c 1 --whatever --yeah" - cfg := config.NewFrom(config.Values{ - Git: map[string]string{ - "lfs.customtransfer.testboth.path": path, - "lfs.customtransfer.testboth.args": args, - "lfs.customtransfer.testboth.concurrent": "yes", - "lfs.customtransfer.testboth.direction": "both", - }, - }) + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.customtransfer.testboth.path": path, + "lfs.customtransfer.testboth.args": args, + "lfs.customtransfer.testboth.concurrent": "yes", + "lfs.customtransfer.testboth.direction": "both", + })) + require.Nil(t, err) - m := NewManifestWithGitEnv("", cfg.Git) + m := NewManifestWithClient(cli) d := m.NewDownloadAdapter("testboth") assert.NotNil(t, d, "Download adapter should be present") cd, _ := d.(*customAdapter) diff --git a/tq/manifest.go b/tq/manifest.go index 173134d8..3a20a22a 100644 --- a/tq/manifest.go +++ b/tq/manifest.go @@ -3,6 +3,7 @@ package tq import ( "sync" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/rubyist/tracerx" ) @@ -20,9 +21,15 @@ type Manifest struct { tusTransfersAllowed bool downloadAdapterFuncs map[string]NewAdapterFunc uploadAdapterFuncs map[string]NewAdapterFunc + apiClient *lfsapi.Client + tqClient *tqClient mu sync.Mutex } +func (m *Manifest) APIClient() *lfsapi.Client { + return m.apiClient +} + func (m *Manifest) MaxRetries() int { return m.maxRetries } @@ -31,18 +38,30 @@ func (m *Manifest) ConcurrentTransfers() int { return m.concurrentTransfers } -func NewManifest() *Manifest { - return NewManifestWithGitEnv("", nil) +func (m *Manifest) batchClient() *tqClient { + return m.tqClient } -func NewManifestWithGitEnv(access string, git Env) *Manifest { +func NewManifest() 
*Manifest { + cli, err := lfsapi.NewClient(nil, nil) + if err != nil { + tracerx.Printf("unable to init tq.Manifest: %s", err) + return nil + } + + return NewManifestWithClient(cli) +} + +func NewManifestWithClient(apiClient *lfsapi.Client) *Manifest { m := &Manifest{ + apiClient: apiClient, + tqClient: &tqClient{Client: apiClient}, downloadAdapterFuncs: make(map[string]NewAdapterFunc), uploadAdapterFuncs: make(map[string]NewAdapterFunc), } var tusAllowed bool - if git != nil { + if git := apiClient.GitEnv(); git != nil { if v := git.Int("lfs.transfer.maxretries", 0); v > 0 { m.maxRetries = v } @@ -58,9 +77,7 @@ func NewManifestWithGitEnv(access string, git Env) *Manifest { m.maxRetries = defaultMaxRetries } - if access == "ntlm" { - m.concurrentTransfers = 1 - } else if m.concurrentTransfers < 1 { + if m.concurrentTransfers < 1 { m.concurrentTransfers = defaultConcurrentTransfers } diff --git a/tq/manifest_test.go b/tq/manifest_test.go new file mode 100644 index 00000000..94ce3f81 --- /dev/null +++ b/tq/manifest_test.go @@ -0,0 +1,51 @@ +package tq + +import ( + "testing" + + "github.com/git-lfs/git-lfs/lfsapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestManifestIsConfigurable(t *testing.T) { + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.transfer.maxretries": "3", + })) + require.Nil(t, err) + + m := NewManifestWithClient(cli) + assert.Equal(t, 3, m.MaxRetries()) +} + +func TestManifestChecksNTLM(t *testing.T) { + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.url": "http://foo", + "lfs.http://foo.access": "ntlm", + "lfs.concurrenttransfers": "3", + })) + require.Nil(t, err) + + m := NewManifestWithClient(cli) + assert.Equal(t, 1, m.MaxRetries()) +} + +func TestManifestClampsValidValues(t *testing.T) { + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.transfer.maxretries": "-1", + })) + require.Nil(t, err) + + m := NewManifestWithClient(cli) + assert.Equal(t, 1, m.MaxRetries()) +} + +func TestManifestIgnoresNonInts(t *testing.T) { + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.transfer.maxretries": "not_an_int", + })) + require.Nil(t, err) + + m := NewManifestWithClient(cli) + assert.Equal(t, 1, m.MaxRetries()) +} diff --git a/tq/transfer.go b/tq/transfer.go index 84135f43..462358b9 100644 --- a/tq/transfer.go +++ b/tq/transfer.go @@ -6,8 +6,8 @@ import ( "fmt" "time" - "github.com/git-lfs/git-lfs/api" "github.com/git-lfs/git-lfs/errors" + "github.com/git-lfs/git-lfs/lfsapi" ) type Direction int @@ -17,14 +17,34 @@ const ( Download = Direction(iota) ) +func (d Direction) String() string { + switch d { + case Download: + return "download" + case Upload: + return "upload" + default: + return "" + } +} + type Transfer struct { - Name string `json:"name"` + Name string `json:"name,omitempty"` Oid string `json:"oid,omitempty"` Size int64 `json:"size"` Authenticated bool `json:"authenticated,omitempty"` Actions ActionSet `json:"actions,omitempty"` Error *ObjectError `json:"error,omitempty"` - Path string `json:"path"` + Path string `json:"path,omitempty"` +} + +func (t *Transfer) Rel(name string) (*Action, bool) { + if t.Actions == nil { + return nil, false + } + + rel, ok := t.Actions[name] + return rel, ok } type ObjectError struct { @@ -32,25 +52,30 @@ type ObjectError struct { Message string `json:"message"` } -// newTransfer creates a new Transfer instance -func newTransfer(name string, obj *api.ObjectResource, path string) 
*Transfer { +func (e *ObjectError) Error() string { + return fmt.Sprintf("[%d] %s", e.Code, e.Message) +} + +// newTransfer returns a copy of the given Transfer, with the name and path +// values set. +func newTransfer(tr *Transfer, name string, path string) *Transfer { t := &Transfer{ Name: name, - Oid: obj.Oid, - Size: obj.Size, - Authenticated: obj.Authenticated, - Actions: make(ActionSet), Path: path, + Oid: tr.Oid, + Size: tr.Size, + Authenticated: tr.Authenticated, + Actions: make(ActionSet), } - if obj.Error != nil { + if tr.Error != nil { t.Error = &ObjectError{ - Code: obj.Error.Code, - Message: obj.Error.Message, + Code: tr.Error.Code, + Message: tr.Error.Message, } } - for rel, action := range obj.Actions { + for rel, action := range tr.Actions { t.Actions[rel] = &Action{ Href: action.Href, Header: action.Header, @@ -59,7 +84,6 @@ func newTransfer(name string, obj *api.ObjectResource, path string) *Transfer { } return t - } type Action struct { @@ -122,32 +146,6 @@ func IsActionMissingError(err error) bool { return false } -func toApiObject(t *Transfer) *api.ObjectResource { - o := &api.ObjectResource{ - Oid: t.Oid, - Size: t.Size, - Authenticated: t.Authenticated, - Actions: make(map[string]*api.LinkRelation), - } - - for rel, a := range t.Actions { - o.Actions[rel] = &api.LinkRelation{ - Href: a.Href, - Header: a.Header, - ExpiresAt: a.ExpiresAt, - } - } - - if t.Error != nil { - o.Error = &api.ObjectError{ - Code: t.Error.Code, - Message: t.Error.Message, - } - } - - return o -} - // NewAdapterFunc creates new instances of Adapter. Code that wishes // to provide new Adapter instances should pass an implementation of this // function to RegisterNewTransferAdapterFunc() on a *Manifest. @@ -157,7 +155,27 @@ type NewAdapterFunc func(name string, dir Direction) Adapter type ProgressCallback func(name string, totalSize, readSoFar int64, readSinceLast int) error type AdapterConfig interface { + APIClient() *lfsapi.Client ConcurrentTransfers() int + Remote() string +} + +type adapterConfig struct { + apiClient *lfsapi.Client + concurrentTransfers int + remote string +} + +func (c *adapterConfig) ConcurrentTransfers() int { + return c.concurrentTransfers +} + +func (c *adapterConfig) APIClient() *lfsapi.Client { + return c.apiClient +} + +func (c *adapterConfig) Remote() string { + return c.remote } // Adapter is implemented by types which can upload and/or download LFS diff --git a/tq/transfer_queue.go b/tq/transfer_queue.go index febd9f38..7f59b89c 100644 --- a/tq/transfer_queue.go +++ b/tq/transfer_queue.go @@ -1,12 +1,12 @@ package tq import ( + "os" "sort" "sync" - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/rubyist/tracerx" ) @@ -72,19 +72,14 @@ func (r *retryCounter) CanRetry(oid string) (int, bool) { // all other workers are sitting idle. 
type batch []*objectTuple -func (b batch) ApiObjects() []*api.ObjectResource { - transfers := make([]*api.ObjectResource, 0, len(b)) +func (b batch) ToTransfers() []*Transfer { + transfers := make([]*Transfer, 0, len(b)) for _, t := range b { - transfers = append(transfers, tupleToApiObject(t)) + transfers = append(transfers, &Transfer{Oid: t.Oid, Size: t.Size}) } - return transfers } -func tupleToApiObject(t *objectTuple) *api.ObjectResource { - return &api.ObjectResource{Oid: t.Oid, Size: t.Size} -} - func (b batch) Len() int { return len(b) } func (b batch) Less(i, j int) bool { return b[i].Size < b[j].Size } func (b batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } @@ -94,6 +89,8 @@ func (b batch) Swap(i, j int) { b[i], b[j] = b[j], b[i] } // adapters, and dealing with progress, errors and retries. type TransferQueue struct { direction Direction + client *tqClient + remote string adapter Adapter adapterInProgress bool adapterInitMutex sync.Mutex @@ -147,9 +144,11 @@ func WithBufferDepth(depth int) Option { } // NewTransferQueue builds a TransferQueue, direction and underlying mechanism determined by adapter -func NewTransferQueue(dir Direction, manifest *Manifest, options ...Option) *TransferQueue { +func NewTransferQueue(dir Direction, manifest *Manifest, remote string, options ...Option) *TransferQueue { q := &TransferQueue{ direction: dir, + client: &tqClient{Client: manifest.APIClient()}, + remote: remote, errorc: make(chan error), transfers: make(map[string]*objectTuple), trMutex: &sync.Mutex{}, @@ -281,16 +280,10 @@ func (q *TransferQueue) collectBatches() { // enqueueAndCollectRetriesFor blocks until the entire Batch "batch" has been // processed. func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) { - cfg := config.Config - next := q.makeBatch() - transferAdapterNames := q.manifest.GetAdapterNames(q.direction) - tracerx.Printf("tq: sending batch of size %d", len(batch)) - objs, adapterName, err := api.Batch( - cfg, batch.ApiObjects(), q.transferKind(), transferAdapterNames, - ) + bRes, err := Batch(q.manifest, q.direction, q.remote, batch.ToTransfers()) if err != nil { // If there was an error making the batch API call, mark all of // the objects for retry, and return them along with the error @@ -309,12 +302,16 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) return next, err } - q.useAdapter(adapterName) + if len(bRes.Objects) == 0 { + return next, nil + } + + q.useAdapter(bRes.TransferAdapterName) q.startProgress.Do(q.meter.Start) - toTransfer := make([]*Transfer, 0, len(objs)) + toTransfer := make([]*Transfer, 0, len(bRes.Objects)) - for _, o := range objs { + for _, o := range bRes.Objects { if o.Error != nil { q.errorc <- errors.Wrapf(o.Error, "[%v] %v", o.Oid, o.Error.Message) q.Skip(o.Size) @@ -336,9 +333,9 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) q.Skip(o.Size) q.wait.Done() } else { - tr := newTransfer(t.Name, o, t.Path) + tr := newTransfer(o, t.Name, t.Path) - if _, err := tr.Actions.Get(q.transferKind()); err != nil { + if _, err := tr.Actions.Get(q.direction.String()); err != nil { // XXX(taylor): duplication if q.canRetryObject(tr.Oid, err) { q.rc.Increment(tr.Oid) @@ -362,7 +359,7 @@ func (q *TransferQueue) enqueueAndCollectRetriesFor(batch batch) (batch, error) } } - retries := q.addToAdapter(toTransfer) + retries := q.addToAdapter(bRes.endpoint, toTransfer) for t := range retries { q.rc.Increment(t.Oid) count := q.rc.CountFor(t.Oid) @@ -385,10 +382,10 @@ func 
(q *TransferQueue) makeBatch() batch { return make(batch, 0, q.batchSize) } // closed. // // addToAdapter returns immediately, and does not block. -func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan *objectTuple { +func (q *TransferQueue) addToAdapter(e lfsapi.Endpoint, pending []*Transfer) <-chan *objectTuple { retries := make(chan *objectTuple, len(pending)) - if err := q.ensureAdapterBegun(); err != nil { + if err := q.ensureAdapterBegun(e); err != nil { close(retries) q.errorc <- err @@ -400,16 +397,21 @@ func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan *objectTuple { return retries } + present, missingResults := q.partitionTransfers(pending) + go func() { defer close(retries) var results <-chan TransferResult if q.dryRun { - results = q.makeDryRunResults(pending) + results = q.makeDryRunResults(present) } else { - results = q.adapter.Add(pending...) + results = q.adapter.Add(present...) } + for _, res := range missingResults { + q.handleTransferResult(res, retries) + } for res := range results { q.handleTransferResult(res, retries) } @@ -418,6 +420,39 @@ func (q *TransferQueue) addToAdapter(pending []*Transfer) <-chan *objectTuple { return retries } +func (q *TransferQueue) partitionTransfers(transfers []*Transfer) (present []*Transfer, results []TransferResult) { + if q.direction != Upload { + return transfers, nil + } + + present = make([]*Transfer, 0, len(transfers)) + results = make([]TransferResult, 0, len(transfers)) + + for _, t := range transfers { + var err error + + if t.Size < 0 { + err = errors.Errorf("Git LFS: object %q has invalid size (got: %d)", t.Oid, t.Size) + } else { + fd, serr := os.Stat(t.Path) + if serr != nil || fd.Size() != t.Size { + err = errors.Errorf("Unable to find object (%s) locally.", t.Oid) + } + } + + if err != nil { + results = append(results, TransferResult{ + Transfer: t, + Error: err, + }) + } else { + present = append(present, t) + } + } + + return +} + // makeDryRunResults returns a channel populated immediately with "successful" // results for all of the given transfers in "ts". 
func (q *TransferQueue) makeDryRunResults(ts []*Transfer) <-chan TransferResult { @@ -507,15 +542,7 @@ func (q *TransferQueue) Skip(size int64) { q.meter.Skip(size) } -func (q *TransferQueue) transferKind() string { - if q.direction == Download { - return "download" - } else { - return "upload" - } -} - -func (q *TransferQueue) ensureAdapterBegun() error { +func (q *TransferQueue) ensureAdapterBegun(e lfsapi.Endpoint) error { q.adapterInitMutex.Lock() defer q.adapterInitMutex.Unlock() @@ -525,12 +552,12 @@ func (q *TransferQueue) ensureAdapterBegun() error { // Progress callback - receives byte updates cb := func(name string, total, read int64, current int) error { - q.meter.TransferBytes(q.transferKind(), name, read, total, current) + q.meter.TransferBytes(q.direction.String(), name, read, total, current) return nil } tracerx.Printf("tq: starting transfer adapter %q", q.adapter.Name()) - err := q.adapter.Begin(q.manifest, cb) + err := q.adapter.Begin(q.toAdapterCfg(e), cb) if err != nil { return err } @@ -539,6 +566,20 @@ func (q *TransferQueue) ensureAdapterBegun() error { return nil } +func (q *TransferQueue) toAdapterCfg(e lfsapi.Endpoint) AdapterConfig { + apiClient := q.manifest.APIClient() + concurrency := q.manifest.ConcurrentTransfers() + if apiClient.Endpoints.AccessFor(e.Url) == lfsapi.NTLMAccess { + concurrency = 1 + } + + return &adapterConfig{ + concurrentTransfers: concurrency, + apiClient: apiClient, + remote: q.remote, + } +} + // Wait waits for the queue to finish processing all transfers. Once Wait is // called, Add will no longer add transfers to the queue. Any failed // transfers will be automatically retried once. diff --git a/tq/transfer_test.go b/tq/transfer_test.go index 96640146..c13a1ad5 100644 --- a/tq/transfer_test.go +++ b/tq/transfer_test.go @@ -3,9 +3,9 @@ package tq import ( "testing" - "github.com/git-lfs/git-lfs/config" - + "github.com/git-lfs/git-lfs/lfsapi" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) type testAdapter struct { @@ -118,10 +118,12 @@ func testAdapterRegAndOverride(t *testing.T) { } func testAdapterRegButBasicOnly(t *testing.T) { - cfg := config.NewFrom(config.Values{ - Git: map[string]string{"lfs.basictransfersonly": "yes"}, - }) - m := NewManifestWithGitEnv("", cfg.Git) + cli, err := lfsapi.NewClient(nil, lfsapi.TestEnv(map[string]string{ + "lfs.basictransfersonly": "yes", + })) + require.Nil(t, err) + + m := NewManifestWithClient(cli) assert := assert.New(t) diff --git a/tq/tus_upload.go b/tq/tus_upload.go index dced036e..75317a39 100644 --- a/tq/tus_upload.go +++ b/tq/tus_upload.go @@ -6,11 +6,10 @@ import ( "io/ioutil" "os" "strconv" + "strings" - "github.com/git-lfs/git-lfs/api" - "github.com/git-lfs/git-lfs/config" "github.com/git-lfs/git-lfs/errors" - "github.com/git-lfs/git-lfs/httputil" + "github.com/git-lfs/git-lfs/lfsapi" "github.com/git-lfs/git-lfs/progress" "github.com/rubyist/tracerx" ) @@ -49,12 +48,14 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC // 1. 
Send HEAD request to determine upload start point // Request must include Tus-Resumable header (version) tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Oid) - req, err := httputil.NewHttpRequest("HEAD", rel.Href, rel.Header) + req, err := a.newHTTPRequest("HEAD", rel) if err != nil { return err } + req.Header.Set("Tus-Resumable", TusVersion) - res, err := httputil.DoHttpRequest(config.Config, req, false) + + res, err := a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } @@ -89,10 +90,6 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC } else { tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Oid, offset) advanceCallbackProgress(cb, t, offset) - _, err := f.Seek(offset, os.SEEK_CUR) - if err != nil { - return errors.Wrap(err, "tus upload") - } } // 2. Send PATCH request with byte start point (even if 0) in Upload-Offset @@ -101,10 +98,11 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC // Response may include Upload-Expires header in which case check not passed tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Oid) - req, err = httputil.NewHttpRequest("PATCH", rel.Href, rel.Header) + req, err = a.newHTTPRequest("PATCH", rel) if err != nil { return err } + req.Header.Set("Tus-Resumable", TusVersion) req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10)) req.Header.Set("Content-Type", "application/offset+octet-stream") @@ -119,27 +117,28 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC } return nil } - var reader io.Reader - reader = &progress.CallbackReader{ - C: ccb, - TotalSize: t.Size, - Reader: f, - } - // Signal auth was ok on first read; this frees up other workers to start - if authOkFunc != nil { - reader = newStartCallbackReader(reader, func(*startCallbackReader) { + var reader lfsapi.ReadSeekCloser = progress.NewBodyWithCallback(f, t.Size, ccb) + reader = newStartCallbackReader(reader, func() error { + // seek to the offset since lfsapi.Client rewinds the body + if _, err := f.Seek(offset, os.SEEK_CUR); err != nil { + return err + } + // Signal auth was ok on first read; this frees up other workers to start + if authOkFunc != nil { authOkFunc() - }) - } + } + return nil + }) - req.Body = ioutil.NopCloser(reader) + req.Body = reader - res, err = httputil.DoHttpRequest(config.Config, req, false) + res, err = a.doHTTP(t, req) if err != nil { return errors.NewRetriableError(err) } - httputil.LogTransfer(config.Config, "lfs.data.upload", res) + + a.apiClient.LogResponse("lfs.data.upload", res) // A status code of 403 likely means that an authentication token for the // upload has expired. This can be safely retried. 
@@ -149,13 +148,18 @@ func (a *tusUploadAdapter) DoTransfer(ctx interface{}, t *Transfer, cb ProgressC } if res.StatusCode > 299 { - return errors.Wrapf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode) + return errors.Wrapf(nil, "Invalid status for %s %s: %d", + req.Method, + strings.SplitN(req.URL.String(), "?", 2)[0], + res.StatusCode, + ) } io.Copy(ioutil.Discard, res.Body) res.Body.Close() - return api.VerifyUpload(config.Config, toApiObject(t)) + cli := &lfsapi.Client{} + return verifyUpload(cli, t) } func configureTusAdapter(m *Manifest) { diff --git a/tq/verify.go b/tq/verify.go new file mode 100644 index 00000000..4c88c195 --- /dev/null +++ b/tq/verify.go @@ -0,0 +1,42 @@ +package tq + +import ( + "net/http" + + "github.com/git-lfs/git-lfs/lfsapi" +) + +func verifyUpload(c *lfsapi.Client, t *Transfer) error { + action, err := t.Actions.Get("verify") + if err != nil { + if IsActionMissingError(err) { + return nil + } + return err + } + + req, err := http.NewRequest("POST", action.Href, nil) + if err != nil { + return err + } + + err = lfsapi.MarshalToRequest(req, struct { + Oid string `json:"oid"` + Size int64 `json:"size"` + }{Oid: t.Oid, Size: t.Size}) + if err != nil { + return err + } + + for key, value := range action.Header { + req.Header.Set(key, value) + } + req.Header.Set("Content-Type", "application/vnd.git-lfs+json") + + res, err := c.Do(req) + if err != nil { + return err + } + + return res.Body.Close() +} diff --git a/tq/verify_test.go b/tq/verify_test.go new file mode 100644 index 00000000..a87d6e1b --- /dev/null +++ b/tq/verify_test.go @@ -0,0 +1,62 @@ +package tq + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "sync/atomic" + "testing" + + "github.com/git-lfs/git-lfs/lfsapi" + "github.com/stretchr/testify/assert" +) + +func TestVerifyWithoutAction(t *testing.T) { + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + } + + assert.Nil(t, verifyUpload(c, tr)) +} + +func TestVerifySuccess(t *testing.T) { + var called uint32 + srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.String() != "/verify" { + w.WriteHeader(http.StatusNotFound) + return + } + + atomic.AddUint32(&called, 1) + + assert.Equal(t, "POST", r.Method) + assert.Equal(t, "bar", r.Header.Get("Foo")) + assert.Equal(t, "24", r.Header.Get("Content-Length")) + assert.Equal(t, "application/vnd.git-lfs+json", r.Header.Get("Content-Type")) + + var tr Transfer + assert.Nil(t, json.NewDecoder(r.Body).Decode(&tr)) + assert.Equal(t, "abc", tr.Oid) + assert.EqualValues(t, 123, tr.Size) + })) + defer srv.Close() + + c := &lfsapi.Client{} + tr := &Transfer{ + Oid: "abc", + Size: 123, + Actions: map[string]*Action{ + "verify": &Action{ + Href: srv.URL + "/verify", + Header: map[string]string{ + "foo": "bar", + }, + }, + }, + } + + assert.Nil(t, verifyUpload(c, tr)) + assert.EqualValues(t, 1, called) +}
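
For reviewers, here is a minimal, hedged sketch of how a caller would wire up the refactored `tq` package after this change. It only uses constructors and methods visible in this diff (`lfsapi.NewClient`, `tq.NewManifestWithClient`, `tq.NewTransferQueue` with the new `remote` argument, and `Wait`); how objects are enqueued is assumed and abbreviated, since the queue's `Add` signature is not part of this diff.

```
package main

import (
	"log"

	"github.com/git-lfs/git-lfs/lfsapi"
	"github.com/git-lfs/git-lfs/tq"
)

func main() {
	// The manifest now carries an *lfsapi.Client instead of reading the
	// global config.Config, so callers construct the client first.
	cli, err := lfsapi.NewClient(nil, nil)
	if err != nil {
		log.Fatal(err)
	}

	manifest := tq.NewManifestWithClient(cli)

	// NewTransferQueue now takes the remote name explicitly; the NTLM
	// concurrency clamp is applied per endpoint when the adapter begins.
	q := tq.NewTransferQueue(tq.Upload, manifest, "origin")

	// ... enqueue objects here; the Add signature is assumed and omitted ...

	// Wait blocks until every queued transfer has finished or failed.
	q.Wait()
}
```

The adapters pick up the same client through `AdapterConfig.APIClient()`, which is why `verifyUpload` now takes an `*lfsapi.Client` rather than a `*config.Configuration`.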