Merge branch 'master' into experimental/transfer-features

# Conflicts:
#	lfs/pointer_smudge.go
#	lfs/transfer_queue.go
#	lfs/util_test.go
This commit is contained in:
Steve Streeting 2016-05-31 09:30:22 +01:00
commit b3b31180e1
768 changed files with 24586 additions and 5190 deletions

@ -94,8 +94,8 @@ tests:
## Updating 3rd party packages
0. Update `Nut.toml`.
0. Run `script/vendor` to update the code in the `.vendor/src` directory.
0. Update `glide.yaml`.
0. Run `script/vendor` to update the code in the `vendor` directory.
0. Commit the change. Git LFS vendors the full source code in the repository.
0. Submit a pull request.

@ -1,24 +0,0 @@
[application]
name = "git-lfs"
version = "1.2.0"
authors = [
"Rick Olson <technoweenie@gmail.com>",
"Scott Barron <rubyist@github.com>",
]
[dependencies]
"github.com/bgentry/go-netrc/netrc" = "9fd32a8b3d3d3f9d43c341bfe098430e07609480"
"github.com/cheggaaa/pb" = "bd14546a551971ae7f460e6d6e527c5b56cd38d7"
"github.com/kr/pretty" = "088c856450c08c03eb32f7a6c221e6eefaa10e6f"
"github.com/kr/pty" = "5cf931ef8f76dccd0910001d74a58a7fca84a83d"
"github.com/kr/text" = "6807e777504f54ad073ecef66747de158294b639"
"github.com/inconshreveable/mousetrap" = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
"github.com/olekukonko/ts" = "ecf753e7c962639ab5a1fb46f7da627d4c0a04b8"
"github.com/rubyist/tracerx" = "d7bcc0bc315bed2a841841bee5dbecc8d7d7582f"
"github.com/spf13/cobra" = "c55cdf33856a08e4822738728b41783292812889"
"github.com/spf13/pflag" = "580b9be06c33d8ba9dcc8757ea56b7642472c2f5"
"github.com/ThomsonReutersEikon/go-ntlm/ntlm" = "52b7efa603f1b809167b528b8bbaa467e36fdc02"
"github.com/technoweenie/assert" = "b25ea301d127043ffacf3b2545726e79b6632139"
"github.com/technoweenie/go-contentaddressable" = "38171def3cd15e3b76eb156219b3d48704643899"

@ -14,7 +14,7 @@ import (
"github.com/github/git-lfs/httputil"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
// BatchOrLegacy calls the Batch API and falls back on the Legacy API

@ -1,155 +1,75 @@
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package api
import (
"net/http"
"net/url"
"path"
import "github.com/github/git-lfs/config"
"github.com/github/git-lfs/auth"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/httputil"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
)
type Operation string
const (
MediaType = "application/vnd.git-lfs+json; charset=utf-8"
UploadOperation Operation = "upload"
DownloadOperation Operation = "download"
)
// doLegacyApiRequest runs the request to the LFS legacy API.
func DoLegacyRequest(req *http.Request) (*http.Response, *ObjectResource, error) {
via := make([]*http.Request, 0, 4)
res, err := httputil.DoHttpRequestWithRedirects(req, via, true)
if err != nil {
return res, nil, err
}
// Client exposes the LFS API to callers through a multitude of different
// services and transport mechanisms. Callers can make a *RequestSchema using
// any service that is attached to the Client, and then execute a request based
// on that schema using the `Do()` method.
//
// A prototypical example follows:
// ```
// apiResponse, schema := client.Locks.Lock(request)
// resp, err := client.Do(schema)
// if err != nil {
// handleErr(err)
// }
//
// fmt.Println(apiResponse.Lock)
// ```
type Client struct {
// Locks is the LockService used to interact with the Git LFS file-
// locking API.
Locks LockService
obj := &ObjectResource{}
err = httputil.DecodeResponse(res, obj)
if err != nil {
httputil.SetErrorResponseContext(err, res)
return nil, nil, err
}
return res, obj, nil
// lifecycle is the lifecycle used by all requests through this client.
lifecycle Lifecycle
}
// doApiBatchRequest runs the request to the LFS batch API. If the API returns a
// 401, the repo will be marked as having private access and the request will be
// re-run. When the repo is marked as having private access, credentials will
// be retrieved.
func DoBatchRequest(req *http.Request) (*http.Response, []*ObjectResource, error) {
res, err := DoRequest(req, config.Config.PrivateAccess(auth.GetOperationForRequest(req)))
if err != nil {
if res != nil && res.StatusCode == 401 {
return res, nil, errutil.NewAuthError(err)
}
return res, nil, err
// NewClient instantiates and returns a new instance of *Client, with the given
// lifecycle.
//
// If no lifecycle is given, a HttpLifecycle is used by default.
func NewClient(lifecycle Lifecycle) *Client {
if lifecycle == nil {
lifecycle = NewHttpLifecycle(config.Config)
}
var objs map[string][]*ObjectResource
err = httputil.DecodeResponse(res, &objs)
if err != nil {
httputil.SetErrorResponseContext(err, res)
}
return res, objs["objects"], err
return &Client{lifecycle: lifecycle}
}
// DoRequest runs a request to the LFS API, without parsing the response
// body. If the API returns a 401, the repo will be marked as having private
// access and the request will be re-run. When the repo is marked as having
// private access, credentials will be retrieved.
func DoRequest(req *http.Request, useCreds bool) (*http.Response, error) {
	// Pre-size `via` for up to 4 redirects before the slice reallocates.
	via := make([]*http.Request, 0, 4)
	return httputil.DoHttpRequestWithRedirects(req, via, useCreds)
}
// NewRequest builds an *http.Request against the LFS API for the given HTTP
// method and object ID.
//
// A "POST" whose oid is not the literal "batch" is treated as an upload: the
// oid is dropped from the object URL and the "upload" endpoint is used. All
// other combinations are treated as downloads. SSH authentication is
// attempted first; any Href returned overrides the configured endpoint URL,
// and any headers it returns are attached to the request.
//
// NOTE(review): the method/oid branching above is read directly from the
// visible code; confirm intent against the LFS API docs before extending it.
func NewRequest(method, oid string) (*http.Request, error) {
	objectOid := oid
	operation := "download"
	if method == "POST" {
		if oid != "batch" {
			objectOid = ""
			operation = "upload"
		}
	}

	endpoint := config.Config.Endpoint(operation)

	res, err := auth.SshAuthenticate(endpoint, operation, oid)
	if err != nil {
		tracerx.Printf("ssh: attempted with %s. Error: %s",
			endpoint.SshUserAndHost, err.Error(),
		)
		return nil, err
	}

	// The SSH exchange may redirect us to a different HTTP endpoint.
	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, objectOid)
	if err != nil {
		return nil, err
	}

	req, err := httputil.NewHttpRequest(method, u.String(), res.Header)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", MediaType)
	return req, nil
}
// NewBatchRequest builds a "POST" *http.Request against the LFS batch API
// endpoint for the given operation ("upload" or "download").
//
// As with NewRequest, SSH authentication is attempted first: a returned Href
// overrides the endpoint URL, and any returned headers are copied onto the
// request after the Accept header is set.
func NewBatchRequest(operation string) (*http.Request, error) {
	endpoint := config.Config.Endpoint(operation)

	res, err := auth.SshAuthenticate(endpoint, operation, "")
	if err != nil {
		tracerx.Printf("ssh: %s attempted with %s. Error: %s",
			operation, endpoint.SshUserAndHost, err.Error(),
		)
		return nil, err
	}

	// The SSH exchange may redirect us to a different HTTP endpoint.
	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, "batch")
	if err != nil {
		return nil, err
	}

	req, err := httputil.NewHttpRequest("POST", u.String(), nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", MediaType)
	if res.Header != nil {
		for key, value := range res.Header {
			req.Header.Set(key, value)
		}
	}

	return req, nil
}
func ObjectUrl(endpoint config.Endpoint, oid string) (*url.URL, error) {
u, err := url.Parse(endpoint.Url)
if err != nil {
return nil, err
}
u.Path = path.Join(u.Path, "objects")
if len(oid) > 0 {
u.Path = path.Join(u.Path, oid)
}
return u, nil
// Do performs the request associated with the given *RequestSchema by
// delegating into the Lifecycle in use.
//
// If any error was encountered while either building, executing or cleaning up
// the request, then it will be returned immediately, and the request can be
// treated as invalid.
//
// If no error occurred, an api.Response will be returned, along with a `nil`
// error. At this point, the body of the response has been serialized into
// `schema.Into`, and the body has been closed.
func (c *Client) Do(schema *RequestSchema) (Response, error) {
	req, err := c.lifecycle.Build(schema)
	if err != nil {
		return nil, err
	}

	resp, err := c.lifecycle.Execute(req, schema.Into)
	if err != nil {
		return nil, err
	}

	if err = c.lifecycle.Cleanup(resp); err != nil {
		return nil, err
	}

	return resp, nil
}

76
api/client_test.go Normal file

@ -0,0 +1,76 @@
package api_test
import (
"errors"
"net/http"
"testing"
"github.com/github/git-lfs/api"
"github.com/stretchr/testify/assert"
)
// TestClientUsesLifecycleToExecuteSchemas covers the happy path: the client
// delegates Build, Execute and Cleanup to its Lifecycle and returns the
// response produced by Execute.
func TestClientUsesLifecycleToExecuteSchemas(t *testing.T) {
	schema := new(api.RequestSchema)
	req := new(http.Request)
	resp := new(api.HttpResponse)

	lifecycle := new(MockLifecycle)
	lifecycle.On("Build", schema).Return(req, nil).Once()
	lifecycle.On("Execute", req, schema.Into).Return(resp, nil).Once()
	lifecycle.On("Cleanup", resp).Return(nil).Once()

	client := api.NewClient(lifecycle)
	r1, err := client.Do(schema)

	assert.Equal(t, resp, r1)
	assert.Nil(t, err)
	lifecycle.AssertExpectations(t)
}

// TestClientHaltsIfSchemaCannotBeBuilt asserts that a Build error is returned
// immediately, without Execute or Cleanup ever being called.
func TestClientHaltsIfSchemaCannotBeBuilt(t *testing.T) {
	schema := new(api.RequestSchema)

	lifecycle := new(MockLifecycle)
	lifecycle.On("Build", schema).Return(nil, errors.New("uh-oh!")).Once()

	client := api.NewClient(lifecycle)
	resp, err := client.Do(schema)

	lifecycle.AssertExpectations(t)
	assert.Nil(t, resp)
	assert.Equal(t, "uh-oh!", err.Error())
}

// TestClientHaltsIfSchemaCannotBeExecuted asserts that an Execute error is
// returned immediately, without Cleanup being called.
func TestClientHaltsIfSchemaCannotBeExecuted(t *testing.T) {
	schema := new(api.RequestSchema)
	req := new(http.Request)

	lifecycle := new(MockLifecycle)
	lifecycle.On("Build", schema).Return(req, nil).Once()
	lifecycle.On("Execute", req, schema.Into).Return(nil, errors.New("uh-oh!")).Once()

	client := api.NewClient(lifecycle)
	resp, err := client.Do(schema)

	lifecycle.AssertExpectations(t)
	assert.Nil(t, resp)
	assert.Equal(t, "uh-oh!", err.Error())
}

// TestClientReturnsCleanupErrors asserts that an error from Cleanup is
// propagated to the caller in place of the (already-processed) response.
func TestClientReturnsCleanupErrors(t *testing.T) {
	schema := new(api.RequestSchema)
	req := new(http.Request)
	resp := new(api.HttpResponse)

	lifecycle := new(MockLifecycle)
	lifecycle.On("Build", schema).Return(req, nil).Once()
	lifecycle.On("Execute", req, schema.Into).Return(resp, nil).Once()
	lifecycle.On("Cleanup", resp).Return(errors.New("uh-oh!")).Once()

	client := api.NewClient(lifecycle)
	r1, err := client.Do(schema)

	lifecycle.AssertExpectations(t)
	assert.Nil(t, r1)
	assert.Equal(t, "uh-oh!", err.Error())
}

181
api/http_lifecycle.go Normal file

@ -0,0 +1,181 @@
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package api
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"github.com/github/git-lfs/auth"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/httputil"
)
var (
// ErrNoOperationGiven is an error which is returned when no operation
// is provided in a RequestSchema object.
ErrNoOperationGiven = errors.New("lfs/api: no operation provided in schema")
)
// EndpointSource is an interface which encapsulates the behavior of returning
// `config.Endpoint`s based on a particular operation.
type EndpointSource interface {
	// Endpoint returns the `config.Endpoint` associated with a given
	// operation.
	Endpoint(operation string) config.Endpoint
}
// HttpLifecycle serves as the default implementation of the Lifecycle
// interface for HTTP requests. Request URLs are resolved against the root URL
// of whichever endpoint the EndpointSource returns for a given operation.
type HttpLifecycle struct {
	// endpoints maps operations ("upload"/"download") to their endpoints.
	endpoints EndpointSource
}
var _ Lifecycle = new(HttpLifecycle)
// NewHttpLifecycle returns a new *HttpLifecycle that resolves request URLs
// through the given EndpointSource (see above).
func NewHttpLifecycle(endpoints EndpointSource) *HttpLifecycle {
	lifecycle := new(HttpLifecycle)
	lifecycle.endpoints = endpoints

	return lifecycle
}
// Build implements the Lifecycle.Build function.
//
// HttpLifecycle in particular, builds an absolute path by parsing and then
// relativizing the `schema.Path` with respect to the root URL of the
// operation's endpoint. If there was an error in determining this URL, then
// that error will be returned.
//
// After this is complete, a body is attached to the request if the
// schema contained one. If a body was present, and an error occurred while
// serializing it into JSON, then that error will be returned and the
// *http.Request will not be generated.
//
// In all cases, credentials are attached to the HTTP request as described in
// the `auth` package (see github.com/github/git-lfs/auth#GetCreds).
//
// Finally, all of these components are combined together and the resulting
// request is returned.
func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) {
	path, err := l.absolutePath(schema.Operation, schema.Path)
	if err != nil {
		return nil, err
	}

	body, err := l.body(schema)
	if err != nil {
		return nil, err
	}

	req, err := http.NewRequest(schema.Method, path.String(), body)
	if err != nil {
		return nil, err
	}

	// GetCreds mutates the request in place; we only care about the error.
	if _, err = auth.GetCreds(req); err != nil {
		return nil, err
	}

	req.URL.RawQuery = l.queryParameters(schema).Encode()

	return req, nil
}
// Execute implements the Lifecycle.Execute function.
//
// Internally, the *http.Client is used to execute the underlying *http.Request.
// If the client returned an error corresponding to a failure to make the
// request, then that error will be returned immediately, and the response is
// guaranteed not to be serialized.
//
// Once the response has been gathered from the server, it is unmarshaled into
// the given `into interface{}` which is identical to the one provided in the
// original RequestSchema. If an error occurred while decoding, then that error
// is returned and the response body is closed, since no Response is handed
// back through which Cleanup could later close it.
//
// Otherwise, the api.Response is returned, along with no error, signaling that
// the request completed successfully.
func (l *HttpLifecycle) Execute(req *http.Request, into interface{}) (Response, error) {
	resp, err := httputil.DoHttpRequestWithRedirects(req, []*http.Request{}, true)
	if err != nil {
		return nil, err
	}

	if into != nil {
		decoder := json.NewDecoder(resp.Body)
		if err = decoder.Decode(into); err != nil {
			// On a decode failure the caller receives no Response,
			// so Cleanup can never run; close the body here to
			// avoid leaking the underlying connection.
			resp.Body.Close()
			return nil, err
		}
	}

	return WrapHttpResponse(resp), nil
}
// Cleanup implements the Lifecycle.Cleanup function by closing the Body
// attached to the response.
func (l *HttpLifecycle) Cleanup(resp Response) error {
	body := resp.Body()

	return body.Close()
}
// absolutePath resolves the given relative path against the root URL of the
// endpoint corresponding to the given operation.
//
// If no operation was given, or either URL fails to parse, an error is
// returned along with a nil *url.URL.
func (l *HttpLifecycle) absolutePath(operation Operation, path string) (*url.URL, error) {
	if operation == "" {
		return nil, ErrNoOperationGiven
	}

	base, err := url.Parse(l.endpoints.Endpoint(string(operation)).Url)
	if err != nil {
		return nil, err
	}

	ref, err := url.Parse(path)
	if err != nil {
		return nil, err
	}

	return base.ResolveReference(ref), nil
}
// body JSON-encodes the payload attached to the given *RequestSchema and
// returns it as an io.ReadCloser. When the schema carries no payload, a nil
// reader and a nil error are returned instead.
//
// A marshaling failure is returned as-is, along with a nil reader.
func (l *HttpLifecycle) body(schema *RequestSchema) (io.ReadCloser, error) {
	payload := schema.Body
	if payload == nil {
		return nil, nil
	}

	encoded, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}

	return ioutil.NopCloser(bytes.NewReader(encoded)), nil
}
// queryParameters collects the query parameters given in the *RequestSchema
// into a url.Values. If no query parameters were given, an empty url.Values
// is returned instead.
func (l *HttpLifecycle) queryParameters(schema *RequestSchema) url.Values {
	vals := url.Values{}

	// Ranging over a nil map is a no-op, so no explicit nil check is
	// required here.
	for k, v := range schema.Query {
		vals.Add(k, v)
	}

	return vals
}

148
api/http_lifecycle_test.go Normal file

@ -0,0 +1,148 @@
package api_test
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/github/git-lfs/api"
"github.com/github/git-lfs/config"
"github.com/stretchr/testify/assert"
)
// NopEndpointSource is a stub api.EndpointSource that always returns an
// endpoint rooted at the fixed Root URL, regardless of operation.
type NopEndpointSource struct {
	// Root is the URL returned for every operation.
	Root string
}

// Endpoint implements api.EndpointSource by ignoring the operation and
// returning the configured Root.
func (e *NopEndpointSource) Endpoint(op string) config.Endpoint {
	return config.Endpoint{Url: e.Root}
}

var (
	// source is the shared EndpointSource used by the tests below.
	source = &NopEndpointSource{"https://example.com"}
)
// TestHttpLifecycleMakesRequestsAgainstAbsolutePath asserts that Build joins
// the schema's relative path onto the endpoint's root URL.
func TestHttpLifecycleMakesRequestsAgainstAbsolutePath(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	l := api.NewHttpLifecycle(source)
	req, err := l.Build(&api.RequestSchema{
		Path:      "/foo",
		Operation: api.DownloadOperation,
	})

	assert.Nil(t, err)
	assert.Equal(t, "https://example.com/foo", req.URL.String())
}

// TestHttpLifecycleAttachesQueryParameters asserts that schema Query entries
// are encoded into the request URL.
func TestHttpLifecycleAttachesQueryParameters(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	l := api.NewHttpLifecycle(source)
	req, err := l.Build(&api.RequestSchema{
		Path:      "/foo",
		Operation: api.DownloadOperation,
		Query: map[string]string{
			"a": "b",
		},
	})

	assert.Nil(t, err)
	assert.Equal(t, "https://example.com/foo?a=b", req.URL.String())
}

// TestHttpLifecycleAttachesBodyWhenPresent asserts that a schema Body is
// JSON-serialized into the request body.
func TestHttpLifecycleAttachesBodyWhenPresent(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	l := api.NewHttpLifecycle(source)
	req, err := l.Build(&api.RequestSchema{
		Operation: api.DownloadOperation,
		Body: struct {
			Foo string `json:"foo"`
		}{"bar"},
	})

	assert.Nil(t, err)

	body, err := ioutil.ReadAll(req.Body)
	assert.Nil(t, err)
	assert.Equal(t, "{\"foo\":\"bar\"}", string(body))
}

// TestHttpLifecycleDoesNotAttachBodyWhenEmpty asserts that a nil schema Body
// produces a request with no body at all.
func TestHttpLifecycleDoesNotAttachBodyWhenEmpty(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	l := api.NewHttpLifecycle(source)
	req, err := l.Build(&api.RequestSchema{
		Operation: api.DownloadOperation,
	})

	assert.Nil(t, err)
	assert.Nil(t, req.Body)
}

// TestHttpLifecycleErrsWithoutOperation asserts that Build refuses a schema
// that carries no Operation.
func TestHttpLifecycleErrsWithoutOperation(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	l := api.NewHttpLifecycle(source)
	req, err := l.Build(&api.RequestSchema{
		Path: "/foo",
	})

	assert.Equal(t, api.ErrNoOperationGiven, err)
	assert.Nil(t, req)
}

// TestHttpLifecycleExecutesRequestWithoutBody asserts that Execute hits the
// server and succeeds when no decode target is given.
func TestHttpLifecycleExecutesRequestWithoutBody(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	var called bool
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		called = true

		assert.Equal(t, "/path", r.URL.RequestURI())
	}))
	defer server.Close()

	req, _ := http.NewRequest("GET", server.URL+"/path", nil)

	l := api.NewHttpLifecycle(source)
	_, err := l.Execute(req, nil)

	assert.True(t, called)
	assert.Nil(t, err)
}

// TestHttpLifecycleExecutesRequestWithBody asserts that Execute decodes the
// server's JSON response into the provided target.
func TestHttpLifecycleExecutesRequestWithBody(t *testing.T) {
	SetupTestCredentialsFunc()
	defer RestoreCredentialsFunc()

	type Response struct {
		Foo string `json:"foo"`
	}

	var called bool
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		called = true

		w.Write([]byte("{\"foo\":\"bar\"}"))
	}))
	defer server.Close()

	req, _ := http.NewRequest("GET", server.URL+"/path", nil)

	l := api.NewHttpLifecycle(source)
	resp := new(Response)
	_, err := l.Execute(req, resp)

	assert.True(t, called)
	assert.Nil(t, err)
	assert.Equal(t, "bar", resp.Foo)
}

53
api/http_response.go Normal file

@ -0,0 +1,53 @@
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package api
import (
"io"
"net/http"
)
// HttpResponse is an implementation of the Response interface capable of
// handling HTTP responses. At its core, it works by wrapping an *http.Response.
type HttpResponse struct {
	// r is the underlying *http.Response that is being wrapped.
	r *http.Response
}

// WrapHttpResponse returns a wrapped *HttpResponse implementing the Response
// type by using the given *http.Response.
func WrapHttpResponse(r *http.Response) *HttpResponse {
	return &HttpResponse{
		r: r,
	}
}

var _ Response = new(HttpResponse)

// Status implements the Response.Status function, and returns the status given
// by the underlying *http.Response.
func (h *HttpResponse) Status() string {
	return h.r.Status
}

// StatusCode implements the Response.StatusCode function, and returns the
// status code given by the underlying *http.Response.
func (h *HttpResponse) StatusCode() int {
	return h.r.StatusCode
}

// Proto implements the Response.Proto function, and returns the proto given by
// the underlying *http.Response.
func (h *HttpResponse) Proto() string {
	return h.r.Proto
}

// Body implements the Response.Body function, and returns the body as given by
// the underlying *http.Response. The caller is responsible for closing it.
func (h *HttpResponse) Body() io.ReadCloser {
	return h.r.Body
}

// Header returns the underlying *http.Response's header.
func (h *HttpResponse) Header() http.Header {
	return h.r.Header
}

27
api/http_response_test.go Normal file

@ -0,0 +1,27 @@
package api_test
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"github.com/github/git-lfs/api"
"github.com/stretchr/testify/assert"
)
// TestWrappedHttpResponsesMatchInternal asserts that every accessor on a
// wrapped *HttpResponse mirrors the field of the underlying *http.Response.
func TestWrappedHttpResponsesMatchInternal(t *testing.T) {
	resp := &http.Response{
		Status:     "200 OK",
		StatusCode: 200,
		Proto:      "HTTP/1.1",
		Body:       ioutil.NopCloser(new(bytes.Buffer)),
	}
	wrapped := api.WrapHttpResponse(resp)

	assert.Equal(t, resp.Status, wrapped.Status())
	assert.Equal(t, resp.StatusCode, wrapped.StatusCode())
	assert.Equal(t, resp.Proto, wrapped.Proto())
	assert.Equal(t, resp.Body, wrapped.Body())
	assert.Equal(t, resp.Header, wrapped.Header())
}

32
api/lifecycle.go Normal file

@ -0,0 +1,32 @@
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package api
import "net/http"
// TODO: extract interface for *http.Request; update methods. This will be in a
// later iteration of the API client.

// A Lifecycle represents and encapsulates the behavior of an API request from
// inception to cleanup.
//
// At a high level, it turns an *api.RequestSchema into an
// api.Response (and optionally an error). Lifecycle does so by providing
// several more fine-grained methods that are used by the client to manage the
// lifecycle of a request in a platform-agnostic fashion.
type Lifecycle interface {
	// Build creates a sendable request by using the given RequestSchema as
	// a model.
	Build(req *RequestSchema) (*http.Request, error)

	// Execute transforms the generated request into a wrapped response,
	// (and optionally an error, if the request failed), and serializes the
	// response into the `into interface{}`, if one was provided.
	Execute(req *http.Request, into interface{}) (Response, error)

	// Cleanup is called after the request has been completed and its
	// response has been processed. It is meant to perform any post-request
	// actions necessary, like closing or resetting the connection. If an
	// error was encountered in doing this operation, it should be returned
	// from this method, otherwise nil.
	Cleanup(resp Response) error
}

39
api/lifecycle_test.go Normal file

@ -0,0 +1,39 @@
package api_test
import (
"net/http"
"github.com/github/git-lfs/api"
"github.com/stretchr/testify/mock"
)
// MockLifecycle is a testify-backed mock of the api.Lifecycle interface, used
// to script Build/Execute/Cleanup behavior in tests.
type MockLifecycle struct {
	mock.Mock
}

var _ api.Lifecycle = new(MockLifecycle)

// Build returns the canned *http.Request (or nil) and error that the test
// configured for this call.
func (l *MockLifecycle) Build(req *api.RequestSchema) (*http.Request, error) {
	args := l.Called(req)

	built := args.Get(0)
	if built == nil {
		return nil, args.Error(1)
	}

	return built.(*http.Request), args.Error(1)
}

// Execute returns the canned api.Response (or nil) and error that the test
// configured for this call.
func (l *MockLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) {
	args := l.Called(req, into)

	wrapped := args.Get(0)
	if wrapped == nil {
		return nil, args.Error(1)
	}

	return wrapped.(api.Response), args.Error(1)
}

// Cleanup returns the canned error that the test configured for this call.
func (l *MockLifecycle) Cleanup(resp api.Response) error {
	return l.Called(resp).Error(0)
}

243
api/lock_api.go Normal file

@ -0,0 +1,243 @@
package api
import (
"fmt"
"strconv"
"time"
)
// LockService is an API service which encapsulates the Git LFS Locking API.
type LockService struct{}

// Lock generates a *RequestSchema that is used to perform the "attempt lock"
// API method.
//
// If a lock is already present, or if the server was unable to generate the
// lock, the Err field of the LockResponse type will be populated with a more
// detailed error describing the situation.
//
// If the caller does not have the minimum commit necessary to obtain the lock
// on that file, then the CommitNeeded field will be populated in the
// LockResponse, signaling that more commits are needed.
//
// In the successful case, a new Lock will be returned and granted to the
// caller.
func (s *LockService) Lock(req *LockRequest) (*RequestSchema, *LockResponse) {
	var resp LockResponse

	return &RequestSchema{
		Method:    "POST",
		Path:      "/locks",
		Operation: UploadOperation,
		Body:      req,
		Into:      &resp,
	}, &resp
}

// Search generates a *RequestSchema that is used to perform the "search for
// locks" API method.
//
// Searches can be scoped to match specific parameters by using the Filters
// field in the given LockSearchRequest. If no matching Locks were found, then
// the Locks field of the response will be empty.
//
// If the client expects that the server will return many locks, then the client
// can choose to paginate that response. Pagination is performed by limiting the
// amount of results per page, and the server will inform the client of the ID
// of the last returned lock. Since the server is guaranteed to return results
// in reverse chronological order, the client simply sends the last ID it
// processed along with the next request, and the server will continue where it
// left off.
//
// If the server was unable to process the lock search request, then the Error
// field will be populated in the response.
//
// In the successful case, one or more locks will be returned as a part of the
// response.
func (s *LockService) Search(req *LockSearchRequest) (*RequestSchema, *LockList) {
	var resp LockList

	query := make(map[string]string)
	for _, filter := range req.Filters {
		query[filter.Property] = filter.Value
	}

	if req.Cursor != "" {
		query["cursor"] = req.Cursor
	}

	if req.Limit != 0 {
		query["limit"] = strconv.Itoa(req.Limit)
	}

	return &RequestSchema{
		Method:    "GET",
		Path:      "/locks",
		Operation: UploadOperation,
		Query:     query,
		Into:      &resp,
	}, &resp
}

// Unlock generates a *RequestSchema that is used to perform the "unlock" API
// method, against a particular lock potentially with --force.
//
// This method's corresponding response type will either contain a reference to
// the lock that was unlocked, or an error that was experienced by the server in
// unlocking it.
func (s *LockService) Unlock(id string, force bool) (*RequestSchema, *UnlockResponse) {
	var resp UnlockResponse

	return &RequestSchema{
		Method:    "POST",
		Path:      fmt.Sprintf("/locks/%s/unlock", id),
		Operation: UploadOperation,
		Body:      &UnlockRequest{id, force},
		Into:      &resp,
	}, &resp
}
// Lock represents a single lock that against a particular path.
//
// Locks returned from the API may or may not be currently active, according to
// the Expired flag.
type Lock struct {
	// Id is the unique identifier corresponding to this particular Lock. It
	// must be consistent with the local copy, and the server's copy.
	Id string `json:"id"`

	// Path is an absolute path to the file that is locked as a part of this
	// lock.
	Path string `json:"path"`

	// Committer is the author who initiated this lock.
	Committer Committer `json:"committer"`

	// CommitSHA is the commit that this Lock was created against. It is
	// strictly equal to the SHA of the minimum commit negotiated in order
	// to create this lock.
	CommitSHA string `json:"commit_sha"`

	// LockedAt is a required parameter that represents the instant in time
	// that this lock was created. For most server implementations, this
	// should be set to the instant at which the lock was initially
	// received.
	LockedAt time.Time `json:"locked_at"`

	// UnlockedAt is an optional parameter that represents the instant in
	// time that the lock stopped being active. If the lock is still active,
	// the server can either a) not send this field, or b) send the
	// zero-value of time.Time.
	UnlockedAt time.Time `json:"unlocked_at,omitempty"`
}

// Active returns whether or not the given lock is still active against the file
// that it is protecting. A lock with a zero-valued UnlockedAt is active.
func (l *Lock) Active() bool {
	return l.UnlockedAt.IsZero()
}
// Committer represents a "First Last <email@domain.com>" pair.
type Committer struct {
	// Name is the name of the individual who would like to obtain the
	// lock, for instance: "Rick Olson".
	Name string `json:"name"`

	// Email is the email associated with the individual who would
	// like to obtain the lock, for instance: "rick@github.com".
	Email string `json:"email"`
}
// LockRequest encapsulates the payload sent across the API when a client would
// like to obtain a lock against a particular path on a given remote.
type LockRequest struct {
	// Path is the path that the client would like to obtain a lock against.
	Path string `json:"path"`

	// LatestRemoteCommit is the SHA of the last known commit from the
	// remote that we are trying to create the lock against, as found in
	// `.git/refs/origin/<name>`.
	LatestRemoteCommit string `json:"latest_remote_commit"`

	// Committer is the individual that wishes to obtain the lock.
	Committer Committer `json:"committer"`
}

// LockResponse encapsulates the information sent over the API in response to
// a `LockRequest`.
type LockResponse struct {
	// Lock is the Lock that was optionally created in response to the
	// payload that was sent (see above). If the lock already exists, then
	// the existing lock is sent in this field instead, and the author of
	// that lock remains the same, meaning that the client failed to obtain
	// that lock. An HTTP status of "409 - Conflict" is used here.
	//
	// If the lock was unable to be created, this field will hold the
	// zero-value of Lock and the Err field will provide a more detailed set
	// of information.
	//
	// If an error was experienced in creating this lock, then the
	// zero-value of Lock should be sent here instead.
	Lock *Lock `json:"lock"`

	// CommitNeeded holds the minimum commit SHA that client must have to
	// obtain the lock.
	CommitNeeded string `json:"commit_needed,omitempty"`

	// Err is the optional error that was encountered while trying to create
	// the above lock.
	Err string `json:"error,omitempty"`
}

// UnlockRequest encapsulates the data sent in an API request to remove a lock.
type UnlockRequest struct {
	// Id is the Id of the lock that the user wishes to unlock.
	Id string `json:"id"`

	// Force determines whether or not the lock should be "forcibly"
	// unlocked; that is to say whether or not a given individual should be
	// able to break a different individual's lock.
	Force bool `json:"force"`
}

// UnlockResponse is the result sent back from the API when asked to remove a
// lock.
type UnlockResponse struct {
	// Lock is the lock corresponding to the asked-about lock in the
	// `UnlockRequest` (see above). If no matching lock was found, this
	// field will take the zero-value of Lock, and Err will be non-nil.
	Lock *Lock `json:"lock"`

	// Err is an optional field which holds any error that was experienced
	// while removing the lock.
	Err string `json:"error,omitempty"`
}

// Filter represents a single qualifier to apply against a set of locks.
type Filter struct {
	// Property is the property to search against.
	// Value is the value that the property must take.
	Property, Value string
}

// LockSearchRequest encapsulates the request sent to the server when the client
// would like a list of locks that match the given criteria.
type LockSearchRequest struct {
	// Filters is the set of filters to query against. If the client wishes
	// to obtain a list of all locks, an empty array should be passed here.
	Filters []Filter

	// Cursor is an optional field used to tell the server which lock was
	// seen last, if scanning through multiple pages of results.
	//
	// Servers must return a list of locks sorted in reverse chronological
	// order, so the Cursor provides a consistent method of viewing all
	// locks, even if more were created between two requests.
	Cursor string

	// Limit is the maximum number of locks to return in a single page.
	Limit int
}

// LockList encapsulates a set of Locks.
type LockList struct {
	// Locks is the set of locks returned back, typically matching the query
	// parameters sent in the LockSearchRequest call. If no locks were
	// matched from a given query, then `Locks` will be represented as an
	// empty array.
	Locks []Lock `json:"locks"`

	// NextCursor returns the Id of the Lock the client should update its
	// cursor to, if there are multiple pages of results for a particular
	// `LockSearchRequest`.
	NextCursor string `json:"next_cursor,omitempty"`

	// Err populates any error that was encountered during the search. If no
	// error was encountered and the operation was successful, then a value
	// of nil will be passed here.
	Err string `json:"error,omitempty"`
}

220
api/lock_api_test.go Normal file

@ -0,0 +1,220 @@
package api_test
import (
"testing"
"time"
"github.com/github/git-lfs/api"
"github.com/github/git-lfs/api/schema"
)
// LockService is the shared service instance exercised by the tests below.
// The zero value suffices, since these tests only inspect the request schemas
// its methods generate.
var LockService api.LockService
// TestSuccessfullyObtainingALock verifies that LockService.Lock generates a
// POST /locks request schema against the upload endpoint.
func TestSuccessfullyObtainingALock(t *testing.T) {
	got, body := LockService.Lock(new(api.LockRequest))

	AssertRequestSchema(t, &api.RequestSchema{
		Method:    "POST",
		Path:      "/locks",
		Operation: api.UploadOperation,
		Body:      new(api.LockRequest),
		Into:      body,
	}, got)
}

// TestLockSearchWithFilters verifies that each search Filter is translated
// into a query-string parameter on GET /locks.
func TestLockSearchWithFilters(t *testing.T) {
	got, body := LockService.Search(&api.LockSearchRequest{
		Filters: []api.Filter{
			{"branch", "master"},
			{"path", "/path/to/file"},
		},
	})

	AssertRequestSchema(t, &api.RequestSchema{
		Method: "GET",
		Query: map[string]string{
			"branch": "master",
			"path":   "/path/to/file",
		},
		Path:      "/locks",
		Operation: api.UploadOperation,
		Into:      body,
	}, got)
}

// TestLockSearchWithNextCursor verifies that a pagination cursor is sent as
// the "cursor" query parameter.
func TestLockSearchWithNextCursor(t *testing.T) {
	got, body := LockService.Search(&api.LockSearchRequest{
		Cursor: "some-lock-id",
	})

	AssertRequestSchema(t, &api.RequestSchema{
		Method: "GET",
		Query: map[string]string{
			"cursor": "some-lock-id",
		},
		Path:      "/locks",
		Operation: api.UploadOperation,
		Into:      body,
	}, got)
}

// TestLockSearchWithLimit verifies that a page-size limit is sent as the
// "limit" query parameter, encoded in decimal.
func TestLockSearchWithLimit(t *testing.T) {
	got, body := LockService.Search(&api.LockSearchRequest{
		Limit: 20,
	})

	AssertRequestSchema(t, &api.RequestSchema{
		Method: "GET",
		Query: map[string]string{
			"limit": "20",
		},
		Path:      "/locks",
		Operation: api.UploadOperation,
		Into:      body,
	}, got)
}

// TestUnlockingALock verifies that LockService.Unlock posts to
// /locks/<id>/unlock with the lock id and force flag carried in the body.
func TestUnlockingALock(t *testing.T) {
	got, body := LockService.Unlock("some-lock-id", true)

	AssertRequestSchema(t, &api.RequestSchema{
		Method:    "POST",
		Path:      "/locks/some-lock-id/unlock",
		Operation: api.UploadOperation,
		Body: &api.UnlockRequest{
			Id:    "some-lock-id",
			Force: true,
		},
		Into: body,
	}, got)
}
// TestLockRequest checks that a fully-populated LockRequest satisfies the
// lock_request JSON schema.
func TestLockRequest(t *testing.T) {
	schema.Validate(t, schema.LockRequestSchema, &api.LockRequest{
		Path:               "/path/to/lock",
		LatestRemoteCommit: "deadbeef",
		Committer: api.Committer{
			Name:  "Jane Doe",
			Email: "jane@example.com",
		},
	})
}

// TestLockResponseWithLockedLock checks that a response carrying a
// still-active lock satisfies the lock_response JSON schema.
func TestLockResponseWithLockedLock(t *testing.T) {
	schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{
		Lock: &api.Lock{
			Id:   "some-lock-id",
			Path: "/lock/path",
			Committer: api.Committer{
				Name:  "Jane Doe",
				Email: "jane@example.com",
			},
			LockedAt: time.Now(),
		},
	})
}

// TestLockResponseWithUnlockedLock checks that a response carrying an
// already-released lock (UnlockedAt set) is also valid.
func TestLockResponseWithUnlockedLock(t *testing.T) {
	schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{
		Lock: &api.Lock{
			Id:   "some-lock-id",
			Path: "/lock/path",
			Committer: api.Committer{
				Name:  "Jane Doe",
				Email: "jane@example.com",
			},
			LockedAt:   time.Now(),
			UnlockedAt: time.Now(),
		},
	})
}

// TestLockResponseWithError checks that an error-only response is valid.
func TestLockResponseWithError(t *testing.T) {
	schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{
		Err: "some error",
	})
}

// TestLockResponseWithCommitNeeded checks that a commit_needed-only response
// is valid.
func TestLockResponseWithCommitNeeded(t *testing.T) {
	schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{
		CommitNeeded: "deadbeef",
	})
}

// TestLockResponseInvalidWithCommitAndError checks that the schema rejects a
// response carrying both an error and a commit_needed value.
func TestLockResponseInvalidWithCommitAndError(t *testing.T) {
	schema.Refute(t, schema.LockResponseSchema, &api.LockResponse{
		Err:          "some error",
		CommitNeeded: "deadbeef",
	})
}
// TestUnlockRequest checks that an UnlockRequest satisfies the unlock_request
// JSON schema.
func TestUnlockRequest(t *testing.T) {
	schema.Validate(t, schema.UnlockRequestSchema, &api.UnlockRequest{
		Id:    "some-lock-id",
		Force: false,
	})
}

// TestUnlockResponseWithLock checks that a response carrying the released
// lock satisfies the unlock_response JSON schema.
func TestUnlockResponseWithLock(t *testing.T) {
	schema.Validate(t, schema.UnlockResponseSchema, &api.UnlockResponse{
		Lock: &api.Lock{
			Id: "some-lock-id",
		},
	})
}

// TestUnlockResponseWithError checks that an error-only response is valid.
func TestUnlockResponseWithError(t *testing.T) {
	schema.Validate(t, schema.UnlockResponseSchema, &api.UnlockResponse{
		Err: "some-error",
	})
}

// TestUnlockResponseDoesNotAllowLockAndError checks that the schema rejects a
// response carrying both a lock and an error.
func TestUnlockResponseDoesNotAllowLockAndError(t *testing.T) {
	schema.Refute(t, schema.UnlockResponseSchema, &api.UnlockResponse{
		Lock: &api.Lock{
			Id: "some-lock-id",
		},
		Err: "some-error",
	})
}
// TestLockListWithLocks checks that a populated set of locks satisfies the
// lock_list JSON schema.
func TestLockListWithLocks(t *testing.T) {
	schema.Validate(t, schema.LockListSchema, &api.LockList{
		Locks: []api.Lock{{Id: "foo"}, {Id: "bar"}},
	})
}

// TestLockListWithNoResults checks that an empty set of locks is still valid.
func TestLockListWithNoResults(t *testing.T) {
	schema.Validate(t, schema.LockListSchema, &api.LockList{
		Locks: []api.Lock{},
	})
}

// TestLockListWithNextCursor checks that a lock list carrying a pagination
// cursor remains valid.
func TestLockListWithNextCursor(t *testing.T) {
	schema.Validate(t, schema.LockListSchema, &api.LockList{
		Locks:      []api.Lock{{Id: "foo"}, {Id: "bar"}},
		NextCursor: "baz",
	})
}

// TestLockListWithError checks that an error-only response is valid.
func TestLockListWithError(t *testing.T) {
	schema.Validate(t, schema.LockListSchema, &api.LockList{
		Err: "some error",
	})
}

// TestLockListWithErrorAndLocks checks that the schema rejects a response
// carrying both locks and an error.
func TestLockListWithErrorAndLocks(t *testing.T) {
	schema.Refute(t, schema.LockListSchema, &api.LockList{
		Locks: []api.Lock{{Id: "foo"}, {Id: "bar"}},
		Err:   "this isn't possible!",
	})
}

21
api/request_schema.go Normal file

@ -0,0 +1,21 @@
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package api
// RequestSchema provides a schema from which to generate sendable requests.
// It is a protocol-neutral description of a single API call; services build
// one of these, and a client turns it into a concrete request.
type RequestSchema struct {
	// Method is the method that should be used when making a particular API
	// call.
	Method string
	// Path is the relative path that this API call should be made against.
	Path string
	// Operation is the operation used to determine which endpoint to make
	// the request against (see github.com/github/git-lfs/config).
	Operation Operation
	// Query is the query parameters used in the request URI.
	Query map[string]string
	// Body is the body of the request.
	Body interface{}
	// Into is an optional field used to represent the data structure into
	// which a response should be serialized.
	Into interface{}
}

@ -0,0 +1,23 @@
package api_test
import (
"testing"
"github.com/github/git-lfs/api"
"github.com/stretchr/testify/assert"
)
// AssertRequestSchema encapsulates a single assertion of equality against two
// generated RequestSchema instances.
//
// This assertion is meant only to test that the request schema generated by an
// API service matches what we expect it to be. It does not make use of the
// *api.Client, any particular lifecycle, or spin up a test server. All of that
// behavior is tested at a higher strata in the client/lifecycle tests.
//
// - t is the *testing.T used to perform the assertion.
// - expected is the *api.RequestSchema that we expected to be generated.
// - got is the *api.RequestSchema that was generated by a service.
func AssertRequestSchema(t *testing.T, expected, got *api.RequestSchema) {
	assert.Equal(t, expected, got)
}

24
api/response.go Normal file

@ -0,0 +1,24 @@
// NOTE: Subject to change, do not rely on this package from outside git-lfs source
package api
import "io"
// Response is an interface that represents a response returned as a result of
// executing an API call. It is designed to represent itself across multiple
// response types, be it HTTP, SSH, or something else.
//
// The Response interface is meant to be small enough such that it can be
// sufficiently general, but is easily accessible if a caller needs more
// information specific to a particular protocol.
type Response interface {
	// Status is a human-readable string representing the status the
	// response was returned with.
	Status() string
	// StatusCode is the numeric code associated with a particular status.
	StatusCode() int
	// Proto is the protocol with which the response was delivered.
	Proto() string
	// Body returns an io.ReadCloser containing the contents of the
	// response's body.
	Body() io.ReadCloser
}

@ -0,0 +1,65 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"oneOf": [
{
"properties": {
"locks": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"path": {
"type": "string"
},
"committer": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"email": {
"type": "string"
}
},
"required": ["name", "email"]
},
"commit_sha": {
"type": "string"
},
"locked_at": {
"type": "string"
},
"unlocked_at": {
"type": "string"
}
},
"required": ["id", "path", "commit_sha", "locked_at"],
"additionalProperties": false
}
},
"next_cursor": {
"type": "string"
}
},
"additionalProperties": false,
"required": ["locks"]
},
{
"properties": {
"locks": {
"type": "null"
},
"error": {
"type": "string"
}
},
"additionalProperties": false,
"required": ["error"]
}
]
}

@ -0,0 +1,25 @@
{
"type": "object",
"properties": {
"path": {
"type": "string"
},
"latest_remote_commit": {
"type": "string"
},
"committer": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"email": {
"type": "string"
}
},
"required": ["name", "email"]
}
},
"required": ["path", "latest_remote_commit", "committer"]
}

@ -0,0 +1,61 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"oneOf": [
{
"properties": {
"error": {
"type": "string"
}
},
"required": ["error"]
},
{
"properties": {
"commit_needed": {
"type": "string"
}
},
"required": ["commit_needed"]
},
{
"properties": {
"lock": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"path": {
"type": "string"
},
"committer": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"email": {
"type": "string"
}
},
"required": ["name", "email"]
},
"commit_sha": {
"type": "string"
},
"locked_at": {
"type": "string"
},
"unlocked_at": {
"type": "string"
}
},
"required": ["id", "path", "commit_sha", "locked_at"]
}
},
"required": ["lock"]
}
]
}

@ -0,0 +1,82 @@
package schema
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/xeipuuv/gojsonschema"
)
// SchemaValidator uses the gojsonschema library to validate the JSON encoding
// of Go objects against a pre-defined JSON schema.
type SchemaValidator struct {
	// Schema is the JSON schema to validate against.
	//
	// Subject is the JSON encoding of the Go value that will be validated
	// against Schema.
	Schema, Subject gojsonschema.JSONLoader
}
// NewSchemaValidator builds a SchemaValidator that checks the JSON encoding
// of "got" against the schema file named schemaName, resolved relative to the
// "schema" directory beneath the current working directory.
//
// Any error encountered while resolving the working directory or marshalling
// the subject fails the test immediately via t.Fatal.
func NewSchemaValidator(t *testing.T, schemaName string, got interface{}) *SchemaValidator {
	dir, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}

	// Convert to forward slashes so the file:// URI is well-formed on all
	// platforms (filepath.Join produces backslashes on Windows, which are
	// not valid URI path separators).
	schema := gojsonschema.NewReferenceLoader(fmt.Sprintf(
		"file:///%s",
		filepath.ToSlash(filepath.Join(dir, "schema", schemaName)),
	))

	marshalled, err := json.Marshal(got)
	if err != nil {
		t.Fatal(err)
	}

	subject := gojsonschema.NewStringLoader(string(marshalled))

	return &SchemaValidator{
		Schema:  schema,
		Subject: subject,
	}
}
// Validate validates a Go object against a JSON schema in a testing
// environment. If the validation fails, then the test will fail after logging
// all of the validation errors experienced by the validator.
func Validate(t *testing.T, schemaName string, got interface{}) {
	NewSchemaValidator(t, schemaName, got).Assert(t)
}
// Refute ensures that a particular Go object does not validate against the
// JSON schema given.
//
// If validation against the schema is successful, then the test will fail
// after logging.
func Refute(t *testing.T, schemaName string, got interface{}) {
	NewSchemaValidator(t, schemaName, got).Refute(t)
}
// Assert performs the validation assertion against the given *testing.T,
// failing the test (after logging each individual validation error) when the
// subject does not conform to the schema.
func (v *SchemaValidator) Assert(t *testing.T) {
	if result, err := gojsonschema.Validate(v.Schema, v.Subject); err != nil {
		t.Fatal(err)
	} else if !result.Valid() {
		for _, err := range result.Errors() {
			t.Logf("Validation error: %s", err.Description())
		}
		t.Fail()
	}
}
// Refute asserts the inverse of Assert: the given subject must NOT validate
// against the schema, and the test fails if it does.
func (v *SchemaValidator) Refute(t *testing.T) {
	if result, err := gojsonschema.Validate(v.Schema, v.Subject); err != nil {
		t.Fatal(err)
	} else if result.Valid() {
		t.Fatal("api/schema: expected validation to fail, succeeded")
	}
}

24
api/schema/schemas.go Normal file

@ -0,0 +1,24 @@
// schema provides a testing utility for testing API types against a predefined
// JSON schema.
//
// The core philosophy for this package is as follows: when a new API is
// accepted, JSON Schema files should be added to document the types that are
// exchanged over this new API. Those files are placed in the `/api/schema`
// directory, and are used by the schema.Validate function to test that
// particular instances of these types as represented in Go match the predefined
// schema that was proposed as a part of the API.
//
// For ease of use, this file defines several constants, one for each schema
// file's name, to easily pass around during tests.
//
// As briefly described above, to validate that a Go type matches the schema for
// a particular API call, one should use the schema.Validate() function.
package schema
// File names of the JSON schema documents in the api/schema directory, for
// use with schema.Validate and schema.Refute.
const (
	LockListSchema       = "lock_list_schema.json"
	LockRequestSchema    = "lock_request_schema.json"
	LockResponseSchema   = "lock_response_schema.json"
	UnlockRequestSchema  = "unlock_request_schema.json"
	UnlockResponseSchema = "unlock_response_schema.json"
)

@ -0,0 +1,15 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"id": {
"type": "string"
},
"force": {
"type": "boolean"
}
},
"required": ["id", "force"],
"additionalProperties": false
}

@ -0,0 +1,53 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"oneOf": [
{
"properties": {
"lock": {
"type": "object",
"properties": {
"id": {
"type": "string"
},
"path": {
"type": "string"
},
"committer": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"email": {
"type": "string"
}
},
"required": ["name", "email"]
},
"commit_sha": {
"type": "string"
},
"locked_at": {
"type": "string"
},
"unlocked_at": {
"type": "string"
}
},
"required": ["id", "path", "commit_sha", "locked_at"]
}
},
"required": ["lock"]
},
{
"properties": {
"error": {
"type": "string"
}
},
"required": ["error"]
}
]
}

155
api/v1.go Normal file

@ -0,0 +1,155 @@
package api
import (
"net/http"
"net/url"
"path"
"github.com/github/git-lfs/auth"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/httputil"
"github.com/rubyist/tracerx"
)
const (
	// MediaType is the media type sent in the Accept header for Git LFS
	// API requests.
	MediaType = "application/vnd.git-lfs+json; charset=utf-8"
)
// DoLegacyRequest runs the given request against the LFS legacy API
// (following redirects, with credentials) and decodes the response body into
// a single ObjectResource.
//
// On a decode failure the response is attached to the error's context and a
// nil response is returned alongside the error.
func DoLegacyRequest(req *http.Request) (*http.Response, *ObjectResource, error) {
	via := make([]*http.Request, 0, 4)
	res, err := httputil.DoHttpRequestWithRedirects(req, via, true)
	if err != nil {
		return res, nil, err
	}

	obj := &ObjectResource{}
	err = httputil.DecodeResponse(res, obj)

	if err != nil {
		httputil.SetErrorResponseContext(err, res)
		return nil, nil, err
	}

	return res, obj, nil
}
// DoBatchRequest runs the request to the LFS batch API and decodes the
// "objects" array out of the response. If the API returns a 401, the error is
// surfaced as an auth error so the caller can retry with credentials.
// Whether credentials are attached up-front depends on the repo's recorded
// private-access state for the request's operation.
func DoBatchRequest(req *http.Request) (*http.Response, []*ObjectResource, error) {
	res, err := DoRequest(req, config.Config.PrivateAccess(auth.GetOperationForRequest(req)))

	if err != nil {
		if res != nil && res.StatusCode == 401 {
			return res, nil, errutil.NewAuthError(err)
		}
		return res, nil, err
	}

	var objs map[string][]*ObjectResource
	err = httputil.DecodeResponse(res, &objs)

	if err != nil {
		// Attach the response to the error's context; note that both
		// the (possibly partial) objects and the error are returned.
		httputil.SetErrorResponseContext(err, res)
	}

	return res, objs["objects"], err
}
// DoRequest runs a request to the LFS API without parsing the response body,
// following redirects. If the API returns a 401, the repo will be marked as
// having private access and the request will be re-run. When the repo is
// marked as having private access, credentials will be retrieved.
func DoRequest(req *http.Request, useCreds bool) (*http.Response, error) {
	redirectedVia := make([]*http.Request, 0, 4)
	return httputil.DoHttpRequestWithRedirects(req, redirectedVia, useCreds)
}
// NewRequest builds an API request for the given HTTP method and object ID.
// A POST to anything other than "batch" is treated as an upload, in which
// case the object ID is omitted from the URL; everything else is a download
// against the object's URL. SSH authentication is attempted first when the
// endpoint supports it, and may redirect the request to a different HTTP
// endpoint and/or supply extra headers.
func NewRequest(method, oid string) (*http.Request, error) {
	objectOid := oid
	operation := "download"
	if method == "POST" {
		// Uploads carry the object in the request body, not the URL.
		if oid != "batch" {
			objectOid = ""
			operation = "upload"
		}
	}

	endpoint := config.Config.Endpoint(operation)

	res, err := auth.SshAuthenticate(endpoint, operation, oid)
	if err != nil {
		tracerx.Printf("ssh: attempted with %s. Error: %s",
			endpoint.SshUserAndHost, err.Error(),
		)
		return nil, err
	}

	// The SSH authenticator may point us at a different HTTP endpoint.
	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, objectOid)
	if err != nil {
		return nil, err
	}

	req, err := httputil.NewHttpRequest(method, u.String(), res.Header)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", MediaType)
	return req, nil
}
// NewBatchRequest builds a POST request against the "batch" endpoint for the
// given operation, performing SSH authentication first when the endpoint
// supports it. Headers returned by the SSH authentication step are copied
// onto the request.
func NewBatchRequest(operation string) (*http.Request, error) {
	endpoint := config.Config.Endpoint(operation)

	res, err := auth.SshAuthenticate(endpoint, operation, "")
	if err != nil {
		tracerx.Printf("ssh: %s attempted with %s. Error: %s",
			operation, endpoint.SshUserAndHost, err.Error(),
		)
		return nil, err
	}

	// The SSH authenticator may point us at a different HTTP endpoint.
	if len(res.Href) > 0 {
		endpoint.Url = res.Href
	}

	u, err := ObjectUrl(endpoint, "batch")
	if err != nil {
		return nil, err
	}

	req, err := httputil.NewHttpRequest("POST", u.String(), nil)
	if err != nil {
		return nil, err
	}

	req.Header.Set("Accept", MediaType)
	if res.Header != nil {
		// Propagate any extra headers (e.g. auth tokens) supplied by
		// the SSH authentication step.
		for key, value := range res.Header {
			req.Header.Set(key, value)
		}
	}

	return req, nil
}
// ObjectUrl returns the URL for the "objects" resource on the given endpoint,
// with the object ID appended as a further path segment when it is non-empty.
func ObjectUrl(endpoint config.Endpoint, oid string) (*url.URL, error) {
	u, err := url.Parse(endpoint.Url)
	if err != nil {
		return nil, err
	}

	segments := []string{u.Path, "objects"}
	if oid != "" {
		segments = append(segments, oid)
	}
	u.Path = path.Join(segments...)

	return u, nil
}

@ -14,7 +14,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
// getCreds gets the credentials for a HTTP request and sets the given

@ -8,8 +8,8 @@ import (
"strings"
"testing"
"github.com/bgentry/go-netrc/netrc"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/bgentry/go-netrc/netrc"
)
func TestGetCredentialsForApi(t *testing.T) {

@ -9,7 +9,7 @@ import (
"strings"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
type SshAuthResponse struct {

@ -5,7 +5,7 @@ import (
"testing"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestSSHGetExeAndArgsSsh(t *testing.T) {

@ -11,8 +11,8 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
var (

@ -6,7 +6,7 @@ import (
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -10,7 +10,7 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/localstorage"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -4,7 +4,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -4,7 +4,7 @@ import (
"fmt"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -8,8 +8,8 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
var (

@ -10,7 +10,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -4,7 +4,7 @@ import (
"fmt"
"os"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
// TODO: Remove for Git LFS v2.0 https://github.com/github/git-lfs/issues/839

@ -2,7 +2,7 @@ package commands
import (
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -8,7 +8,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -5,7 +5,7 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -11,7 +11,7 @@ import (
"os/exec"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -8,7 +8,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -12,8 +12,8 @@ import (
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/localstorage"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
var (

@ -5,7 +5,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -7,8 +7,8 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/rubyist/tracerx"
"github.com/spf13/cobra"
)
var (

@ -9,7 +9,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -5,7 +5,7 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -13,7 +13,7 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -4,7 +4,7 @@ import (
"fmt"
"os"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
// TODO: Remove for Git LFS v2.0 https://github.com/github/git-lfs/issues/839

@ -2,7 +2,7 @@ package commands
import (
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -8,7 +8,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -6,7 +6,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -2,7 +2,7 @@ package commands
import (
"github.com/github/git-lfs/httputil"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
var (

@ -15,7 +15,7 @@ import (
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
// Populate man pages

@ -10,10 +10,10 @@ import (
"strings"
"sync"
"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
"github.com/bgentry/go-netrc/netrc"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm"
"github.com/github/git-lfs/vendor/_nuts/github.com/bgentry/go-netrc/netrc"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
var (

@ -4,7 +4,7 @@ import (
"os"
"path/filepath"
"github.com/github/git-lfs/vendor/_nuts/github.com/bgentry/go-netrc/netrc"
"github.com/bgentry/go-netrc/netrc"
)
type netrcfinder interface {

@ -3,7 +3,7 @@ package config
import (
"testing"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestEndpointDefaultsToOrigin(t *testing.T) {
@ -363,7 +363,7 @@ func TestBatchAbsentIsTrue(t *testing.T) {
config := &Configuration{}
v := config.BatchTransfer()
assert.Equal(t, true, v)
assert.True(t, v)
}
func TestAccessConfig(t *testing.T) {
@ -438,8 +438,8 @@ func TestAccessAbsentConfig(t *testing.T) {
config := &Configuration{}
assert.Equal(t, "none", config.Access("download"))
assert.Equal(t, "none", config.Access("upload"))
assert.Equal(t, false, config.PrivateAccess("download"))
assert.Equal(t, false, config.PrivateAccess("upload"))
assert.False(t, config.PrivateAccess("download"))
assert.False(t, config.PrivateAccess("upload"))
}
func TestLoadValidExtension(t *testing.T) {
@ -481,10 +481,10 @@ func TestFetchPruneConfigDefault(t *testing.T) {
assert.Equal(t, 7, fp.FetchRecentRefsDays)
assert.Equal(t, 0, fp.FetchRecentCommitsDays)
assert.Equal(t, 3, fp.PruneOffsetDays)
assert.Equal(t, true, fp.FetchRecentRefsIncludeRemotes)
assert.True(t, fp.FetchRecentRefsIncludeRemotes)
assert.Equal(t, 3, fp.PruneOffsetDays)
assert.Equal(t, "origin", fp.PruneRemoteName)
assert.Equal(t, false, fp.PruneVerifyRemoteAlways)
assert.False(t, fp.PruneVerifyRemoteAlways)
}
func TestFetchPruneConfigCustom(t *testing.T) {
@ -502,8 +502,8 @@ func TestFetchPruneConfigCustom(t *testing.T) {
assert.Equal(t, 12, fp.FetchRecentRefsDays)
assert.Equal(t, 9, fp.FetchRecentCommitsDays)
assert.Equal(t, false, fp.FetchRecentRefsIncludeRemotes)
assert.False(t, fp.FetchRecentRefsIncludeRemotes)
assert.Equal(t, 30, fp.PruneOffsetDays)
assert.Equal(t, "upstream", fp.PruneRemoteName)
assert.Equal(t, true, fp.PruneVerifyRemoteAlways)
assert.True(t, fp.PruneVerifyRemoteAlways)
}

@ -3,7 +3,7 @@ package config
import (
"testing"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestSortExtensions(t *testing.T) {
@ -32,7 +32,7 @@ func TestSortExtensions(t *testing.T) {
sorted, err := SortExtensions(m)
assert.Equal(t, err, nil)
assert.Nil(t, err)
for i, ext := range sorted {
name := names[i]
@ -61,6 +61,6 @@ func TestSortExtensionsDuplicatePriority(t *testing.T) {
sorted, err := SortExtensions(m)
assert.NotEqual(t, err, nil)
assert.Equal(t, len(sorted), 0)
assert.NotNil(t, err)
assert.Empty(t, sorted)
}

@ -9,7 +9,7 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
var (

4
debian/rules vendored

@ -1,7 +1,7 @@
#!/usr/bin/make -f
export DH_OPTIONS
export GO15VENDOREXPERIMENT=0
export GO15VENDOREXPERIMENT=1
#dh_golang doesn't do this for you
ifeq ($(DEB_HOST_ARCH), i386)
@ -25,7 +25,7 @@ override_dh_clean:
override_dh_auto_build:
dh_auto_build
#dh_golang doesn't do anything here in deb 8, and it's needed in both
#dh_golang doesn't do anything here in deb 8, and it's needed in both
if [ "$(DEB_HOST_GNU_TYPE)" != "$(DEB_BUILD_GNU_TYPE)" ]; then\
cp -rf $(BUILD_DIR)/bin/*/* $(BUILD_DIR)/bin/; \
cp -rf $(BUILD_DIR)/pkg/*/* $(BUILD_DIR)/pkg/; \

315
docs/proposals/locking.md Normal file

@ -0,0 +1,315 @@
# Locking feature proposal
We need the ability to lock files to discourage (we can never prevent) parallel
editing of binary files which will result in an unmergeable situation. This is
not a common theme in git (for obvious reasons, it conflicts with its
distributed, parallel nature), but is a requirement of any binary management
system, since files are very often completely unmergeable, and no-one likes
having to throw their work away & do it again.
## What not to do: single branch model
The simplest way to organise locking is to require that binary files are only
ever edited on a single branch; editing such a file can therefore follow a
simple sequence:
1. File starts out read-only locally
2. User locks the file, user is required to have the latest version locally from
the 'main' branch
3. User edits file & commits 1 or more times
4. User pushes these commits to the main branch
5. File is unlocked (and made read only locally again)
## A more usable approach: multi-branch model
In practice teams need to work on more than one branch, and sometimes that work
will have corresponding binary edits.
It's important to remember that the core requirement is to prevent *unintended
parallel edits of an unmergeable file*.
One way to address this would be to say that locking a file locks it across all
branches, and that lock is only released when the branch where the edit is is
merged back into a 'primary' branch. The problem is that although that allows
branching and also prevents merge conflicts, it forces merging of feature
branches before a further edit can be made by someone else.
An alternative is that locking a file locks it across all branches, but when the
lock is released, further locks on that file can only be taken on a descendant
of the latest edit that has been made, whichever branch it is on. That means
a change to the rules of the lock sequence, namely:
1. File starts out read-only locally
2. User tries to lock a file. This is only allowed if:
* The file is not already locked by anyone else, AND
* One of the following are true:
* The user has, or agrees to check out, a descendant of the latest commit
that was made for that file, whatever branch that was on, OR
* The user stays on their current commit but resets the locked file to the
state of the latest commit (making it modified locally, and
also cherry-picking changes for that file in practice).
3. User edits file & commits 1 or more times, on any branch they like
4. User pushes the commits
5. File is unlocked if:
* the latest commit to that file has been pushed (on any branch), and
* the file is not locally edited
This means that long-running branches can be maintained but that editing of a
binary file must always incorporate the latest binary edits. This means that if
this system is always respected, there is only ever one linear stream of
development for this binary file, even though that 'thread' may wind its way
across many different branches in the process.
This does mean that no-one's changes are accidentally lost, but it does mean
that we are either making new branches dependent on others, OR we're
cherry-picking changes to individual files across branches. This does change
the traditional git workflow, but importantly it achieves the core requirement
of never *accidentally* losing anyone's changes. How changes are threaded
across branches is always under the user's control.
## Breaking the rules
We must allow the user to break the rules if they know what they are doing.
Locking is there to prevent unintended binary merge conflicts, but sometimes you
might want to intentionally create one, with the full knowledge that you're
going to have to manually merge the result (or more likely, pick one side and
discard the other) later down the line. There are 2 cases of rule breaking to
support:
1. **Break someone else's lock**
People lock files and forget they've locked them, then go on holiday, or
worse, leave the company. You can't be stuck not being able to edit that file
so must be able to forcibly break someone else's lock. Ideally this should
result in some kind of notification to the original locker (might need to be a
special value-add on BB/Stash). This effectively removes the other person's
lock and is likely to cause them problems if they had edited and try to push
next time.
2. **Allow a parallel lock**
Actually similar to breaking someone else's lock, except it lets you take
another lock on a file in parallel, leaving their lock in place too, and
knowing that you're going to have to resolve the merge problem later. You
could handle this just by manually making files read/write, then using 'force
push' to override hooks that prevent pushing when not locked. However by
explicitly registering a parallel lock (possible form: 'git lfs lock
--force') this could be recorded and communicated to anyone else with a lock,
letting them know about possible merge issues down the line.
## Detailed feature points
|No | Feature | Notes
|---|---------|------------------
|1 |Lock server must be available at same API URL|
|2 |Identify unmergeable files as subset of lfs files|`git lfs track -b` ?
|3 |Make unmergeable files read-only on checkout|Perform in smudge filter
|4 |Lock a file<ul><li>Check with server which must atomically check/set</li><li>Check person requesting the lock is checked out on a commit which is a descendent of the last edit of that file (locally or on server, although last lock shouldn't have been released until push anyway), or allow --force to break rule</li><li>Record lock on server</li><li>Make file read/write locally if success</li></ul>|`git lfs lock <file>`?
|5 |Release a lock<ul><li>Check if locally modified, if so must discard</li><li>Check if user has more recent commit of this file than server, if so must push first</li><li>Release lock on server atomically</li><li>Make local file read-only</li></ul>|`git lfs unlock <file>`?
|6 |Break a lock, ie override someone else's lock and take it yourself.<ul><li>Release lock on server atomically</li><li>Proceed as per 'Lock a file'</li><li>Notify original lock holder HOW?</li></ul>|`git lfs lock -break <file>`?
|7 |Release lock on reset (maybe). Configurable option / prompt? May be resetting just to start editing again|
|8 |Release lock on push (maybe, if unmodified). See above|
|9 |Cater for read-only binary files when merging locally<ul><li>Because files are read-only this might prevent merge from working when actually it's valid.</li><li>Always fine to merge the latest version of a binary file to anywhere else</li><li>Fine to merge the non-latest version if user is aware that this may cause merge problems (see Breaking the rules)</li><li>Therefore this feature is about dealing with the read-only flag and issuing a warning if not the latest</li></ul>|
|10 |List current locks<ul><li>That the current user has</li><li>That anyone has</li><li>Potentially scoped to folder</li></ul>|`git lfs lock --list [paths...]`
|11 |Reject a push containing a binary file currently locked by someone else|pre-receive hook on server, allow --force to override (i.e. existing parameter to git push)
## Implementation details
### Types
To make the implementing locking on the lfs-test-server as well as other servers
in the future easier, it makes sense to create a `lock` package that can be
depended upon from any server. This will go along with Steve's refactor which
touches the `lfs` package quite a bit.
Below are enumerated some of the types that will presumably land in this
sub-package.
```go
// Lock represents a single lock against a particular path.
//
// Locks returned from the API may or may not be currently active; see the
// UnlockedAt field and the Active method.
type Lock struct {
	// Id is the unique identifier corresponding to this particular Lock. It
	// must be consistent with the local copy, and the server's copy.
	Id string `json:"id"`
	// Path is an absolute path to the file that is locked as a part of this
	// lock.
	Path string `json:"path"`
	// Committer is the author who initiated this lock.
	Committer struct {
		Name  string `json:"name"`
		Email string `json:"email"`
	} `json:"creator"`
	// CommitSHA is the commit that this Lock was created against. It is
	// strictly equal to the SHA of the minimum commit negotiated in order
	// to create this lock.
	CommitSHA string `json:"commit_sha"`
	// LockedAt is a required parameter that represents the instant in time
	// that this lock was created. For most server implementations, this
	// should be set to the instant at which the lock was initially
	// received.
	LockedAt time.Time `json:"locked_at"`
	// UnlockedAt is an optional parameter that represents the instant in
	// time that the lock stopped being active. If the lock is still active,
	// the server can either a) not send this field, or b) send the
	// zero-value of time.Time.
	UnlockedAt time.Time `json:"unlocked_at,omitempty"`
}
// Active returns whether or not the given lock is still active against the
// file that it is protecting. A lock is considered active while its
// UnlockedAt field holds the zero value of time.Time (i.e., the server has
// not recorded an unlock instant for it).
func (l *Lock) Active() bool {
	// time.IsZero is a method on time.Time, not a package-level function.
	return l.UnlockedAt.IsZero()
}
```
### Proposed Commands
#### `git lfs lock <path>`
The `lock` command will be used in accordance with the multi-branch flow as
proposed above to request that lock be granted to the specific path passed an
argument to the command.
```go
// LockRequest encapsulates the payload sent across the API when a client would
// like to obtain a lock against a particular path on a given remote.
type LockRequest struct {
	// Path is the path that the client would like to obtain a lock against.
	Path string `json:"path"`
	// LatestRemoteCommit is the SHA of the last known commit from the
	// remote that we are trying to create the lock against, as found in
	// `.git/refs/remotes/origin/<name>`.
	LatestRemoteCommit string `json:"latest_remote_commit"`
	// Committer is the individual that wishes to obtain the lock.
	Committer struct {
		// Name is the name of the individual who would like to obtain the
		// lock, for instance: "Rick Olson".
		Name string `json:"name"`
		// Email is the email associated with the individual who would
		// like to obtain the lock, for instance: "rick@github.com".
		Email string `json:"email"`
	} `json:"committer"`
}
```
```go
// LockResponse encapsulates the information sent over the API in response to
// a `LockRequest`.
type LockResponse struct {
	// Lock is the Lock that was optionally created in response to the
	// payload that was sent (see above). If the lock already exists, then
	// the existing lock is sent in this field instead, and the author of
	// that lock remains the same, meaning that the client failed to obtain
	// that lock. An HTTP status of "409 - Conflict" is used here.
	//
	// If the lock was unable to be created, this field will hold the
	// zero-value of Lock and the Err field will provide a more detailed set
	// of information.
	//
	// If an error was experienced in creating this lock, then the
	// zero-value of Lock should be sent here instead.
	Lock Lock `json:"lock"`
	// CommitNeeded holds the minimum commit SHA that the client must have
	// in order to obtain the lock.
	CommitNeeded string `json:"commit_needed"`
	// Err is the optional error that was encountered while trying to create
	// the above lock.
	Err error `json:"error,omitempty"`
}
```
#### `git lfs unlock <path>`
The `unlock` command is responsible for releasing the lock against a particular
file. The command takes a `<path>` argument which the LFS client will have to
internally resolve into an Id to unlock.
The API associated with this command can also be used on the server to remove
existing locks after a push.
```go
// An UnlockRequest is sent by the client over the API when they wish to remove
// a lock associated with the given Id.
type UnlockRequest struct {
	// Id is the identifier of the lock that the client wishes to remove.
	// The client resolves this Id internally from the `<path>` argument
	// given to `git lfs unlock` (see above).
	Id string `json:"id"`
}
```
```go
// UnlockResult is the result sent back from the API when asked to remove a
// lock.
type UnlockResult struct {
	// Lock is the lock corresponding to the asked-about lock in the
	// `UnlockRequest` (see above). If no matching lock was found, this
	// field will take the zero-value of Lock, and Err will be non-nil.
	Lock Lock `json:"lock"`
	// Err is an optional field which holds any error that was experienced
	// while removing the lock.
	Err error `json:"error,omitempty"`
}
```
Clients can determine whether or not their lock was removed by calling the
`Active()` method on the returned Lock, if `UnlockResult.Err` is nil.
#### `git lfs locks (-r <remote>|-b <branch>|-p <path>)|(-i id)`
For many operations, the LFS client will need to have knowledge of existing
locks on the server. Additionally, the client should not have to self-sort/index
this (potentially) large set. To remove this need, both the `locks` command and
corresponding API method take several filters.
Clients should turn the flag-values that were passed during the command
invocation into `Filter`s as described below, and batched up into the `Filters`
field in the `LockListRequest`.
```go
// Property is a constant-type that narrows fields pertaining to the server's
// Locks. Values of this type name the lock field that a filter (see
// LockListRequest.Filters) matches against.
type Property string

const (
	// Branch filters locks by the branch they were created against.
	Branch Property = "branch"
	// Id filters locks by their unique identifier.
	Id Property = "id"

	// (etc) ...
)
// LockListRequest encapsulates the request sent to the server when the client
// would like a list of locks that match the given criteria.
type LockListRequest struct {
// Filters is the set of filters to query against. If the client wishes
// to obtain a list of all locks, an empty array should be passed here.
Filters []{
// Prop is the property to search against.
Prop Property `json:"prop"`
// Value is the value that the property must take.
Value string `json:"value"`
} `json:"filters"`
// Cursor is an optional field used to tell the server which lock was
// seen last, if scanning through multiple pages of results.
//
// Servers must return a list of locks sorted in reverse chronological
// order, so the Cursor provides a consistent method of viewing all
// locks, even if more were created between two requests.
Cursor string `json:"cursor,omitempty"`
// Limit is the maximum number of locks to return in a single page.
Limit int `json:"limit"`
}
```
```go
// LockList encapsulates a set of Locks.
type LockList struct {
	// Locks is the set of locks returned back, typically matching the query
	// parameters sent in the LockListRequest call. If no locks were matched
	// from a given query, then `Locks` will be represented as an empty
	// array.
	Locks []Lock `json:"locks"`
	// NextCursor holds the Id of the Lock the client should update its
	// cursor to, if there are multiple pages of results for a particular
	// `LockListRequest`.
	NextCursor string `json:"next_cursor,omitempty"`
	// Err populates any error that was encountered during the search. If no
	// error was encountered and the operation was successful, then a value
	// of nil will be passed here.
	Err error `json:"error,omitempty"`
}

@ -0,0 +1,180 @@
# Locking API proposal
## POST /locks
| Method | Accept | Content-Type | Authorization |
|---------|--------------------------------|--------------------------------|---------------|
| `POST` | `application/vnd.git-lfs+json` | `application/vnd.git-lfs+json` | Basic |
### Request
```
> POST https://git-lfs-server.com/locks
> Accept: application/vnd.git-lfs+json
> Authorization: Basic
> Content-Type: application/vnd.git-lfs+json
>
> {
> path: "/path/to/file",
> remote: "origin",
> latest_remote_commit: "d3adbeef",
> committer: {
> name: "Jane Doe",
> email: "jane@example.com"
> }
> }
```
### Response
* **Successful response**
```
< HTTP/1.1 201 Created
< Content-Type: application/vnd.git-lfs+json
<
< {
< lock: {
< id: "some-uuid",
< path: "/path/to/file",
< committer: {
< name: "Jane Doe",
< email: "jane@example.com"
< },
< commit_sha: "d3adbeef",
< locked_at: "2016-05-17T15:49:06+00:00"
< }
< }
```
* **Bad request: minimum commit not met**
```
< HTTP/1.1 400 Bad request
< Content-Type: application/vnd.git-lfs+json
<
< {
< "commit_needed": "other_sha"
< }
```
* **Bad request: lock already present**
```
< HTTP/1.1 409 Conflict
< Content-Type: application/vnd.git-lfs+json
<
< {
< lock: {
< /* the previously created lock */
< },
< error: "already created lock"
< }
```
* **Bad response: server error**
```
< HTTP/1.1 500 Internal server error
< Content-Type: application/vnd.git-lfs+json
<
< {
< error: "unable to create lock"
< }
```
## POST /locks/:id/unlock
| Method | Accept | Content-Type | Authorization |
|---------|--------------------------------|--------------|---------------|
| `POST` | `application/vnd.git-lfs+json` | None | Basic |
### Request
```
> POST https://git-lfs-server.com/locks/:id/unlock
> Accept: application/vnd.git-lfs+json
> Authorization: Basic
```
### Response
* **Success: unlocked**
```
< HTTP/1.1 200 Ok
< Content-Type: application/vnd.git-lfs+json
<
< {
< lock: {
< id: "some-uuid",
< path: "/path/to/file",
< committer: {
< name: "Jane Doe",
< email: "jane@example.com"
< },
< commit_sha: "d3adbeef",
< locked_at: "2016-05-17T15:49:06+00:00",
< unlocked_at: "2016-05-17T15:49:06+00:00"
< }
< }
```
* **Bad response: server error**
```
< HTTP/1.1 500 Internal error
< Content-Type: application/vnd.git-lfs+json
<
< {
< error: "github/git-lfs: internal server error"
< }
```
## GET /locks
| Method | Accept | Content-Type | Authorization |
|--------|-------------------------------|--------------|---------------|
| `GET` | `application/vnd.git-lfs+json` | None | Basic |
### Request
```
> GET https://git-lfs-server.com/locks?filters...&cursor=&limit=
> Accept: application/vnd.git-lfs+json
> Authorization: Basic
```
### Response
* **Success: locks found**
Note: no matching locks yields a payload of `locks: []`, and a status of 200.
```
< HTTP/1.1 200 Ok
< Content-Type: application/vnd.git-lfs+json
<
< {
< locks: [
< {
< id: "some-uuid",
< path: "/path/to/file",
<           committer: {
< name: "Jane Doe",
< email: "jane@example.com"
< },
< commit_sha: "1ec245f",
< locked_at: "2016-05-17T15:49:06+00:00"
< }
< ],
< next_cursor: "optional-next-id",
< error: "optional error"
< }
```
* **Bad response: some locks may have matched, but the server encountered an error**
```
< HTTP/1.1 500 Internal error
< Content-Type: application/vnd.git-lfs+json
<
< {
< locks: [],
< error: "github/git-lfs: internal server error"
< }
```

@ -16,7 +16,7 @@ import (
"time"
"github.com/github/git-lfs/subprocess"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
type RefType int
@ -116,7 +116,8 @@ func RemoteForCurrentBranch() (string, error) {
return remote, nil
}
// RemoteRefForCurrentBranch returns the full remote ref (remote/remotebranch) that the current branch is tracking
// RemoteRefForCurrentBranch returns the full remote ref (refs/remotes/{remote}/{remotebranch})
// that the current branch is tracking.
func RemoteRefNameForCurrentBranch() (string, error) {
ref, err := CurrentRef()
if err != nil {
@ -134,7 +135,7 @@ func RemoteRefNameForCurrentBranch() (string, error) {
remotebranch := RemoteBranchForLocalBranch(ref.Name)
return remote + "/" + remotebranch, nil
return fmt.Sprintf("refs/remotes/%s/%s", remote, remotebranch), nil
}
// RemoteForBranch returns the remote name that a given local branch is tracking (blank if none)
@ -184,9 +185,13 @@ func LocalRefs() ([]*Ref, error) {
if err != nil {
return nil, fmt.Errorf("Failed to call git show-ref: %v", err)
}
cmd.Start()
var refs []*Ref
if err := cmd.Start(); err != nil {
return refs, err
}
scanner := bufio.NewScanner(outp)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())

@ -10,7 +10,7 @@ import (
. "github.com/github/git-lfs/git"
"github.com/github/git-lfs/test"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestCurrentRefAndCurrentRemoteRef(t *testing.T) {
@ -51,25 +51,25 @@ func TestCurrentRefAndCurrentRemoteRef(t *testing.T) {
outputs := repo.AddCommits(inputs)
// last commit was on branch3
ref, err := CurrentRef()
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, &Ref{"branch3", RefTypeLocalBranch, outputs[3].Sha}, ref)
test.RunGitCommand(t, true, "checkout", "master")
ref, err = CurrentRef()
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, &Ref{"master", RefTypeLocalBranch, outputs[2].Sha}, ref)
// Check remote
repo.AddRemote("origin")
test.RunGitCommand(t, true, "push", "-u", "origin", "master:someremotebranch")
ref, err = CurrentRemoteRef()
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, &Ref{"origin/someremotebranch", RefTypeRemoteBranch, outputs[2].Sha}, ref)
refname, err := RemoteRefNameForCurrentBranch()
assert.Equal(t, nil, err)
assert.Equal(t, "origin/someremotebranch", refname)
assert.Nil(t, err)
assert.Equal(t, "refs/remotes/origin/someremotebranch", refname)
remote, err := RemoteForCurrentBranch()
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, "origin", remote)
}
@ -262,19 +262,26 @@ func TestWorkTrees(t *testing.T) {
}
func TestVersionCompare(t *testing.T) {
assert.Equal(t, true, IsVersionAtLeast("2.6.0", "2.6.0"))
assert.Equal(t, true, IsVersionAtLeast("2.6.0", "2.6"))
assert.Equal(t, true, IsVersionAtLeast("2.6.0", "2"))
assert.Equal(t, true, IsVersionAtLeast("2.6.10", "2.6.5"))
assert.Equal(t, true, IsVersionAtLeast("2.8.1", "2.7.2"))
assert.True(t, IsVersionAtLeast("2.6.0", "2.6.0"))
assert.True(t, IsVersionAtLeast("2.6.0", "2.6"))
assert.True(t, IsVersionAtLeast("2.6.0", "2"))
assert.True(t, IsVersionAtLeast("2.6.10", "2.6.5"))
assert.True(t, IsVersionAtLeast("2.8.1", "2.7.2"))
assert.Equal(t, false, IsVersionAtLeast("1.6.0", "2"))
assert.Equal(t, false, IsVersionAtLeast("2.5.0", "2.6"))
assert.Equal(t, false, IsVersionAtLeast("2.5.0", "2.5.1"))
assert.Equal(t, false, IsVersionAtLeast("2.5.2", "2.5.10"))
assert.False(t, IsVersionAtLeast("1.6.0", "2"))
assert.False(t, IsVersionAtLeast("2.5.0", "2.6"))
assert.False(t, IsVersionAtLeast("2.5.0", "2.5.1"))
assert.False(t, IsVersionAtLeast("2.5.2", "2.5.10"))
}
func TestGitAndRootDirs(t *testing.T) {
repo := test.NewRepo(t)
repo.Pushd()
defer func() {
repo.Popd()
repo.Cleanup()
}()
git, root, err := GitAndRootDirs()
if err != nil {
t.Fatal(err)
@ -314,25 +321,25 @@ func TestGetTrackedFiles(t *testing.T) {
repo.AddCommits(inputs)
tracked, err := GetTrackedFiles("*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked) // for direct comparison
fulllist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt"}
assert.Equal(t, fulllist, tracked)
tracked, err = GetTrackedFiles("*file*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
sublist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt"}
assert.Equal(t, sublist, tracked)
tracked, err = GetTrackedFiles("folder1/*")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
sublist = []string{"folder1/anotherfile.txt", "folder1/file10.txt"}
assert.Equal(t, sublist, tracked)
tracked, err = GetTrackedFiles("folder2/*")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
sublist = []string{"folder2/folder3/deep.txt", "folder2/something.txt"}
assert.Equal(t, sublist, tracked)
@ -340,7 +347,7 @@ func TestGetTrackedFiles(t *testing.T) {
// relative dir
os.Chdir("folder1")
tracked, err = GetTrackedFiles("*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
sublist = []string{"anotherfile.txt", "file10.txt"}
assert.Equal(t, sublist, tracked)
@ -350,7 +357,7 @@ func TestGetTrackedFiles(t *testing.T) {
ioutil.WriteFile("z_newfile.txt", []byte("Hello world"), 0644)
test.RunGitCommand(t, true, "add", "z_newfile.txt")
tracked, err = GetTrackedFiles("*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
fulllist = append(fulllist, "z_newfile.txt")
assert.Equal(t, fulllist, tracked)
@ -358,21 +365,21 @@ func TestGetTrackedFiles(t *testing.T) {
// Test includes modified files (not staged)
ioutil.WriteFile("file1.txt", []byte("Modifications"), 0644)
tracked, err = GetTrackedFiles("*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
assert.Equal(t, fulllist, tracked)
// Test includes modified files (staged)
test.RunGitCommand(t, true, "add", "file1.txt")
tracked, err = GetTrackedFiles("*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
assert.Equal(t, fulllist, tracked)
// Test excludes deleted files (not committed)
test.RunGitCommand(t, true, "rm", "file2.txt")
tracked, err = GetTrackedFiles("*.txt")
assert.Equal(t, nil, err)
assert.Nil(t, err)
sort.Strings(tracked)
deletedlist := []string{"file1.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt", "z_newfile.txt"}
assert.Equal(t, deletedlist, tracked)
@ -380,12 +387,38 @@ func TestGetTrackedFiles(t *testing.T) {
}
func TestLocalRefs(t *testing.T) {
repo := test.NewRepo(t)
repo.Pushd()
defer func() {
repo.Popd()
repo.Cleanup()
}()
repo.AddCommits([]*test.CommitInput{
{
Files: []*test.FileInput{
{Filename: "file1.txt", Size: 20},
},
},
{
NewBranch: "branch",
ParentBranches: []string{"master"},
Files: []*test.FileInput{
{Filename: "file1.txt", Size: 20},
},
},
})
test.RunGitCommand(t, true, "tag", "v1")
refs, err := LocalRefs()
if err != nil {
t.Fatal(err)
}
actual := make(map[string]bool)
for _, r := range refs {
t.Logf("REF: %s", r.Name)
switch r.Type {
case RefTypeHEAD:
t.Errorf("Local HEAD ref: %v", r)
@ -393,6 +426,22 @@ func TestLocalRefs(t *testing.T) {
t.Errorf("Stash or unknown ref: %v", r)
case RefTypeRemoteBranch, RefTypeRemoteTag:
t.Errorf("Remote ref: %v", r)
default:
actual[r.Name] = true
}
}
expected := []string{"master", "branch", "v1"}
found := 0
for _, refname := range expected {
if actual[refname] {
found += 1
} else {
t.Errorf("could not find ref %q", refname)
}
}
if found != len(expected) {
t.Errorf("Unexpected local refs: %v", actual)
}
}

40
glide.lock generated Normal file

@ -0,0 +1,40 @@
hash: 275cba8ff30fe108a79618e8ad803f600136572260dc78a7d879bd248b3bdbdb
updated: 2016-05-25T10:47:36.829238548-06:00
imports:
- name: github.com/bgentry/go-netrc
version: 9fd32a8b3d3d3f9d43c341bfe098430e07609480
subpackages:
- netrc
- name: github.com/cheggaaa/pb
version: bd14546a551971ae7f460e6d6e527c5b56cd38d7
- name: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/kr/pretty
version: 088c856450c08c03eb32f7a6c221e6eefaa10e6f
- name: github.com/kr/pty
version: 5cf931ef8f76dccd0910001d74a58a7fca84a83d
- name: github.com/kr/text
version: 6807e777504f54ad073ecef66747de158294b639
- name: github.com/olekukonko/ts
version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8
- name: github.com/rubyist/tracerx
version: d7bcc0bc315bed2a841841bee5dbecc8d7d7582f
- name: github.com/spf13/cobra
version: c55cdf33856a08e4822738728b41783292812889
- name: github.com/spf13/pflag
version: 580b9be06c33d8ba9dcc8757ea56b7642472c2f5
- name: github.com/stretchr/testify
version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d
- name: github.com/technoweenie/go-contentaddressable
version: 38171def3cd15e3b76eb156219b3d48704643899
- name: github.com/ThomsonReutersEikon/go-ntlm
version: b00ec39bbdd04f845950f4dbb4fd0a2c3155e830
subpackages:
- ntlm
- name: github.com/xeipuuv/gojsonpointer
version: e0fe6f68307607d540ed8eac07a342c33fa1b54a
- name: github.com/xeipuuv/gojsonreference
version: e02fc20de94c78484cd5ffb007f8af96be030a45
- name: github.com/xeipuuv/gojsonschema
version: d5336c75940ef31c9ceeb0ae64cf92944bccb4ee
devImports: []

39
glide.yaml Normal file

@ -0,0 +1,39 @@
package: github.com/github/git-lfs
import:
- package: github.com/bgentry/go-netrc
version: 9fd32a8b3d3d3f9d43c341bfe098430e07609480
subpackages:
- netrc
- package: github.com/cheggaaa/pb
version: bd14546a551971ae7f460e6d6e527c5b56cd38d7
- package: github.com/kr/pretty
version: 088c856450c08c03eb32f7a6c221e6eefaa10e6f
- package: github.com/kr/pty
version: 5cf931ef8f76dccd0910001d74a58a7fca84a83d
- package: github.com/kr/text
version: 6807e777504f54ad073ecef66747de158294b639
- package: github.com/inconshreveable/mousetrap
version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- package: github.com/olekukonko/ts
version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8
- package: github.com/rubyist/tracerx
version: d7bcc0bc315bed2a841841bee5dbecc8d7d7582f
- package: github.com/spf13/cobra
version: c55cdf33856a08e4822738728b41783292812889
- package: github.com/spf13/pflag
version: 580b9be06c33d8ba9dcc8757ea56b7642472c2f5
- package: github.com/stretchr/testify
version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d
- package: github.com/ThomsonReutersEikon/go-ntlm
version: b00ec39bbdd04f845950f4dbb4fd0a2c3155e830
# go-ntlm includes a util/ directory, which was removed
subpackages:
- ntlm
- package: github.com/technoweenie/go-contentaddressable
version: 38171def3cd15e3b76eb156219b3d48704643899
- package: github.com/xeipuuv/gojsonpointer
version: e0fe6f68307607d540ed8eac07a342c33fa1b54a
- package: github.com/xeipuuv/gojsonreference
version: e02fc20de94c78484cd5ffb007f8af96be030a45
- package: github.com/xeipuuv/gojsonschema
version: d5336c75940ef31c9ceeb0ae64cf92944bccb4ee

@ -7,7 +7,7 @@ import (
"path/filepath"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
// isCertVerificationDisabledForHost returns whether SSL certificate verification

@ -6,7 +6,7 @@ import (
"strings"
"github.com/github/git-lfs/subprocess"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool {

@ -1,14 +1,13 @@
package httputil
import (
"crypto/x509"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
var testCert = `-----BEGIN CERTIFICATE-----
@ -39,11 +38,11 @@ func TestCertFromSSLCAInfoConfig(t *testing.T) {
defer config.Config.ResetConfig()
tempfile, err := ioutil.TempFile("", "testcert")
assert.Equal(t, nil, err, "Error creating temp cert file")
assert.Nil(t, err, "Error creating temp cert file")
defer os.Remove(tempfile.Name())
_, err = tempfile.WriteString(testCert)
assert.Equal(t, nil, err, "Error writing temp cert file")
assert.Nil(t, err, "Error writing temp cert file")
tempfile.Close()
config.Config.ClearConfig()
@ -51,15 +50,15 @@ func TestCertFromSSLCAInfoConfig(t *testing.T) {
// Should match
pool := getRootCAsForHost("git-lfs.local")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
// Shouldn't match
pool = getRootCAsForHost("wronghost.com")
assert.Equal(t, (*x509.CertPool)(nil), pool)
assert.Nil(t, pool)
// Ports have to match
pool = getRootCAsForHost("git-lfs.local:8443")
assert.Equal(t, (*x509.CertPool)(nil), pool)
assert.Nil(t, pool)
// Now use global sslcainfo
config.Config.ClearConfig()
@ -67,22 +66,22 @@ func TestCertFromSSLCAInfoConfig(t *testing.T) {
// Should match anything
pool = getRootCAsForHost("git-lfs.local")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("wronghost.com")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("git-lfs.local:8443")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
}
func TestCertFromSSLCAInfoEnv(t *testing.T) {
tempfile, err := ioutil.TempFile("", "testcert")
assert.Equal(t, nil, err, "Error creating temp cert file")
assert.Nil(t, err, "Error creating temp cert file")
defer os.Remove(tempfile.Name())
_, err = tempfile.WriteString(testCert)
assert.Equal(t, nil, err, "Error writing temp cert file")
assert.Nil(t, err, "Error writing temp cert file")
tempfile.Close()
oldEnv := config.Config.GetAllEnv()
@ -93,11 +92,11 @@ func TestCertFromSSLCAInfoEnv(t *testing.T) {
// Should match any host at all
pool := getRootCAsForHost("git-lfs.local")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("wronghost.com")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("notthisone.com:8888")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
}
@ -105,33 +104,33 @@ func TestCertFromSSLCAPathConfig(t *testing.T) {
defer config.Config.ResetConfig()
tempdir, err := ioutil.TempDir("", "testcertdir")
assert.Equal(t, nil, err, "Error creating temp cert dir")
assert.Nil(t, err, "Error creating temp cert dir")
defer os.RemoveAll(tempdir)
err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644)
assert.Equal(t, nil, err, "Error creating cert file")
assert.Nil(t, err, "Error creating cert file")
config.Config.ClearConfig()
config.Config.SetConfig("http.sslcapath", tempdir)
// Should match any host at all
pool := getRootCAsForHost("git-lfs.local")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("wronghost.com")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("notthisone.com:8888")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
}
func TestCertFromSSLCAPathEnv(t *testing.T) {
tempdir, err := ioutil.TempDir("", "testcertdir")
assert.Equal(t, nil, err, "Error creating temp cert dir")
assert.Nil(t, err, "Error creating temp cert dir")
defer os.RemoveAll(tempdir)
err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644)
assert.Equal(t, nil, err, "Error creating cert file")
assert.Nil(t, err, "Error creating cert file")
oldEnv := config.Config.GetAllEnv()
defer func() {
@ -141,17 +140,17 @@ func TestCertFromSSLCAPathEnv(t *testing.T) {
// Should match any host at all
pool := getRootCAsForHost("git-lfs.local")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("wronghost.com")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
pool = getRootCAsForHost("notthisone.com:8888")
assert.NotEqual(t, (*x509.CertPool)(nil), pool)
assert.NotNil(t, pool)
}
func TestCertVerifyDisabledGlobalEnv(t *testing.T) {
assert.Equal(t, false, isCertVerificationDisabledForHost("anyhost.com"))
assert.False(t, isCertVerificationDisabledForHost("anyhost.com"))
oldEnv := config.Config.GetAllEnv()
defer func() {
@ -159,29 +158,29 @@ func TestCertVerifyDisabledGlobalEnv(t *testing.T) {
}()
config.Config.SetAllEnv(map[string]string{"GIT_SSL_NO_VERIFY": "1"})
assert.Equal(t, true, isCertVerificationDisabledForHost("anyhost.com"))
assert.True(t, isCertVerificationDisabledForHost("anyhost.com"))
}
func TestCertVerifyDisabledGlobalConfig(t *testing.T) {
defer config.Config.ResetConfig()
assert.Equal(t, false, isCertVerificationDisabledForHost("anyhost.com"))
assert.False(t, isCertVerificationDisabledForHost("anyhost.com"))
config.Config.ClearConfig()
config.Config.SetConfig("http.sslverify", "false")
assert.Equal(t, true, isCertVerificationDisabledForHost("anyhost.com"))
assert.True(t, isCertVerificationDisabledForHost("anyhost.com"))
}
func TestCertVerifyDisabledHostConfig(t *testing.T) {
defer config.Config.ResetConfig()
assert.Equal(t, false, isCertVerificationDisabledForHost("specifichost.com"))
assert.Equal(t, false, isCertVerificationDisabledForHost("otherhost.com"))
assert.False(t, isCertVerificationDisabledForHost("specifichost.com"))
assert.False(t, isCertVerificationDisabledForHost("otherhost.com"))
config.Config.ClearConfig()
config.Config.SetConfig("http.https://specifichost.com/.sslverify", "false")
assert.Equal(t, true, isCertVerificationDisabledForHost("specifichost.com"))
assert.Equal(t, false, isCertVerificationDisabledForHost("otherhost.com"))
assert.True(t, isCertVerificationDisabledForHost("specifichost.com"))
assert.False(t, isCertVerificationDisabledForHost("otherhost.com"))
}

@ -19,7 +19,7 @@ import (
"time"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
type httpTransferStats struct {

@ -12,9 +12,9 @@ import (
"strings"
"sync/atomic"
"github.com/ThomsonReutersEikon/go-ntlm/ntlm"
"github.com/github/git-lfs/auth"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm"
)
func ntlmClientSession(c *config.Configuration, creds auth.Creds) (ntlm.ClientSession, error) {

@ -10,7 +10,7 @@ import (
"github.com/github/git-lfs/auth"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestNtlmClientSession(t *testing.T) {
@ -20,12 +20,12 @@ func TestNtlmClientSession(t *testing.T) {
creds := auth.Creds{"username": "MOOSEDOMAIN\\canadian", "password": "MooseAntlersYeah"}
_, err := ntlmClientSession(config.Config, creds)
assert.Equal(t, err, nil)
assert.Nil(t, err)
//The second call should ignore creds and give the session we just created.
badCreds := auth.Creds{"username": "badusername", "password": "MooseAntlersYeah"}
_, err = ntlmClientSession(config.Config, badCreds)
assert.Equal(t, err, nil)
assert.Nil(t, err)
//clean up
config.Config.NtlmSession = nil
@ -38,7 +38,7 @@ func TestNtlmClientSessionBadCreds(t *testing.T) {
creds := auth.Creds{"username": "badusername", "password": "MooseAntlersYeah"}
_, err := ntlmClientSession(config.Config, creds)
assert.NotEqual(t, err, nil)
assert.NotNil(t, err)
//clean up
config.Config.NtlmSession = nil
@ -47,12 +47,12 @@ func TestNtlmClientSessionBadCreds(t *testing.T) {
func TestNtlmCloneRequest(t *testing.T) {
req1, _ := http.NewRequest("Method", "url", nil)
cloneOfReq1, err := cloneRequest(req1)
assert.Equal(t, err, nil)
assert.Nil(t, err)
assertRequestsEqual(t, req1, cloneOfReq1)
req2, _ := http.NewRequest("Method", "url", bytes.NewReader([]byte("Moose can be request bodies")))
cloneOfReq2, err := cloneRequest(req2)
assert.Equal(t, err, nil)
assert.Nil(t, err)
assertRequestsEqual(t, req2, cloneOfReq2)
}
@ -64,11 +64,11 @@ func assertRequestsEqual(t *testing.T, req1 *http.Request, req2 *http.Request) {
}
if req1.Body == nil {
assert.Equal(t, req2.Body, nil)
assert.Nil(t, req2.Body)
} else {
bytes1, _ := ioutil.ReadAll(req1.Body)
bytes2, _ := ioutil.ReadAll(req2.Body)
assert.Equal(t, bytes.Compare(bytes1, bytes2), 0)
assert.Equal(t, bytes1, bytes2)
}
}
@ -77,8 +77,8 @@ func TestNtlmHeaderParseValid(t *testing.T) {
res.Header = make(map[string][]string)
res.Header.Add("Www-Authenticate", "NTLM "+base64.StdEncoding.EncodeToString([]byte("I am a moose")))
bytes, err := parseChallengeResponse(&res)
assert.Equal(t, err, nil)
assert.Equal(t, strings.HasPrefix(string(bytes), "NTLM"), false)
assert.Nil(t, err)
assert.False(t, strings.HasPrefix(string(bytes), "NTLM"))
}
func TestNtlmHeaderParseInvalidLength(t *testing.T) {
@ -100,7 +100,7 @@ func TestNtlmHeaderParseInvalid(t *testing.T) {
res.Header = make(map[string][]string)
res.Header.Add("Www-Authenticate", base64.StdEncoding.EncodeToString([]byte("NTLM I am a moose")))
_, err := parseChallengeResponse(&res)
assert.NotEqual(t, err, nil)
assert.NotNil(t, err)
}
func TestCloneSmallBody(t *testing.T) {

@ -12,7 +12,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
type ClientError struct {

@ -3,7 +3,7 @@ package lfs
import (
"testing"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestBatcherSizeMet(t *testing.T) {
@ -65,7 +65,7 @@ func runBatcherTests(cases []batcherTestCase, t *testing.T) {
items := c.ItemCount
for i := 0; i < c.Batches(); i++ {
group := b.Next()
assert.Equal(t, c.BatchSize, len(group))
assert.Len(t, group, c.BatchSize)
items -= c.BatchSize
}
}

@ -11,7 +11,7 @@ import (
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/localstorage"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
var (

@ -7,7 +7,7 @@ import (
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/test"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestAllCurrentObjectsNone(t *testing.T) {

@ -6,15 +6,15 @@ import (
"os"
"path/filepath"
"github.com/cheggaaa/pb"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/transfer"
"github.com/github/git-lfs/api"
"github.com/github/git-lfs/config"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/vendor/_nuts/github.com/cheggaaa/pb"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
func PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb progress.CopyCallback) error {

@ -9,14 +9,14 @@ import (
"testing"
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestEncode(t *testing.T) {
var buf bytes.Buffer
pointer := NewPointer("booya", 12345, nil)
_, err := EncodePointer(&buf, pointer)
assert.Equal(t, nil, err)
assert.Nil(t, err)
bufReader := bufio.NewReader(&buf)
assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n")
@ -51,7 +51,7 @@ func TestEncodeExtensions(t *testing.T) {
}
pointer := NewPointer("main_oid", 12345, exts)
_, err := EncodePointer(&buf, pointer)
assert.Equal(t, nil, err)
assert.Nil(t, err)
bufReader := bufio.NewReader(&buf)
assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n")
@ -70,7 +70,7 @@ func TestEncodeExtensions(t *testing.T) {
func assertLine(t *testing.T, r *bufio.Reader, expected string) {
actual, err := r.ReadString('\n')
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, expected, actual)
}
@ -290,5 +290,5 @@ size 12345`,
}
func assertEqualWithExample(t *testing.T, example string, expected, actual interface{}) {
assert.Equalf(t, expected, actual, "Example:\n%s", strings.TrimSpace(example))
assert.Equal(t, expected, actual, "Example:\n%s", strings.TrimSpace(example))
}

@ -15,7 +15,7 @@ import (
"time"
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
const (

@ -11,7 +11,7 @@ import (
. "github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/test"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestScanUnpushed(t *testing.T) {
@ -54,36 +54,36 @@ func TestScanUnpushed(t *testing.T) {
repo.AddRemote("upstream")
pointers, err := ScanUnpushed("")
assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed")
assert.Equal(t, 4, len(pointers), "Should be 4 pointers because none pushed")
assert.Nil(t, err, "Should be no error calling ScanUnpushed")
assert.Len(t, pointers, 4, "Should be 4 pointers because none pushed")
test.RunGitCommand(t, true, "push", "origin", "branch2")
// Branch2 will have pushed 2 commits
pointers, err = ScanUnpushed("")
assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed")
assert.Equal(t, 2, len(pointers), "Should be 2 pointers")
assert.Nil(t, err, "Should be no error calling ScanUnpushed")
assert.Len(t, pointers, 2, "Should be 2 pointers")
test.RunGitCommand(t, true, "push", "upstream", "master")
// Master pushes 1 more commit
pointers, err = ScanUnpushed("")
assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed")
assert.Equal(t, 1, len(pointers), "Should be 1 pointer")
assert.Nil(t, err, "Should be no error calling ScanUnpushed")
assert.Len(t, pointers, 1, "Should be 1 pointer")
test.RunGitCommand(t, true, "push", "origin", "branch3")
// All pushed (somewhere)
pointers, err = ScanUnpushed("")
assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed")
assert.Equal(t, 0, len(pointers), "Should be 0 pointers unpushed")
assert.Nil(t, err, "Should be no error calling ScanUnpushed")
assert.Empty(t, pointers, "Should be 0 pointers unpushed")
// Check origin
pointers, err = ScanUnpushed("origin")
assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed")
assert.Equal(t, 0, len(pointers), "Should be 0 pointers unpushed to origin")
assert.Nil(t, err, "Should be no error calling ScanUnpushed")
assert.Empty(t, pointers, "Should be 0 pointers unpushed to origin")
// Check upstream
pointers, err = ScanUnpushed("upstream")
assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed")
assert.Equal(t, 2, len(pointers), "Should be 2 pointers unpushed to upstream")
assert.Nil(t, err, "Should be no error calling ScanUnpushed")
assert.Len(t, pointers, 2, "Should be 2 pointers unpushed to upstream")
}
func TestScanPreviousVersions(t *testing.T) {

@ -4,7 +4,7 @@ import (
"strings"
"testing"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
var (
@ -108,7 +108,7 @@ func TestParseLogOutputToPointersAdditions(t *testing.T) {
for p := range pchan {
pointers = append(pointers, p)
}
assert.Equal(t, 5, len(pointers))
assert.Len(t, pointers, 5)
// modification, + side
assert.Equal(t, "radial_1.png", pointers[0].Name)
@ -142,7 +142,7 @@ func TestParseLogOutputToPointersAdditions(t *testing.T) {
for p := range pchan {
pointers = append(pointers, p)
}
assert.Equal(t, 1, len(pointers))
assert.Len(t, pointers, 1)
assert.Equal(t, "waveNM.png", pointers[0].Name)
assert.Equal(t, "fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908", pointers[0].Oid)
assert.Equal(t, int64(125873), pointers[0].Size)
@ -158,7 +158,7 @@ func TestParseLogOutputToPointersAdditions(t *testing.T) {
for p := range pchan {
pointers = append(pointers, p)
}
assert.Equal(t, 4, len(pointers))
assert.Len(t, pointers, 4)
assert.Equal(t, "radial_1.png", pointers[0].Name)
assert.Equal(t, "3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b", pointers[0].Oid)
assert.Equal(t, int64(3152388), pointers[0].Size)
@ -188,7 +188,7 @@ func TestParseLogOutputToPointersDeletion(t *testing.T) {
pointers = append(pointers, p)
}
assert.Equal(t, 4, len(pointers))
assert.Len(t, pointers, 4)
// deletion, - side
assert.Equal(t, "smoke_1.png", pointers[0].Name)
@ -218,7 +218,7 @@ func TestParseLogOutputToPointersDeletion(t *testing.T) {
for p := range pchan {
pointers = append(pointers, p)
}
assert.Equal(t, 1, len(pointers))
assert.Len(t, pointers, 1)
assert.Equal(t, "flare_1.png", pointers[0].Name)
assert.Equal(t, "ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28", pointers[0].Oid)
assert.Equal(t, int64(72982), pointers[0].Size)
@ -234,7 +234,7 @@ func TestParseLogOutputToPointersDeletion(t *testing.T) {
for p := range pchan {
pointers = append(pointers, p)
}
assert.Equal(t, 3, len(pointers))
assert.Len(t, pointers, 3)
assert.Equal(t, "smoke_1.png", pointers[0].Name)
assert.Equal(t, "8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e", pointers[0].Oid)
assert.Equal(t, int64(35022), pointers[0].Size)

@ -10,7 +10,7 @@ import (
"github.com/github/git-lfs/git"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/transfer"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
const (

@ -6,7 +6,7 @@ import (
"testing"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert"
"github.com/stretchr/testify/assert"
)
func TestWriterWithCallback(t *testing.T) {
@ -26,15 +26,15 @@ func TestWriterWithCallback(t *testing.T) {
readBuf := make([]byte, 3)
n, err := reader.Read(readBuf)
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, "BOO", string(readBuf[0:n]))
n, err = reader.Read(readBuf)
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, "YA", string(readBuf[0:n]))
assert.Equal(t, 2, called)
assert.Equal(t, 2, len(calledRead))
assert.Len(t, calledRead, 2)
assert.Equal(t, 3, int(calledRead[0]))
assert.Equal(t, 5, int(calledRead[1]))
}

@ -4,7 +4,7 @@ import (
"os"
"path/filepath"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
// AllObjects returns a slice of the the objects stored in this LocalStorage

@ -6,7 +6,7 @@ import (
"strings"
"time"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
func (s *LocalStorage) ClearTempObjects() error {

@ -8,7 +8,7 @@ import (
"sync/atomic"
"time"
"github.com/github/git-lfs/vendor/_nuts/github.com/olekukonko/ts"
"github.com/olekukonko/ts"
)
// ProgressMeter provides a progress bar type output for the TransferQueue. It

@ -6,7 +6,7 @@ import (
"runtime"
"strings"
"github.com/github/git-lfs/vendor/_nuts/github.com/olekukonko/ts"
"github.com/olekukonko/ts"
)
// Indeterminate progress indicator 'spinner'

@ -12,4 +12,4 @@ fi
script/fmt
rm -rf bin/*
GO15VENDOREXPERIMENT=0 go run script/*.go -cmd build "$@"
GO15VENDOREXPERIMENT=1 go run script/*.go -cmd build "$@"

@ -46,4 +46,4 @@ parallel=${GIT_LFS_TEST_MAXPROCS:-4}
echo "Running this maxprocs=$parallel"
echo
GO15VENDOREXPERIMENT=0 GIT_LFS_TEST_MAXPROCS=$parallel GIT_LFS_TEST_DIR="$GIT_LFS_TEST_DIR" SHUTDOWN_LFS="no" go run script/*.go -cmd integration "$@"
GO15VENDOREXPERIMENT=1 GIT_LFS_TEST_MAXPROCS=$parallel GIT_LFS_TEST_DIR="$GIT_LFS_TEST_DIR" SHUTDOWN_LFS="no" go run script/*.go -cmd integration "$@"

@ -1,6 +1,6 @@
#!/usr/bin/env bash
deps=$(go list -f '{{join .Deps "\n"}}' . | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -v "github.com/github/git-lfs")
deps=$(GO15VENDOREXPERIMENT=1 go list -f '{{join .Deps "\n"}}' . | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -v "github.com/github/git-lfs")
# exit 0 means non-vendored deps were found
if [ $? -eq 0 ];
@ -8,8 +8,8 @@ then
echo "Non vendored dependencies found:"
for d in $deps; do echo "\t$d"; done
echo
echo "These dependencies should be tracked in 'Nut.toml', with an import prefix of:"
echo "\tgithub.com/github/git-lfs/vendor/_nuts"
echo "These dependencies should be tracked in 'glide.yaml'."
echo "Consider running "glide update" or "glide get" to vendor a new dependency."
exit 1
else
echo "Looks good!"

@ -1,3 +1,3 @@
#!/usr/bin/env bash
script/fmt
GO15VENDOREXPERIMENT=0 go run script/*.go -cmd release "$@"
GO15VENDOREXPERIMENT=1 go run script/*.go -cmd release "$@"

@ -1,4 +1,4 @@
#!/usr/bin/env bash
script/fmt
commit=`git rev-parse --short HEAD`
GO15VENDOREXPERIMENT=0 go run -ldflags="-X github.com/github/git-lfs/config.GitCommit=$commit" ./git-lfs.go "$@"
GO15VENDOREXPERIMENT=1 go run -ldflags="-X github.com/github/git-lfs/config.GitCommit=$commit" ./git-lfs.go "$@"

@ -1,10 +1,14 @@
#!/usr/bin/env bash
#/ Usage: script/test # run all tests
#/ Usage: script/test # run all non-vendored tests
#/ script/test <subdir> # run just a package's tests
script/fmt
if [ $# -gt 0 ]; then
GO15VENDOREXPERIMENT=0 go test "./$1"
else
GO15VENDOREXPERIMENT=0 go test ./...
GO15VENDOREXPERIMENT=1 go test "./$1"
else
GO15VENDOREXPERIMENT=1 go test \
$(GO15VENDOREXPERIMENT=1 go list ./... \
| grep -v "github.com/olekukonko/ts" \
| grep -v "github.com/technoweenie/go-contentaddressable" \
)
fi

@ -1,2 +1,5 @@
#!/usr/bin/env bash
nut install
glide update -s -u
glide install -s -u
rm -rf vendor/github.com/ThomsonReutersEikon/go-ntlm/utils

@ -6,7 +6,7 @@ import (
"os/exec"
"syscall"
"github.com/github/git-lfs/vendor/_nuts/github.com/kr/pty"
"github.com/kr/pty"
)
// NewTty creates a pseudo-TTY for a command and modifies it appropriately so

@ -8,7 +8,7 @@ import (
"os/exec"
"strings"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
// SimpleExec is a small wrapper around os/exec.Command.

@ -67,7 +67,7 @@ There are a few environment variables that you can set to change the test suite
behavior:
* `GIT_LFS_TEST_DIR=path` - This sets the directory that is used as the current
working directory of the tests. By default, this will be in your temp dir. It's
working directory of the tests. By default, this will be in your temp dir. It's
recommended that this is set to a directory outside of any Git repository.
* `GIT_LFS_TEST_MAXPROCS=N` - This tells `script/integration` how many tests to
run in parallel. Default: 4.

@ -15,7 +15,7 @@ import (
"github.com/github/git-lfs/errutil"
"github.com/github/git-lfs/lfs"
"github.com/github/git-lfs/test"
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra"
"github.com/spf13/cobra"
)
type TestObject struct {

@ -197,7 +197,7 @@ clone_repo() {
# clone_repo_ssl clones a repository from the test Git server to the subdirectory
# $dir under $TRASHDIR, using the SSL endpoint.
# $dir under $TRASHDIR, using the SSL endpoint.
# setup_remote_repo() needs to be run first. Output is written to clone_ssl.log.
clone_repo_ssl() {
cd "$TRASHDIR"
@ -233,10 +233,10 @@ setup() {
if [ -z "$SKIPCOMPILE" ]; then
for go in test/cmd/*.go; do
GO15VENDOREXPERIMENT=0 go build -o "$BINPATH/$(basename $go .go)" "$go"
GO15VENDOREXPERIMENT=1 go build -o "$BINPATH/$(basename $go .go)" "$go"
done
# Ensure API test util is built during tests to ensure it stays in sync
GO15VENDOREXPERIMENT=0 go build -o "$BINPATH/git-lfs-test-server-api" "test/git-lfs-test-server-api/main.go" "test/git-lfs-test-server-api/testdownload.go" "test/git-lfs-test-server-api/testupload.go"
GO15VENDOREXPERIMENT=1 go build -o "$BINPATH/git-lfs-test-server-api" "test/git-lfs-test-server-api/main.go" "test/git-lfs-test-server-api/testdownload.go" "test/git-lfs-test-server-api/testupload.go"
fi
LFSTEST_URL="$LFS_URL_FILE" LFSTEST_SSL_URL="$LFS_SSL_URL_FILE" LFSTEST_DIR="$REMOTEDIR" LFSTEST_CERT="$LFS_CERT_FILE" lfstest-gitserver > "$REMOTEDIR/gitserver.log" 2>&1 &

@ -5,7 +5,7 @@ import (
"io/ioutil"
"testing"
"github.com/bmizerany/assert"
"github.com/stretchr/testify/assert"
)
func TestCopyWithCallback(t *testing.T) {
@ -20,10 +20,10 @@ func TestCopyWithCallback(t *testing.T) {
assert.Equal(t, 5, int(total))
return nil
})
assert.Equal(t, nil, err)
assert.Nil(t, err)
assert.Equal(t, 5, int(n))
assert.Equal(t, 1, called)
assert.Equal(t, 1, len(calledWritten))
assert.Len(t, calledWritten, 1)
assert.Equal(t, 5, int(calledWritten[0]))
}

@ -15,7 +15,7 @@ import (
"github.com/github/git-lfs/httputil"
"github.com/github/git-lfs/progress"
"github.com/github/git-lfs/tools"
"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
"github.com/rubyist/tracerx"
)
const (

Some files were not shown because too many files have changed in this diff Show More