diff --git a/.gitignore b/.gitignore
index 255c385e..1ee245b9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,6 @@
 bin/
 benchmark/
+out/
 
 # only allow man/*.\d.ronn files
 man/*
@@ -24,3 +25,5 @@ docker/*.key
 src
 commands/mancontent_gen.go
+
+lfstest-*
diff --git a/.travis.yml b/.travis.yml
index de4f1538..8e1b2cbc 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,7 +15,6 @@ matrix:
   fast_finish: true
   allow_failures:
     - os: osx
-      go: 1.6
   include:
     - env: git-latest
      os: linux
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e085ac4c..22040e31 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,30 @@
 # Git LFS Changelog
 
+## 1.2.1 (2 June 2016)
+
+### Features
+
+* Add missing config details to `env` command #1217 (@sinbad)
+* Allow smudge filter to return 0 on download failure #1213 (@sinbad)
+* Add `git lfs update --manual` option & promote it on hook install fail #1182 (@sinbad)
+* Pass `git lfs clone` flags through to `git clone` correctly, respect some options #1160 (@sinbad)
+
+### Bugs
+
+* Clean trailing `/` from include/exclude paths #1278 (@ttaylorr)
+* Fix problems with user prompts in `git lfs clone` #1185 (@sinbad)
+* Fix failure to return non-zero exit code when lfs install/update fails to install hooks #1178 (@sinbad)
+* Fix missing man page #1149 (@javabrett)
+* fix concurrent map read and map write #1179 (@technoweenie)
+
+### Misc
+
+* Allow additional fields on request & response schema #1276 (@sinbad)
+* Fix installer error on win32. #1198 (@teo-tsirpanis)
+* Applied same -ldflags -X name value -> name=value fix #1193 (@javabrett)
+* add instructions to install from MacPorts #1186 (@skymoo)
+* Add xenial repo #1170 (@graingert)
+
 ## 1.2.0 (14 April 2016)
 
 ### Features
@@ -18,7 +43,7 @@
 * Fix silent failure to push LFS objects when ref matches a filename in the working copy #1096 (@epriestley)
 * Fix problems with using LFS in symlinked folders #818 (@sinbad)
 * Fix git lfs push silently misbehaving on ambiguous refs; fail like git push instead #1118 (@sinbad)
-* Whitelist lfs.*.access config in local ~/.lfsconfig #1122 (@rjbell4)
+* Whitelist `lfs.*.access` config in local ~/.lfsconfig #1122 (@rjbell4)
 * Only write the encoded pointer information to Stdout #1105 (@sschuberth)
 * Use hardcoded auth from remote or lfs config when accessing the storage api #1136 (@technoweenie, @jonmagic)
 * SSH should be called more strictly with command as one argument #1134 (@sinbad)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1fed404e..054675ae 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -94,8 +94,8 @@ tests:
 
 ## Updating 3rd party packages
 
-0. Update `Nut.toml`.
-0. Run `script/vendor` to update the code in the `.vendor/src` directory.
+0. Update `glide.yaml`.
+0. Run `script/vendor` to update the code in the `vendor` directory.
 0. Commit the change. Git LFS vendors the full source code in the repository.
 0. Submit a pull request.
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..770a8688
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,52 @@
+GOC ?= gccgo
+AR ?= ar
+
+SRCDIR := $(dir $(lastword $(MAKEFILE_LIST)))
+
+LIBDIR := out/github.com/github/git-lfs
+GOFLAGS := -Iout
+
+ifeq ($(MAKEFILE_GEN),)
+
+MAKEFILE_GEN := out/Makefile.gen
+
+all: $(MAKEFILE_GEN)
+	@$(MAKE) -f $(lastword $(MAKEFILE_LIST)) $(MAKEFLAGS) MAKEFILE_GEN=$(MAKEFILE_GEN) $@
+
+$(MAKEFILE_GEN) : out/genmakefile $(SRCDIR)commands/mancontent_gen.go
+	@mkdir -p $(dir $@)
+	$< "$(SRCDIR)" github.com/github/git-lfs/ > $@
+
+else
+
+all : bin/git-lfs
+
+include $(MAKEFILE_GEN)
+
+$(LIBDIR)/git-lfs.o : $(SRC_main) $(DEPS_main)
+	@mkdir -p $(dir $@)
+	$(GOC) $(GOFLAGS) -c -o $@ $(SRC_main)
+
+bin/git-lfs : $(LIBDIR)/git-lfs.o $(DEPS_main)
+	@mkdir -p $(dir $@)
+	$(GOC) $(GOFLAGS) -o $@ $^
+
+%.a : %.o
+	$(AR) rc $@ $<
+
+endif
+
+$(SRCDIR)commands/mancontent_gen.go : out/mangen
+	cd $(SRCDIR)commands && $(CURDIR)/out/mangen
+
+out/mangen : $(SRCDIR)docs/man/mangen.go
+	@mkdir -p $(dir $@)
+	$(GOC) -o $@ $<
+
+out/genmakefile : $(SRCDIR)script/genmakefile/genmakefile.go
+	@mkdir -p $(dir $@)
+	$(GOC) -o $@ $<
+
+clean :
+	rm -rf out bin
+	rm -f $(SRCDIR)commands/mancontent_gen.go
diff --git a/Nut.toml b/Nut.toml
deleted file mode 100644
index 7f8608ad..00000000
--- a/Nut.toml
+++ /dev/null
@@ -1,24 +0,0 @@
-[application]
-
-name = "git-lfs"
-version = "1.2.0"
-authors = [
-  "Rick Olson ",
-  "Scott Barron ",
-]
-
-[dependencies]
-
-"github.com/bgentry/go-netrc/netrc" = "9fd32a8b3d3d3f9d43c341bfe098430e07609480"
-"github.com/cheggaaa/pb" = "bd14546a551971ae7f460e6d6e527c5b56cd38d7"
-"github.com/kr/pretty" = "088c856450c08c03eb32f7a6c221e6eefaa10e6f"
-"github.com/kr/pty" = "5cf931ef8f76dccd0910001d74a58a7fca84a83d"
-"github.com/kr/text" = "6807e777504f54ad073ecef66747de158294b639"
-"github.com/inconshreveable/mousetrap" = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
-"github.com/olekukonko/ts" = "ecf753e7c962639ab5a1fb46f7da627d4c0a04b8"
-"github.com/rubyist/tracerx" = "d7bcc0bc315bed2a841841bee5dbecc8d7d7582f"
-"github.com/spf13/cobra" = "c55cdf33856a08e4822738728b41783292812889"
-"github.com/spf13/pflag" = "580b9be06c33d8ba9dcc8757ea56b7642472c2f5"
-"github.com/ThomsonReutersEikon/go-ntlm/ntlm" = "52b7efa603f1b809167b528b8bbaa467e36fdc02"
-"github.com/technoweenie/assert" = "b25ea301d127043ffacf3b2545726e79b6632139"
-"github.com/technoweenie/go-contentaddressable" = "38171def3cd15e3b76eb156219b3d48704643899"
diff --git a/README.md b/README.md
index 76c22c98..64af6047 100644
--- a/README.md
+++ b/README.md
@@ -22,8 +22,8 @@ preferences.
 
 Note: Git LFS requires Git v1.8.2 or higher.
 
-One installed, you need to setup the global Git hooks for Git LFS. This only
-needs to be run once per machine.
+Once installed, you need to set up the global Git hooks for Git LFS. This only
+needs to be done once per machine.
 
 ```bash
 $ git lfs install
@@ -102,12 +102,20 @@ page][impl]. You can also join [the project's chat room][chat].
 
 [impl]: https://github.com/github/git-lfs/wiki/Implementations
 
+### Using LFS from other Go code
+
+At the moment git-lfs is focused only on the stability of its command line
+interface and the [server APIs](docs/api/README.md). The contents of the
+source packages are subject to change. We therefore currently discourage other
+Go code from depending on the git-lfs packages directly; an API to be used by
+external Go code may be provided in the future.
+
 ## Core Team
 
 These are the humans that form the Git LFS core team, which runs the project.
 
 In alphabetical order:
 
-| [@andyneff](https://github.com/andyneff) | [@rubyist](https://github.com/rubyist) | [@sinbad](https://github.com/sinbad) | [@technoweenie](https://github.com/technoweenie) |
-|---|---|---|---|---|
-| [![](https://avatars1.githubusercontent.com/u/7596961?v=3&s=100)](https://github.com/andyneff) | [![](https://avatars1.githubusercontent.com/u/143?v=3&s=100)](https://github.com/rubyist) | [![](https://avatars1.githubusercontent.com/u/142735?v=3&s=100)](https://github.com/sinbad) | [![](https://avatars3.githubusercontent.com/u/21?v=3&s=100)](https://github.com/technoweenie) |
+| [@andyneff](https://github.com/andyneff) | [@rubyist](https://github.com/rubyist) | [@sinbad](https://github.com/sinbad) | [@technoweenie](https://github.com/technoweenie) | [@ttaylorr](https://github.com/ttaylorr) |
+|---|---|---|---|---|
+| [![](https://avatars1.githubusercontent.com/u/7596961?v=3&s=100)](https://github.com/andyneff) | [![](https://avatars1.githubusercontent.com/u/143?v=3&s=100)](https://github.com/rubyist) | [![](https://avatars1.githubusercontent.com/u/142735?v=3&s=100)](https://github.com/sinbad) | [![](https://avatars3.githubusercontent.com/u/21?v=3&s=100)](https://github.com/technoweenie) | [![](https://avatars3.githubusercontent.com/u/443245?v=3&s=100)](https://github.com/ttaylorr) |
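Editor's note: to make the caveat in "Using LFS from other Go code" concrete, internal callers currently drive the client roughly as follows. This is a hedged sketch assembled from the doc comments on `api.Client` and `api.LockService` later in this diff; the path literal is invented, and every name used here is exactly the kind of thing declared unstable above.

```go
package main

import (
	"fmt"

	"github.com/github/git-lfs/api"
)

func main() {
	// Passing nil makes the client fall back to the default HTTP lifecycle.
	client := api.NewClient(nil)

	// Service methods return a request schema plus the typed value that the
	// response body will be decoded into.
	schema, res := client.Locks.Lock(&api.LockRequest{
		Path:      "path/to/file.psd", // hypothetical path
		Committer: api.CurrentCommitter(),
	})

	if _, err := client.Do(schema); err != nil {
		fmt.Println("lock failed:", err)
		return
	}

	fmt.Println(res.Lock)
}
```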
diff --git a/ROADMAP.md b/ROADMAP.md
index aa5bc1ca..fe3100ac 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -7,7 +7,6 @@ Git LFS. If you have an idea for a new feature, open an issue for discussion.
 * git index issues [#937](https://github.com/github/git-lfs/issues/937)
 * `authenticated` property on urls [#960](https://github.com/github/git-lfs/issues/960)
-* Use `expires_at` to quickly put objects in the queue to hit the API again to refresh tokens.
 * Add ref information to upload request [#969](https://github.com/github/git-lfs/issues/969)
 * Accept raw remote URLs as valid [#1085](https://github.com/github/git-lfs/issues/1085)
 * use git proxy settings [#1125](https://github.com/github/git-lfs/issues/1125)
@@ -19,7 +18,6 @@ Git LFS. If you have an idea for a new feature, open an issue for discussion.
 * Investigate `--shared` and `--dissociate` options for `git clone` (similar to `--references`)
 * Investigate `GIT_SSH_COMMAND` [#1142](https://github.com/github/git-lfs/issues/1142)
 * Teach `git lfs install` to use `git config --system` instead of `git config --global` by default [#1177](https://github.com/github/git-lfs/pull/1177)
-* Don't allow `git lfs track` to operate on `.git*` or `.lfs*` files [#1099](https://github.com/github/git-lfs/issues/1099)
 * Investigate `git -c lfs.url=... lfs clone` usage
 * Test that manpages are built and included [#1149](https://github.com/github/git-lfs/pull/1149)
 * Update CI to build from source outside of git repo [#1156](https://github.com/github/git-lfs/issues/1156#issuecomment-211574343)
diff --git a/api/api.go b/api/api.go
new file mode 100644
index 00000000..c5873a8d
--- /dev/null
+++ b/api/api.go
@@ -0,0 +1,202 @@
+// Package api provides the interface for querying LFS servers (metadata)
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"github.com/github/git-lfs/config"
+	"github.com/github/git-lfs/errutil"
+	"github.com/github/git-lfs/git"
+	"github.com/github/git-lfs/httputil"
+	"github.com/github/git-lfs/tools"
+
+	"github.com/rubyist/tracerx"
+)
+
+// BatchOrLegacy calls the Batch API and falls back on the Legacy API.
+// This is done for simplicity; the legacy route is not optimal (it is serial).
+// TODO LEGACY API: remove when legacy API removed
+func BatchOrLegacy(objects []*ObjectResource, operation string, transferAdapters []string) (objs []*ObjectResource, transferAdapter string, e error) {
+	if !config.Config.BatchTransfer() {
+		objs, err := Legacy(objects, operation)
+		return objs, "", err
+	}
+	objs, adapterName, err := Batch(objects, operation, transferAdapters)
+	if err != nil {
+		if errutil.IsNotImplementedError(err) {
+			git.Config.SetLocal("", "lfs.batch", "false")
+			objs, err := Legacy(objects, operation)
+			return objs, "", err
+		}
+		return nil, "", err
+	}
+	return objs, adapterName, nil
+}
+
+func BatchOrLegacySingle(inobj *ObjectResource, operation string, transferAdapters []string) (obj *ObjectResource, transferAdapter string, e error) {
+	objs, adapterName, err := BatchOrLegacy([]*ObjectResource{inobj}, operation, transferAdapters)
+	if err != nil {
+		return nil, "", err
+	}
+	if len(objs) > 0 {
+		return objs[0], adapterName, nil
+	}
+	return nil, "", fmt.Errorf("Object not found")
+}
+
+// Batch calls the batch API and returns object results
+func Batch(objects []*ObjectResource, operation string, transferAdapters []string) (objs []*ObjectResource, transferAdapter string, e error) {
+	if len(objects) == 0 {
+		return nil, "", nil
+	}
+
+	o := &batchRequest{Operation: operation, Objects: objects, TransferAdapterNames: transferAdapters}
+	by, err := json.Marshal(o)
+	if err != nil {
+		return nil, "", errutil.Error(err)
+	}
+
+	req, err := NewBatchRequest(operation)
+	if err != nil {
+		return nil, "", errutil.Error(err)
+	}
+
+	req.Header.Set("Content-Type", MediaType)
+	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
+	req.ContentLength = int64(len(by))
+	req.Body = tools.NewReadSeekCloserWrapper(bytes.NewReader(by))
+
+	tracerx.Printf("api: batch %d files", len(objects))
+
+	res, bresp, err := DoBatchRequest(req)
+
+	if err != nil {
+		if res == nil {
+			return nil, "", errutil.NewRetriableError(err)
+		}
+
+		if res.StatusCode == 0 {
+			return nil, "", errutil.NewRetriableError(err)
+		}
+
+		if errutil.IsAuthError(err) {
+			httputil.SetAuthType(req, res)
+			return Batch(objects, operation, transferAdapters)
+		}
+
+		switch res.StatusCode {
+		case 404, 410:
+			tracerx.Printf("api: batch not implemented: %d", res.StatusCode)
+			return nil, "", errutil.NewNotImplementedError(nil)
+		}
+
+		tracerx.Printf("api error: %s", err)
+		return nil, "", errutil.Error(err)
+	}
+	httputil.LogTransfer("lfs.batch", res)
+
+	if res.StatusCode != 200 {
+		return nil, "", errutil.Error(fmt.Errorf("Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode))
+	}
+
+	return bresp.Objects, bresp.TransferAdapterName, nil
+}
+
+// Legacy calls the legacy API serially and returns ObjectResources
+// TODO LEGACY API: remove when legacy API removed
+func Legacy(objects []*ObjectResource, operation string) ([]*ObjectResource, error) {
+	retobjs := make([]*ObjectResource, 0, len(objects))
+	dl := operation == "download"
+	var globalErr error
+	for _, o := range objects {
+		var ret *ObjectResource
+		var err error
+		if dl {
+			ret, err = DownloadCheck(o.Oid)
+		} else {
+			ret, err = UploadCheck(o.Oid, o.Size)
+		}
+		if err != nil {
+			// Store for the end, likely only one
+			globalErr = err
+		}
+		retobjs = append(retobjs, ret)
+	}
+	return retobjs, globalErr
+}
+
+// TODO LEGACY API: remove when legacy API removed
+func DownloadCheck(oid string) (*ObjectResource, error) {
+	req, err := NewRequest("GET", oid)
+	if err != nil {
+		return nil, errutil.Error(err)
+	}
+
+	res, obj, err := DoLegacyRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	httputil.LogTransfer("lfs.download", res)
+
+	_, err = obj.NewRequest("download", "GET")
+	if err != nil {
+		return nil, errutil.Error(err)
+	}
+
+	return obj, nil
+}
+
+// TODO LEGACY API: remove when legacy API removed
+func UploadCheck(oid string, size int64) (*ObjectResource, error) {
+	reqObj := &ObjectResource{
+		Oid:  oid,
+		Size: size,
+	}
+
+	by, err := json.Marshal(reqObj)
+	if err != nil {
+		return nil, errutil.Error(err)
+	}
+
+	req, err := NewRequest("POST", oid)
+	if err != nil {
+		return nil, errutil.Error(err)
+	}
+
+	req.Header.Set("Content-Type", MediaType)
+	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
+	req.ContentLength = int64(len(by))
+	req.Body = tools.NewReadSeekCloserWrapper(bytes.NewReader(by))
+
+	tracerx.Printf("api: uploading (%s)", oid)
+	res, obj, err := DoLegacyRequest(req)
+
+	if err != nil {
+		if errutil.IsAuthError(err) {
+			httputil.SetAuthType(req, res)
+			return UploadCheck(oid, size)
+		}
+
+		return nil, errutil.NewRetriableError(err)
+	}
+	httputil.LogTransfer("lfs.upload", res)
+
+	if res.StatusCode == 200 {
+		return nil, nil
+	}
+
+	if obj.Oid == "" {
+		obj.Oid = oid
+	}
+	if obj.Size == 0 {
+		obj.Size = reqObj.Size
+	}
+
+	return obj, nil
+}
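Editor's note: a hedged sketch of how the `BatchOrLegacy` entry point above is meant to be called while both APIs coexist. The OID and size values are illustrative; the function signature and the `"basic"` adapter name come from this diff and its tests.

```go
package main

import (
	"fmt"
	"log"

	"github.com/github/git-lfs/api"
)

func main() {
	// Ask the server about a single object for download. If the server does
	// not implement the batch API, the call transparently retries via the
	// serial legacy route.
	objects := []*api.ObjectResource{
		{Oid: "some-oid", Size: 123}, // hypothetical object
	}

	objs, adapterName, err := api.BatchOrLegacy(objects, "download", []string{"basic"})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("negotiated %q for %d object(s)\n", adapterName, len(objs))
}
```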
diff --git a/api/client.go b/api/client.go
new file mode 100644
index 00000000..cfc06cc5
--- /dev/null
+++ b/api/client.go
@@ -0,0 +1,75 @@
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package api

+import "github.com/github/git-lfs/config"
+
+type Operation string
+
+const (
+	UploadOperation   Operation = "upload"
+	DownloadOperation Operation = "download"
+)
+
+// Client exposes the LFS API to callers through a multitude of different
+// services and transport mechanisms. Callers can make a *RequestSchema using
+// any service that is attached to the Client, and then execute a request based
+// on that schema using the `Do()` method.
+//
+// A prototypical example follows:
+// ```
+// schema, apiResponse := client.Locks.Lock(request)
+// resp, err := client.Do(schema)
+// if err != nil {
+//	handleErr(err)
+// }
+//
+// fmt.Println(apiResponse.Lock)
+// ```
+type Client struct {
+	// Locks is the LockService used to interact with the Git LFS file-
+	// locking API.
+	Locks LockService
+
+	// lifecycle is the lifecycle used by all requests through this client.
+	lifecycle Lifecycle
+}
+
+// NewClient instantiates and returns a new instance of *Client, with the given
+// lifecycle.
+//
+// If no lifecycle is given, a HttpLifecycle is used by default.
+func NewClient(lifecycle Lifecycle) *Client {
+	if lifecycle == nil {
+		lifecycle = NewHttpLifecycle(config.Config)
+	}
+
+	return &Client{lifecycle: lifecycle}
+}
+
+// Do performs the request associated with the given *RequestSchema by
+// delegating into the Lifecycle in use.
+//
+// If any error was encountered while either building, executing or cleaning up
+// the request, then it will be returned immediately, and the request can be
+// treated as invalid.
+//
+// If no error occurred, an api.Response will be returned, along with a `nil`
+// error. At this point, the body of the response has been serialized into
+// `schema.Into`, and the body has been closed.
+func (c *Client) Do(schema *RequestSchema) (Response, error) {
+	req, err := c.lifecycle.Build(schema)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := c.lifecycle.Execute(req, schema.Into)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = c.lifecycle.Cleanup(resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
diff --git a/api/client_test.go b/api/client_test.go
new file mode 100644
index 00000000..d2005dba
--- /dev/null
+++ b/api/client_test.go
@@ -0,0 +1,76 @@
+package api_test
+
+import (
+	"errors"
+	"net/http"
+	"testing"
+
+	"github.com/github/git-lfs/api"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestClientUsesLifecycleToExecuteSchemas(t *testing.T) {
+	schema := new(api.RequestSchema)
+	req := new(http.Request)
+	resp := new(api.HttpResponse)
+
+	lifecycle := new(MockLifecycle)
+	lifecycle.On("Build", schema).Return(req, nil).Once()
+	lifecycle.On("Execute", req, schema.Into).Return(resp, nil).Once()
+	lifecycle.On("Cleanup", resp).Return(nil).Once()
+
+	client := api.NewClient(lifecycle)
+	r1, err := client.Do(schema)
+
+	assert.Equal(t, resp, r1)
+	assert.Nil(t, err)
+	lifecycle.AssertExpectations(t)
+}
+
+func TestClientHaltsIfSchemaCannotBeBuilt(t *testing.T) {
+	schema := new(api.RequestSchema)
+
+	lifecycle := new(MockLifecycle)
+	lifecycle.On("Build", schema).Return(nil, errors.New("uh-oh!")).Once()
+
+	client := api.NewClient(lifecycle)
+	resp, err := client.Do(schema)
+
+	lifecycle.AssertExpectations(t)
+	assert.Nil(t, resp)
+	assert.Equal(t, "uh-oh!", err.Error())
+}
+
+func TestClientHaltsIfSchemaCannotBeExecuted(t *testing.T) {
+	schema := new(api.RequestSchema)
+	req := new(http.Request)
+
+	lifecycle := new(MockLifecycle)
+	lifecycle.On("Build", schema).Return(req, nil).Once()
+	lifecycle.On("Execute", req, schema.Into).Return(nil, errors.New("uh-oh!")).Once()
+
+	client := api.NewClient(lifecycle)
+	resp, err := client.Do(schema)
+
+	lifecycle.AssertExpectations(t)
+	assert.Nil(t, resp)
+	assert.Equal(t, "uh-oh!", err.Error())
+}
+
+func TestClientReturnsCleanupErrors(t *testing.T) {
+	schema := new(api.RequestSchema)
+	req := new(http.Request)
+	resp := new(api.HttpResponse)
+
+	lifecycle := new(MockLifecycle)
+	lifecycle.On("Build", schema).Return(req, nil).Once()
+	lifecycle.On("Execute", req, schema.Into).Return(resp, nil).Once()
+	lifecycle.On("Cleanup", resp).Return(errors.New("uh-oh!")).Once()
+
+	client := api.NewClient(lifecycle)
+	r1, err := client.Do(schema)
+
+	lifecycle.AssertExpectations(t)
+	assert.Nil(t, r1)
+	assert.Equal(t, "uh-oh!", err.Error())
+}
diff --git a/api/download_test.go b/api/download_test.go
new file mode 100644
index 00000000..c338f0ca
--- /dev/null
+++ b/api/download_test.go
@@ -0,0 +1,377 @@
+package api_test
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/github/git-lfs/api"
+	"github.com/github/git-lfs/auth"
+	"github.com/github/git-lfs/config"
+	"github.com/github/git-lfs/errutil"
+	"github.com/github/git-lfs/httputil"
+)
+
+func TestSuccessfulDownload(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer func() {
+		RestoreCredentialsFunc()
+	}()
+
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	tmp := tempdir(t)
+	defer os.RemoveAll(tmp)
+
+	mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) {
+		t.Logf("Server: %s %s", r.Method, r.URL)
+		t.Logf("request header: %v", r.Header)
+
+		if r.Method != "GET" {
+			w.WriteHeader(405)
+			return
+		}
+
+		if r.Header.Get("Accept") != api.MediaType {
+			t.Error("Invalid Accept")
+		}
+
+		if r.Header.Get("Authorization") != expectedAuth(t, server) {
+			t.Error("Invalid Authorization")
+		}
+
+		obj := &api.ObjectResource{
+			Oid:  "oid",
+			Size: 4,
+			Actions: map[string]*api.LinkRelation{
+				"download": &api.LinkRelation{
+					Href:   server.URL + "/download",
+					Header: map[string]string{"A": "1"},
+				},
+			},
+		}
+
+		by, err := json.Marshal(obj)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		head := w.Header()
+		head.Set("Content-Type", api.MediaType)
+		head.Set("Content-Length", strconv.Itoa(len(by)))
+		w.WriteHeader(200)
+		w.Write(by)
+	})
+
+	defer config.Config.ResetConfig()
+	config.Config.SetConfig("lfs.batch", "false")
+	config.Config.SetConfig("lfs.url", server.URL+"/media")
+
+	obj, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: "oid"}, "download", []string{"basic"})
+	if err != nil {
+		if isDockerConnectionError(err) {
+			return
+		}
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	if obj.Size != 4 {
+		t.Errorf("unexpected size: %d", obj.Size)
+	}
+}
+
+// nearly identical to TestSuccessfulDownload
+// called multiple times to return different 3xx status codes
+func TestSuccessfulDownloadWithRedirects(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer func() {
+		RestoreCredentialsFunc()
+	}()
+
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	tmp := tempdir(t)
+	defer os.RemoveAll(tmp)
+
+	// all of these should work for GET requests
+	redirectCodes := []int{301, 302, 303, 307}
+	redirectIndex := 0
+
+	mux.HandleFunc("/redirect/objects/oid", func(w http.ResponseWriter, r *http.Request) {
+		t.Logf("Server: %s %s", r.Method, r.URL)
+		t.Logf("request header: %v", r.Header)
+
+		if r.Method != "GET" {
+			w.WriteHeader(405)
+			return
+		}
+
+		w.Header().Set("Location", server.URL+"/redirect2/objects/oid")
+		w.WriteHeader(redirectCodes[redirectIndex])
+		t.Logf("redirect with %d", redirectCodes[redirectIndex])
+	})
+
+	mux.HandleFunc("/redirect2/objects/oid", func(w http.ResponseWriter, r *http.Request) {
+		t.Logf("Server: %s %s", r.Method, r.URL)
+		t.Logf("request header: %v", r.Header)
+
+		if r.Method != "GET" {
+			w.WriteHeader(405)
+			return
+		}
+
+		w.Header().Set("Location", server.URL+"/media/objects/oid")
+		w.WriteHeader(redirectCodes[redirectIndex])
+		t.Logf("redirect again with %d", redirectCodes[redirectIndex])
+		redirectIndex += 1
+	})
+
+	mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) {
+		t.Logf("Server: %s %s", r.Method, r.URL)
+		t.Logf("request header: %v", r.Header)
+
+		if r.Method != "GET" {
+			w.WriteHeader(405)
+			return
+		}
+
+		if r.Header.Get("Accept") != api.MediaType {
+			t.Error("Invalid Accept")
+		}
+
+		if r.Header.Get("Authorization") != expectedAuth(t, server) {
+			t.Error("Invalid Authorization")
+		}
+
+		obj := &api.ObjectResource{
+			Oid:  "oid",
+			Size: 4,
+			Actions: map[string]*api.LinkRelation{
+				"download": &api.LinkRelation{
+					Href:   server.URL + "/download",
+					Header: map[string]string{"A": "1"},
+				},
+			},
+		}
+
+		by, err := json.Marshal(obj)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		head := w.Header()
+		head.Set("Content-Type", api.MediaType)
+		head.Set("Content-Length", strconv.Itoa(len(by)))
+		w.WriteHeader(200)
+		w.Write(by)
+	})
+
+	defer config.Config.ResetConfig()
+	config.Config.SetConfig("lfs.batch", "false")
+	config.Config.SetConfig("lfs.url", server.URL+"/redirect")
+
+	for _, redirect := range redirectCodes {
+		obj, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: "oid"}, "download", []string{"basic"})
+		if err != nil {
+			if isDockerConnectionError(err) {
+				return
+			}
+			t.Fatalf("unexpected error for %d status: %s", redirect, err)
+		}
+
+		if obj.Size != 4 {
+			t.Errorf("unexpected size for %d status: %d", redirect, obj.Size)
+		}
+	}
+}
+
+// nearly identical to TestSuccessfulDownload
+// the api request returns a custom Authorization header
+func TestSuccessfulDownloadWithAuthorization(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer func() {
+		RestoreCredentialsFunc()
+	}()
+
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	tmp := tempdir(t)
+	defer os.RemoveAll(tmp)
+
+	mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) {
+		t.Logf("Server: %s %s", r.Method, r.URL)
+		t.Logf("request header: %v", r.Header)
+
+		if r.Method != "GET" {
+			w.WriteHeader(405)
+			return
+		}
+
+		if r.Header.Get("Accept") != api.MediaType {
+			t.Error("Invalid Accept")
+		}
+
+		if r.Header.Get("Authorization") != expectedAuth(t, server) {
+			t.Error("Invalid Authorization")
+		}
+
+		obj := &api.ObjectResource{
+			Oid:  "oid",
+			Size: 4,
+			Actions: map[string]*api.LinkRelation{
+				"download": &api.LinkRelation{
+					Href: server.URL + "/download",
+					Header: map[string]string{
+						"A":             "1",
+						"Authorization": "custom",
+					},
+				},
+			},
+		}
+
+		by, err := json.Marshal(obj)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		head := w.Header()
+		head.Set("Content-Type", "application/json; charset=utf-8")
+		head.Set("Content-Length", strconv.Itoa(len(by)))
+		w.WriteHeader(200)
+		w.Write(by)
+	})
+
+	defer config.Config.ResetConfig()
+	config.Config.SetConfig("lfs.batch", "false")
+	config.Config.SetConfig("lfs.url", server.URL+"/media")
+	obj, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: "oid"}, "download", []string{"basic"})
+	if err != nil {
+		if isDockerConnectionError(err) {
+			return
+		}
+		t.Fatalf("unexpected error: %s", err)
+	}
+
+	if obj.Size != 4 {
+		t.Errorf("unexpected size: %d", obj.Size)
+	}
+}
+
+func TestDownloadAPIError(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer func() {
+		RestoreCredentialsFunc()
+	}()
+
+	mux := http.NewServeMux()
+	server := httptest.NewServer(mux)
+	defer server.Close()
+
+	tmp := tempdir(t)
+	defer os.RemoveAll(tmp)
+
+	mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) {
+		w.WriteHeader(404)
+	})
+
+	defer config.Config.ResetConfig()
+	config.Config.SetConfig("lfs.batch", "false")
+	config.Config.SetConfig("lfs.url", server.URL+"/media")
+	_, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: "oid"}, "download", []string{"basic"})
+	if err == nil {
+		t.Fatal("no error?")
+	}
+
+	if errutil.IsFatalError(err) {
+		t.Fatal("should not panic")
+	}
+
+	if isDockerConnectionError(err) {
+		return
+	}
+
+	if err.Error() != fmt.Sprintf(httputil.GetDefaultError(404), server.URL+"/media/objects/oid") {
+		t.Fatalf("Unexpected error: %s", err.Error())
+	}
+}
+
+// guards against connection errors that only seem to happen on debian docker
+// images.
+func isDockerConnectionError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	if os.Getenv("TRAVIS") == "true" {
+		return false
+	}
+
+	e := err.Error()
+	return strings.Contains(e, "connection reset by peer") ||
+		strings.Contains(e, "connection refused")
+}
+
+func tempdir(t *testing.T) string {
+	dir, err := ioutil.TempDir("", "git-lfs-test")
+	if err != nil {
+		t.Fatalf("Error getting temp dir: %s", err)
+	}
+	return dir
+}
+
+func expectedAuth(t *testing.T, server *httptest.Server) string {
+	u, err := url.Parse(server.URL)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	token := fmt.Sprintf("%s:%s", u.Host, "monkey")
+	return "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token)))
+}
+
+var (
+	TestCredentialsFunc auth.CredentialFunc
+	origCredentialsFunc auth.CredentialFunc
+)
+
+func init() {
+	TestCredentialsFunc = func(input auth.Creds, subCommand string) (auth.Creds, error) {
+		output := make(auth.Creds)
+		for key, value := range input {
+			output[key] = value
+		}
+		if _, ok := output["username"]; !ok {
+			output["username"] = input["host"]
+		}
+		output["password"] = "monkey"
+		return output, nil
+	}
+}
+
+// Override the credentials func for testing
+func SetupTestCredentialsFunc() {
+	origCredentialsFunc = auth.SetCredentialsFunc(TestCredentialsFunc)
+}
+
+// Put the original credentials func back
+func RestoreCredentialsFunc() {
+	auth.SetCredentialsFunc(origCredentialsFunc)
+}
diff --git a/api/http_lifecycle.go b/api/http_lifecycle.go
new file mode 100644
index 00000000..ecac392c
--- /dev/null
+++ b/api/http_lifecycle.go
@@ -0,0 +1,184 @@
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package api
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+
+	"github.com/github/git-lfs/auth"
+	"github.com/github/git-lfs/config"
+	"github.com/github/git-lfs/httputil"
+)
+
+var (
+	// ErrNoOperationGiven is an error which is returned when no operation
+	// is provided in a RequestSchema object.
+	ErrNoOperationGiven = errors.New("lfs/api: no operation provided in schema")
+)
+
+// EndpointSource is an interface which encapsulates the behavior of returning
+// `config.Endpoint`s based on a particular operation.
+type EndpointSource interface {
+	// Endpoint returns the `config.Endpoint` associated with a given
+	// operation.
+	Endpoint(operation string) config.Endpoint
+}
+
+// HttpLifecycle serves as the default implementation of the Lifecycle interface
+// for HTTP requests. Internally, it leverages the *http.Client type to execute
+// HTTP requests against a root *url.URL, as provided by the EndpointSource
+// given in `NewHttpLifecycle`.
+type HttpLifecycle struct {
+	endpoints EndpointSource
+}
+
+var _ Lifecycle = new(HttpLifecycle)
+
+// NewHttpLifecycle initializes a new instance of the *HttpLifecycle type with
+// the given endpoint source (see above).
+func NewHttpLifecycle(endpoints EndpointSource) *HttpLifecycle {
+	return &HttpLifecycle{
+		endpoints: endpoints,
+	}
+}
+
+// Build implements the Lifecycle.Build function.
+//
+// HttpLifecycle in particular builds an absolute path by parsing and then
+// relativizing the `schema.Path` with respect to the root URL of the endpoint
+// for the schema's operation. If there was an error in determining this URL,
+// then that error will be returned.
+//
+// After this is complete, a body is attached to the request if the
+// schema contained one. If a body was present, and an error occurred while
+// serializing it into JSON, then that error will be returned and the
+// *http.Request will not be generated.
+//
+// In all cases, credentials are attached to the HTTP request as described in
+// the `auth` package (see github.com/github/git-lfs/auth#GetCreds).
+//
+// Finally, all of these components are combined together and the resulting
+// request is returned.
+func (l *HttpLifecycle) Build(schema *RequestSchema) (*http.Request, error) {
+	path, err := l.absolutePath(schema.Operation, schema.Path)
+	if err != nil {
+		return nil, err
+	}
+
+	body, err := l.body(schema)
+	if err != nil {
+		return nil, err
+	}
+
+	req, err := http.NewRequest(schema.Method, path.String(), body)
+	if err != nil {
+		return nil, err
+	}
+
+	if _, err = auth.GetCreds(req); err != nil {
+		return nil, err
+	}
+
+	req.URL.RawQuery = l.queryParameters(schema).Encode()
+
+	return req, nil
+}
+
+// Execute implements the Lifecycle.Execute function.
+//
+// Internally, the *http.Client is used to execute the underlying *http.Request.
+// If the client returned an error corresponding to a failure to make the
+// request, then that error will be returned immediately, and the response is
+// guaranteed not to be serialized.
+//
+// Once the response has been gathered from the server, it is unmarshaled into
+// the given `into interface{}`, which is identical to the one provided in the
+// original RequestSchema. If an error occurred while decoding, then that error
+// is returned.
+//
+// Otherwise, the api.Response is returned, along with no error, signaling that
+// the request completed successfully.
+func (l *HttpLifecycle) Execute(req *http.Request, into interface{}) (Response, error) {
+	resp, err := httputil.DoHttpRequestWithRedirects(req, []*http.Request{}, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO(taylor): check status >=500, handle content type, return error,
+	// halt immediately.
+
+	if into != nil {
+		decoder := json.NewDecoder(resp.Body)
+		if err = decoder.Decode(into); err != nil {
+			return nil, err
+		}
+	}
+
+	return WrapHttpResponse(resp), nil
+}
+
+// Cleanup implements the Lifecycle.Cleanup function by closing the Body
+// attached to the response.
+func (l *HttpLifecycle) Cleanup(resp Response) error {
+	return resp.Body().Close()
+}
+
+// absolutePath returns the absolute path made by combining a given relative
+// path with the root URL of the endpoint corresponding to the given operation.
+//
+// If there was an error in parsing the relative path, then that error will be
+// returned.
+func (l *HttpLifecycle) absolutePath(operation Operation, path string) (*url.URL, error) {
+	if len(operation) == 0 {
+		return nil, ErrNoOperationGiven
+	}
+
+	root, err := url.Parse(l.endpoints.Endpoint(string(operation)).Url)
+	if err != nil {
+		return nil, err
+	}
+
+	rel, err := url.Parse(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return root.ResolveReference(rel), nil
+}
+
+// body returns an io.Reader which reads out a JSON-encoded copy of the payload
+// attached to a given *RequestSchema, if it is present. If no body is present
+// in the request, then nil is returned instead.
+//
+// If an error was encountered while attempting to marshal the body, then that
+// will be returned instead, along with a nil io.Reader.
+func (l *HttpLifecycle) body(schema *RequestSchema) (io.ReadCloser, error) {
+	if schema.Body == nil {
+		return nil, nil
+	}
+
+	body, err := json.Marshal(schema.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	return ioutil.NopCloser(bytes.NewReader(body)), nil
+}
+
+// queryParameters returns a url.Values containing all of the provided query
+// parameters as given in the *RequestSchema. If no query parameters were given,
+// then an empty url.Values is returned instead.
+func (l *HttpLifecycle) queryParameters(schema *RequestSchema) url.Values {
+	vals := url.Values{}
+	if schema.Query != nil {
+		for k, v := range schema.Query {
+			vals.Add(k, v)
+		}
+	}
+
+	return vals
+}
diff --git a/api/http_lifecycle_test.go b/api/http_lifecycle_test.go
new file mode 100644
index 00000000..154397c1
--- /dev/null
+++ b/api/http_lifecycle_test.go
@@ -0,0 +1,148 @@
+package api_test
+
+import (
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/github/git-lfs/api"
+	"github.com/github/git-lfs/config"
+	"github.com/stretchr/testify/assert"
+)
+
+type NopEndpointSource struct {
+	Root string
+}
+
+func (e *NopEndpointSource) Endpoint(op string) config.Endpoint {
+	return config.Endpoint{Url: e.Root}
+}
+
+var (
+	source = &NopEndpointSource{"https://example.com"}
+)
+
+func TestHttpLifecycleMakesRequestsAgainstAbsolutePath(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	l := api.NewHttpLifecycle(source)
+	req, err := l.Build(&api.RequestSchema{
+		Path:      "/foo",
+		Operation: api.DownloadOperation,
+	})
+
+	assert.Nil(t, err)
+	assert.Equal(t, "https://example.com/foo", req.URL.String())
+}
+
+func TestHttpLifecycleAttachesQueryParameters(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	l := api.NewHttpLifecycle(source)
+	req, err := l.Build(&api.RequestSchema{
+		Path:      "/foo",
+		Operation: api.DownloadOperation,
+		Query: map[string]string{
+			"a": "b",
+		},
+	})
+
+	assert.Nil(t, err)
+	assert.Equal(t, "https://example.com/foo?a=b", req.URL.String())
+}
+
+func TestHttpLifecycleAttachesBodyWhenPresent(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	l := api.NewHttpLifecycle(source)
+	req, err := l.Build(&api.RequestSchema{
+		Operation: api.DownloadOperation,
+		Body: struct {
+			Foo string `json:"foo"`
+		}{"bar"},
+	})
+
+	assert.Nil(t, err)
+
+	body, err := ioutil.ReadAll(req.Body)
+	assert.Nil(t, err)
+	assert.Equal(t, "{\"foo\":\"bar\"}", string(body))
+}
+
+func TestHttpLifecycleDoesNotAttachBodyWhenEmpty(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	l := api.NewHttpLifecycle(source)
+	req, err := l.Build(&api.RequestSchema{
+		Operation: api.DownloadOperation,
+	})
+
+	assert.Nil(t, err)
+	assert.Nil(t, req.Body)
+}
+
+func TestHttpLifecycleErrsWithoutOperation(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	l := api.NewHttpLifecycle(source)
+	req, err := l.Build(&api.RequestSchema{
+		Path: "/foo",
+	})
+
+	assert.Equal(t, api.ErrNoOperationGiven, err)
+	assert.Nil(t, req)
+}
+
+func TestHttpLifecycleExecutesRequestWithoutBody(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	var called bool
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		called = true
+
+		assert.Equal(t, "/path", r.URL.RequestURI())
+	}))
+	defer server.Close()
+
+	req, _ := http.NewRequest("GET", server.URL+"/path", nil)
+
+	l := api.NewHttpLifecycle(source)
+	_, err := l.Execute(req, nil)
+
+	assert.True(t, called)
+	assert.Nil(t, err)
+}
+
+func TestHttpLifecycleExecutesRequestWithBody(t *testing.T) {
+	SetupTestCredentialsFunc()
+	defer RestoreCredentialsFunc()
+
+	type Response struct {
+		Foo string `json:"foo"`
+	}
+
+	var called bool
+	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		called = true
+
+		w.Write([]byte("{\"foo\":\"bar\"}"))
+	}))
+	defer server.Close()
+
+	req, _ := http.NewRequest("GET", server.URL+"/path", nil)
+
+	l := api.NewHttpLifecycle(source)
+	resp := new(Response)
+	_, err := l.Execute(req, resp)
+
+	assert.True(t, called)
+	assert.Nil(t, err)
+	assert.Equal(t, "bar", resp.Foo)
+}
diff --git a/api/http_response.go b/api/http_response.go
new file mode 100644
index 00000000..5d923cff
--- /dev/null
+++ b/api/http_response.go
@@ -0,0 +1,53 @@
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package api
+
+import (
+	"io"
+	"net/http"
+)
+
+// HttpResponse is an implementation of the Response interface capable of
+// handling HTTP responses. At its core, it works by wrapping an *http.Response.
+type HttpResponse struct {
+	// r is the underlying *http.Response that is being wrapped.
+	r *http.Response
+}
+
+// WrapHttpResponse returns a wrapped *HttpResponse implementing the Response
+// type by using the given *http.Response.
+func WrapHttpResponse(r *http.Response) *HttpResponse {
+	return &HttpResponse{
+		r: r,
+	}
+}
+
+var _ Response = new(HttpResponse)
+
+// Status implements the Response.Status function, and returns the status given
+// by the underlying *http.Response.
+func (h *HttpResponse) Status() string {
+	return h.r.Status
+}
+
+// StatusCode implements the Response.StatusCode function, and returns the
+// status code given by the underlying *http.Response.
+func (h *HttpResponse) StatusCode() int {
+	return h.r.StatusCode
+}
+
+// Proto implements the Response.Proto function, and returns the proto given by
+// the underlying *http.Response.
+func (h *HttpResponse) Proto() string {
+	return h.r.Proto
+}
+
+// Body implements the Response.Body function, and returns the body as given by
+// the underlying *http.Response.
+func (h *HttpResponse) Body() io.ReadCloser {
+	return h.r.Body
+}
+
+// Header returns the underlying *http.Response's header.
+func (h *HttpResponse) Header() http.Header {
+	return h.r.Header
+}
diff --git a/api/http_response_test.go b/api/http_response_test.go
new file mode 100644
index 00000000..779a27fc
--- /dev/null
+++ b/api/http_response_test.go
@@ -0,0 +1,27 @@
+package api_test
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+	"testing"
+
+	"github.com/github/git-lfs/api"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestWrappedHttpResponsesMatchInternal(t *testing.T) {
+	resp := &http.Response{
+		Status:     "200 OK",
+		StatusCode: 200,
+		Proto:      "HTTP/1.1",
+		Body:       ioutil.NopCloser(new(bytes.Buffer)),
+	}
+	wrapped := api.WrapHttpResponse(resp)
+
+	assert.Equal(t, resp.Status, wrapped.Status())
+	assert.Equal(t, resp.StatusCode, wrapped.StatusCode())
+	assert.Equal(t, resp.Proto, wrapped.Proto())
+	assert.Equal(t, resp.Body, wrapped.Body())
+	assert.Equal(t, resp.Header, wrapped.Header())
+}
diff --git a/api/lifecycle.go b/api/lifecycle.go
new file mode 100644
index 00000000..d8737f58
--- /dev/null
+++ b/api/lifecycle.go
@@ -0,0 +1,32 @@
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package api
+
+import "net/http"
+
+// TODO: extract interface for *http.Request; update methods. This will be in a
+// later iteration of the API client.
+
+// A Lifecycle represents and encapsulates the behavior of an API request from
+// inception to cleanup.
+//
+// At a high level, it turns an *api.RequestSchema into an
+// api.Response (and optionally an error). Lifecycle does so by providing
+// several more fine-grained methods that are used by the client to manage the
+// lifecycle of a request in a platform-agnostic fashion.
+type Lifecycle interface {
+	// Build creates a sendable request by using the given RequestSchema as
+	// a model.
+	Build(req *RequestSchema) (*http.Request, error)
+
+	// Execute transforms the generated request into a wrapped response (and
+	// optionally an error, if the request failed), and serializes the
+	// response into the `into interface{}`, if one was provided.
+	Execute(req *http.Request, into interface{}) (Response, error)
+
+	// Cleanup is called after the request has been completed and its
+	// response has been processed. It is meant to perform any post-request
+	// actions necessary, like closing or resetting the connection. If an
+	// error was encountered in doing this operation, it should be returned
+	// from this method, otherwise nil.
+	Cleanup(resp Response) error
+}
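Editor's note: because `Client` depends only on the `Lifecycle` interface above, alternative transports or instrumentation can be slotted in without touching the services. Below is a hedged sketch of a delegating lifecycle that traces each phase; the wrapper type and log messages are invented for illustration, while the interface methods and constructors come from this diff.

```go
package main

import (
	"log"
	"net/http"

	"github.com/github/git-lfs/api"
	"github.com/github/git-lfs/config"
)

// tracingLifecycle is a hypothetical decorator that logs each phase of a
// request while delegating the real work to an underlying api.Lifecycle.
type tracingLifecycle struct {
	inner api.Lifecycle
}

func (t *tracingLifecycle) Build(schema *api.RequestSchema) (*http.Request, error) {
	log.Printf("build: %s %s", schema.Method, schema.Path)
	return t.inner.Build(schema)
}

func (t *tracingLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) {
	log.Printf("execute: %s", req.URL)
	return t.inner.Execute(req, into)
}

func (t *tracingLifecycle) Cleanup(resp api.Response) error {
	log.Printf("cleanup: %s", resp.Status())
	return t.inner.Cleanup(resp)
}

func main() {
	// Wrap the default HTTP lifecycle; NewClient would otherwise construct
	// one itself when passed nil.
	_ = api.NewClient(&tracingLifecycle{
		inner: api.NewHttpLifecycle(config.Config),
	})
}
```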
diff --git a/api/lifecycle_test.go b/api/lifecycle_test.go
new file mode 100644
index 00000000..e2260ecd
--- /dev/null
+++ b/api/lifecycle_test.go
@@ -0,0 +1,39 @@
+package api_test
+
+import (
+	"net/http"
+
+	"github.com/github/git-lfs/api"
+	"github.com/stretchr/testify/mock"
+)
+
+type MockLifecycle struct {
+	mock.Mock
+}
+
+var _ api.Lifecycle = new(MockLifecycle)
+
+func (l *MockLifecycle) Build(req *api.RequestSchema) (*http.Request, error) {
+	args := l.Called(req)
+
+	if args.Get(0) == nil {
+		return nil, args.Error(1)
+	}
+
+	return args.Get(0).(*http.Request), args.Error(1)
+}
+
+func (l *MockLifecycle) Execute(req *http.Request, into interface{}) (api.Response, error) {
+	args := l.Called(req, into)
+
+	if args.Get(0) == nil {
+		return nil, args.Error(1)
+	}
+
+	return args.Get(0).(api.Response), args.Error(1)
+}
+
+func (l *MockLifecycle) Cleanup(resp api.Response) error {
+	args := l.Called(resp)
+	return args.Error(0)
+}
diff --git a/api/lock_api.go b/api/lock_api.go
new file mode 100644
index 00000000..472556f2
--- /dev/null
+++ b/api/lock_api.go
@@ -0,0 +1,256 @@
+package api
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/github/git-lfs/config"
+)
+
+// LockService is an API service which encapsulates the Git LFS Locking API.
+type LockService struct{}
+
+// Lock generates a *RequestSchema that is used to perform the "attempt lock"
+// API method.
+//
+// If a lock is already present, or if the server was unable to generate the
+// lock, the Err field of the LockResponse type will be populated with a more
+// detailed error describing the situation.
+//
+// If the caller does not have the minimum commit necessary to obtain the lock
+// on that file, then the CommitNeeded field will be populated in the
+// LockResponse, signaling that more commits are needed.
+//
+// In the successful case, a new Lock will be returned and granted to the
+// caller.
+func (s *LockService) Lock(req *LockRequest) (*RequestSchema, *LockResponse) {
+	var resp LockResponse
+
+	return &RequestSchema{
+		Method:    "POST",
+		Path:      "/locks",
+		Operation: UploadOperation,
+		Body:      req,
+		Into:      &resp,
+	}, &resp
+}
+
+// Search generates a *RequestSchema that is used to perform the "search for
+// locks" API method.
+//
+// Searches can be scoped to match specific parameters by using the Filters
+// field in the given LockSearchRequest. If no matching Locks were found, then
+// the Locks field of the response will be empty.
+//
+// If the client expects that the server will return many locks, then the client
+// can choose to paginate that response. Pagination is performed by limiting the
+// number of results per page, and the server will inform the client of the ID
+// of the last returned lock. Since the server is guaranteed to return results
+// in reverse chronological order, the client simply sends the last ID it
+// processed along with the next request, and the server will continue where it
+// left off.
+//
+// If the server was unable to process the lock search request, then the Error
+// field will be populated in the response.
+//
+// In the successful case, one or more locks will be returned as a part of the
+// response.
+func (s *LockService) Search(req *LockSearchRequest) (*RequestSchema, *LockList) {
+	var resp LockList
+
+	query := make(map[string]string)
+	for _, filter := range req.Filters {
+		query[filter.Property] = filter.Value
+	}
+
+	if req.Cursor != "" {
+		query["cursor"] = req.Cursor
+	}
+
+	if req.Limit != 0 {
+		query["limit"] = strconv.Itoa(req.Limit)
+	}
+
+	return &RequestSchema{
+		Method:    "GET",
+		Path:      "/locks",
+		Operation: UploadOperation,
+		Query:     query,
+		Into:      &resp,
+	}, &resp
+}
+
+// Unlock generates a *RequestSchema that is used to perform the "unlock" API
+// method, against a particular lock potentially with --force.
+//
+// This method's corresponding response type will either contain a reference to
+// the lock that was unlocked, or an error that was experienced by the server in
+// unlocking it.
+func (s *LockService) Unlock(id string, force bool) (*RequestSchema, *UnlockResponse) {
+	var resp UnlockResponse
+
+	return &RequestSchema{
+		Method:    "POST",
+		Path:      fmt.Sprintf("/locks/%s/unlock", id),
+		Operation: UploadOperation,
+		Body:      &UnlockRequest{id, force},
+		Into:      &resp,
+	}, &resp
+}
+
+// Lock represents a single lock held against a particular path.
+//
+// Locks returned from the API may or may not be currently active, according to
+// the Expired flag.
+type Lock struct {
+	// Id is the unique identifier corresponding to this particular Lock. It
+	// must be consistent with the local copy, and the server's copy.
+	Id string `json:"id"`
+	// Path is an absolute path to the file that is locked as a part of this
+	// lock.
+	Path string `json:"path"`
+	// Committer is the author who initiated this lock.
+	Committer Committer `json:"committer"`
+	// CommitSHA is the commit that this Lock was created against. It is
+	// strictly equal to the SHA of the minimum commit negotiated in order
+	// to create this lock.
+	CommitSHA string `json:"commit_sha"`
+	// LockedAt is a required parameter that represents the instant in time
+	// that this lock was created. For most server implementations, this
+	// should be set to the instant at which the lock was initially
+	// received.
+	LockedAt time.Time `json:"locked_at"`
+	// UnlockedAt is an optional parameter that represents the instant in
+	// time that the lock stopped being active. If the lock is still active,
+	// the server can either a) not send this field, or b) send the
+	// zero-value of time.Time.
+	UnlockedAt time.Time `json:"unlocked_at,omitempty"`
+}
+
+// Active returns whether or not the given lock is still active against the file
+// that it is protecting.
+func (l *Lock) Active() bool {
+	return l.UnlockedAt.IsZero()
+}
+
+// Committer represents a "First Last " pair.
+type Committer struct {
+	// Name is the name of the individual who would like to obtain the
+	// lock, for instance: "Rick Olson".
+	Name string `json:"name"`
+	// Email is the email associated with the individual who would
+	// like to obtain the lock, for instance: "rick@github.com".
+	Email string `json:"email"`
+}
+
+// CurrentCommitter returns a Committer instance populated with the same
+// credentials as would be used to author a commit. In particular, the
+// "user.name" and "user.email" configuration values are used from the
+// config.Config singleton.
+func CurrentCommitter() Committer {
+	name, _ := config.Config.GitConfig("user.name")
+	email, _ := config.Config.GitConfig("user.email")
+
+	return Committer{name, email}
+}
+
+// LockRequest encapsulates the payload sent across the API when a client would
+// like to obtain a lock against a particular path on a given remote.
+type LockRequest struct {
+	// Path is the path that the client would like to obtain a lock against.
+	Path string `json:"path"`
+	// LatestRemoteCommit is the SHA of the last known commit from the
+	// remote that we are trying to create the lock against, as found in
+	// `.git/refs/origin/`.
+	LatestRemoteCommit string `json:"latest_remote_commit"`
+	// Committer is the individual that wishes to obtain the lock.
+	Committer Committer `json:"committer"`
+}
+
+// LockResponse encapsulates the information sent over the API in response to
+// a `LockRequest`.
+type LockResponse struct {
+	// Lock is the Lock that was optionally created in response to the
+	// payload that was sent (see above). If the lock already exists, then
+	// the existing lock is sent in this field instead, and the author of
+	// that lock remains the same, meaning that the client failed to obtain
+	// that lock. An HTTP status of "409 - Conflict" is used here.
+	//
+	// If the lock was unable to be created, this field will hold the
+	// zero-value of Lock and the Err field will provide a more detailed set
+	// of information.
+	//
+	// If an error was experienced in creating this lock, then the
+	// zero-value of Lock should be sent here instead.
+	Lock *Lock `json:"lock"`
+	// CommitNeeded holds the minimum commit SHA that the client must have
+	// to obtain the lock.
+	CommitNeeded string `json:"commit_needed,omitempty"`
+	// Err is the optional error that was encountered while trying to create
+	// the above lock.
+	Err string `json:"error,omitempty"`
+}
+
+// UnlockRequest encapsulates the data sent in an API request to remove a lock.
+type UnlockRequest struct {
+	// Id is the Id of the lock that the user wishes to unlock.
+	Id string `json:"id"`
+	// Force determines whether or not the lock should be "forcibly"
+	// unlocked; that is to say whether or not a given individual should be
+	// able to break a different individual's lock.
+	Force bool `json:"force"`
+}
+
+// UnlockResponse is the result sent back from the API when asked to remove a
+// lock.
+type UnlockResponse struct {
+	// Lock is the lock corresponding to the asked-about lock in the
+	// `UnlockPayload` (see above). If no matching lock was found, this
+	// field will take the zero-value of Lock, and Err will be non-nil.
+	Lock *Lock `json:"lock"`
+	// Err is an optional field which holds any error that was experienced
+	// while removing the lock.
+	Err string `json:"error,omitempty"`
+}
+
+// Filter represents a single qualifier to apply against a set of locks.
+type Filter struct {
+	// Property is the property to search against.
+	// Value is the value that the property must take.
+	Property, Value string
+}
+
+// LockSearchRequest encapsulates the request sent to the server when the client
+// would like a list of locks that match the given criteria.
+type LockSearchRequest struct {
+	// Filters is the set of filters to query against. If the client wishes
+	// to obtain a list of all locks, an empty array should be passed here.
+	Filters []Filter
+	// Cursor is an optional field used to tell the server which lock was
+	// seen last, if scanning through multiple pages of results.
+	//
+	// Servers must return a list of locks sorted in reverse chronological
+	// order, so the Cursor provides a consistent method of viewing all
+	// locks, even if more were created between two requests.
+	Cursor string
+	// Limit is the maximum number of locks to return in a single page.
+	Limit int
+}
+
+// LockList encapsulates a set of Locks.
+type LockList struct {
+	// Locks is the set of locks returned back, typically matching the query
+	// parameters sent in the LockListRequest call. If no locks were matched
+	// from a given query, then `Locks` will be represented as an empty
+	// array.
+	Locks []Lock `json:"locks"`
+	// NextCursor returns the Id of the Lock the client should update its
+	// cursor to, if there are multiple pages of results for a particular
+	// `LockListRequest`.
+	NextCursor string `json:"next_cursor,omitempty"`
+	// Err populates any error that was encountered during the search. If no
+	// error was encountered and the operation was successful, then a value
+	// of nil will be passed here.
+	Err string `json:"error,omitempty"`
+}
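Editor's note: the cursor protocol documented on `Search` and `LockList` above suggests a simple pagination loop. This is a hedged sketch, not part of the PR; the `allLocks` helper and the page limit are invented, while `LockService.Search`, `Client.Do`, and the `NextCursor` field come from this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/github/git-lfs/api"
)

// allLocks walks every page of lock search results by feeding each response's
// NextCursor back into the next request, per the documented protocol.
func allLocks(client *api.Client) ([]api.Lock, error) {
	var locks []api.Lock
	cursor := ""

	for {
		schema, list := client.Locks.Search(&api.LockSearchRequest{
			Cursor: cursor,
			Limit:  100, // hypothetical page size
		})

		if _, err := client.Do(schema); err != nil {
			return nil, err
		}
		if list.Err != "" {
			return nil, fmt.Errorf("lfs/api: %s", list.Err)
		}

		locks = append(locks, list.Locks...)

		// An empty NextCursor means the server has no further pages.
		if list.NextCursor == "" {
			return locks, nil
		}
		cursor = list.NextCursor
	}
}

func main() {
	locks, err := allLocks(api.NewClient(nil))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d lock(s)\n", len(locks))
}
```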
Committer: api.Committer{ + Name: "Jane Doe", + Email: "jane@example.com", + }, + LockedAt: time.Now(), + }, + }) +} + +func TestLockResponseWithUnlockedLock(t *testing.T) { + schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ + Lock: &api.Lock{ + Id: "some-lock-id", + Path: "/lock/path", + Committer: api.Committer{ + Name: "Jane Doe", + Email: "jane@example.com", + }, + LockedAt: time.Now(), + UnlockedAt: time.Now(), + }, + }) +} + +func TestLockResponseWithError(t *testing.T) { + schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ + Err: "some error", + }) +} + +func TestLockResponseWithCommitNeeded(t *testing.T) { + schema.Validate(t, schema.LockResponseSchema, &api.LockResponse{ + CommitNeeded: "deadbeef", + }) +} + +func TestLockResponseInvalidWithCommitAndError(t *testing.T) { + schema.Refute(t, schema.LockResponseSchema, &api.LockResponse{ + Err: "some error", + CommitNeeded: "deadbeef", + }) +} + +func TestUnlockRequest(t *testing.T) { + schema.Validate(t, schema.UnlockRequestSchema, &api.UnlockRequest{ + Id: "some-lock-id", + Force: false, + }) +} + +func TestUnlockResponseWithLock(t *testing.T) { + schema.Validate(t, schema.UnlockResponseSchema, &api.UnlockResponse{ + Lock: &api.Lock{ + Id: "some-lock-id", + }, + }) +} + +func TestUnlockResponseWithError(t *testing.T) { + schema.Validate(t, schema.UnlockResponseSchema, &api.UnlockResponse{ + Err: "some-error", + }) +} + +func TestUnlockResponseDoesNotAllowLockAndError(t *testing.T) { + schema.Refute(t, schema.UnlockResponseSchema, &api.UnlockResponse{ + Lock: &api.Lock{ + Id: "some-lock-id", + }, + Err: "some-error", + }) +} + +func TestLockListWithLocks(t *testing.T) { + schema.Validate(t, schema.LockListSchema, &api.LockList{ + Locks: []api.Lock{ + api.Lock{Id: "foo"}, + api.Lock{Id: "bar"}, + }, + }) +} + +func TestLockListWithNoResults(t *testing.T) { + schema.Validate(t, schema.LockListSchema, &api.LockList{ + Locks: []api.Lock{}, + }) +} + +func TestLockListWithNextCursor(t *testing.T) { + schema.Validate(t, schema.LockListSchema, &api.LockList{ + Locks: []api.Lock{ + api.Lock{Id: "foo"}, + api.Lock{Id: "bar"}, + }, + NextCursor: "baz", + }) +} + +func TestLockListWithError(t *testing.T) { + schema.Validate(t, schema.LockListSchema, &api.LockList{ + Err: "some error", + }) +} + +func TestLockListWithErrorAndLocks(t *testing.T) { + schema.Refute(t, schema.LockListSchema, &api.LockList{ + Locks: []api.Lock{ + api.Lock{Id: "foo"}, + api.Lock{Id: "bar"}, + }, + Err: "this isn't possible!", + }) +} diff --git a/api/object.go b/api/object.go new file mode 100644 index 00000000..974cfbb0 --- /dev/null +++ b/api/object.go @@ -0,0 +1,80 @@ +package api + +import ( + "errors" + "fmt" + "net/http" + "time" + + "github.com/github/git-lfs/httputil" +) + +type ObjectError struct { + Code int `json:"code"` + Message string `json:"message"` +} + +func (e *ObjectError) Error() string { + return fmt.Sprintf("[%d] %s", e.Code, e.Message) +} + +type ObjectResource struct { + Oid string `json:"oid,omitempty"` + Size int64 `json:"size"` + Actions map[string]*LinkRelation `json:"actions,omitempty"` + Links map[string]*LinkRelation `json:"_links,omitempty"` + Error *ObjectError `json:"error,omitempty"` +} + +// TODO LEGACY API: remove when legacy API removed +func (o *ObjectResource) NewRequest(relation, method string) (*http.Request, error) { + rel, ok := o.Rel(relation) + if !ok { + if relation == "download" { + return nil, errors.New("Object not found on the server.") + } + return nil, fmt.Errorf("No %q action for 
this object.", relation) + + } + + req, err := httputil.NewHttpRequest(method, rel.Href, rel.Header) + if err != nil { + return nil, err + } + + return req, nil +} + +func (o *ObjectResource) Rel(name string) (*LinkRelation, bool) { + var rel *LinkRelation + var ok bool + + if o.Actions != nil { + rel, ok = o.Actions[name] + } else { + rel, ok = o.Links[name] + } + + return rel, ok +} + +// IsExpired returns true if any of the actions in this object resource have an +// ExpiresAt field that is after the given instant "now". +// +// If the object contains no actions, or none of the actions it does contain +// have non-zero ExpiresAt fields, the object is not expired. +func (o *ObjectResource) IsExpired(now time.Time) bool { + for _, a := range o.Actions { + if !a.ExpiresAt.IsZero() && a.ExpiresAt.Before(now) { + return true + } + } + + return false +} + +type LinkRelation struct { + Href string `json:"href"` + Header map[string]string `json:"header,omitempty"` + ExpiresAt time.Time `json:"expires_at,omitempty"` +} diff --git a/api/object_test.go b/api/object_test.go new file mode 100644 index 00000000..9aed52e4 --- /dev/null +++ b/api/object_test.go @@ -0,0 +1,49 @@ +package api_test + +import ( + "testing" + "time" + + "github.com/github/git-lfs/api" + "github.com/stretchr/testify/assert" +) + +func TestObjectsWithNoActionsAreNotExpired(t *testing.T) { + o := &api.ObjectResource{ + Oid: "some-oid", + Actions: map[string]*api.LinkRelation{}, + } + + assert.False(t, o.IsExpired(time.Now())) +} + +func TestObjectsWithZeroValueTimesAreNotExpired(t *testing.T) { + o := &api.ObjectResource{ + Oid: "some-oid", + Actions: map[string]*api.LinkRelation{ + "upload": &api.LinkRelation{ + Href: "http://your-lfs-server.com", + ExpiresAt: time.Time{}, + }, + }, + } + + assert.False(t, o.IsExpired(time.Now())) +} + +func TestObjectsWithExpirationDatesAreExpired(t *testing.T) { + now := time.Now() + expires := time.Now().Add(-60 * 60 * time.Second) + + o := &api.ObjectResource{ + Oid: "some-oid", + Actions: map[string]*api.LinkRelation{ + "upload": &api.LinkRelation{ + Href: "http://your-lfs-server.com", + ExpiresAt: expires, + }, + }, + } + + assert.True(t, o.IsExpired(now)) +} diff --git a/api/request_schema.go b/api/request_schema.go new file mode 100644 index 00000000..d26d01aa --- /dev/null +++ b/api/request_schema.go @@ -0,0 +1,21 @@ +// NOTE: Subject to change, do not rely on this package from outside git-lfs source +package api + +// RequestSchema provides a schema from which to generate sendable requests. +type RequestSchema struct { + // Method is the method that should be used when making a particular API + // call. + Method string + // Path is the relative path that this API call should be made against. + Path string + // Operation is the operation used to determine which endpoint to make + // the request against (see github.com/github/git-lfs/config). + Operation Operation + // Query is the query parameters used in the request URI. + Query map[string]string + // Body is the body of the request. + Body interface{} + // Into is an optional field used to represent the data structure into + // which a response should be serialized. 
+	Into interface{}
+}
diff --git a/api/request_schema_test.go b/api/request_schema_test.go
new file mode 100644
index 00000000..e5ae66be
--- /dev/null
+++ b/api/request_schema_test.go
@@ -0,0 +1,23 @@
+package api_test
+
+import (
+	"testing"
+
+	"github.com/github/git-lfs/api"
+	"github.com/stretchr/testify/assert"
+)
+
+// AssertRequestSchema encapsulates a single assertion of equality against two
+// generated RequestSchema instances.
+//
+// This assertion is meant only to test that the request schema generated by an
+// API service matches what we expect it to be. It does not make use of the
+// *api.Client or any particular lifecycle, nor does it spin up a test server.
+// All of that behavior is tested at a higher stratum in the client/lifecycle
+// tests.
+//
+// - t is the *testing.T used to perform the assertion.
+// - Expected is the *api.RequestSchema that we expected to be generated.
+// - Got is the *api.RequestSchema that was generated by a service.
+func AssertRequestSchema(t *testing.T, expected, got *api.RequestSchema) {
+	assert.Equal(t, expected, got)
+}
diff --git a/api/response.go b/api/response.go
new file mode 100644
index 00000000..294cd2f2
--- /dev/null
+++ b/api/response.go
@@ -0,0 +1,24 @@
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package api
+
+import "io"
+
+// Response is an interface that represents a response returned as a result of
+// executing an API call. It is designed to represent itself across multiple
+// response types, be it HTTP, SSH, or something else.
+//
+// The Response interface is meant to be small enough to stay sufficiently
+// general, while remaining easily accessible if a caller needs more
+// information specific to a particular protocol.
+type Response interface {
+	// Status is a human-readable string representing the status the
+	// response was returned with.
+	Status() string
+	// StatusCode is the numeric code associated with a particular status.
+	StatusCode() int
+	// Proto is the protocol with which the response was delivered.
+	Proto() string
+	// Body returns an io.ReadCloser containing the contents of the
+	// response's body.
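+	// Callers should close the returned reader once they have consumed it.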
+ Body() io.ReadCloser +} diff --git a/api/schema/lock_list_schema.json b/api/schema/lock_list_schema.json new file mode 100644 index 00000000..e2fa6a36 --- /dev/null +++ b/api/schema/lock_list_schema.json @@ -0,0 +1,65 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "object", + "oneOf": [ + { + "properties": { + "locks": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "path": { + "type": "string" + }, + "committer": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "email": { + "type": "string" + } + }, + "required": ["name", "email"] + }, + "commit_sha": { + "type": "string" + }, + "locked_at": { + "type": "string" + }, + "unlocked_at": { + "type": "string" + } + }, + "required": ["id", "path", "commit_sha", "locked_at"], + "additionalItems": false + } + }, + "next_cursor": { + "type": "string" + } + }, + "additionalProperties": false, + "required": ["locks"] + }, + { + "properties": { + "locks": { + "type": "null" + }, + "error": { + "type": "string" + } + }, + "additionalProperties": false, + "required": ["error"] + } + ] +} diff --git a/api/schema/lock_request_schema.json b/api/schema/lock_request_schema.json new file mode 100644 index 00000000..10d5e2f2 --- /dev/null +++ b/api/schema/lock_request_schema.json @@ -0,0 +1,25 @@ +{ + "type": "object", + "properties": { + "path": { + "type": "string" + }, + "latest_remote_commit": { + "type": "string" + }, + "committer": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "email": { + "type": "string" + } + }, + "required": ["name", "email"] + } + }, + "required": ["path", "latest_remote_commit", "committer"] + +} diff --git a/api/schema/lock_response_schema.json b/api/schema/lock_response_schema.json new file mode 100644 index 00000000..a5b458de --- /dev/null +++ b/api/schema/lock_response_schema.json @@ -0,0 +1,61 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "object", + "oneOf": [ + { + "properties": { + "error": { + "type": "string" + } + }, + "required": ["error"] + }, + { + "properties": { + "commit_needed": { + "type": "string" + } + }, + "required": ["commit_needed"] + }, + { + "properties": { + "lock": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "path": { + "type": "string" + }, + "committer": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "email": { + "type": "string" + } + }, + "required": ["name", "email"] + }, + "commit_sha": { + "type": "string" + }, + "locked_at": { + "type": "string" + }, + "unlocked_at": { + "type": "string" + } + }, + "required": ["id", "path", "commit_sha", "locked_at"] + } + }, + "required": ["lock"] + } + ] +} diff --git a/api/schema/schema_validator.go b/api/schema/schema_validator.go new file mode 100644 index 00000000..5e1b7272 --- /dev/null +++ b/api/schema/schema_validator.go @@ -0,0 +1,82 @@ +package schema + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/xeipuuv/gojsonschema" +) + +// SchemaValidator uses the gojsonschema library to validate the JSON encoding +// of Go objects against a pre-defined JSON schema. +type SchemaValidator struct { + // Schema is the JSON schema to validate against. + // + // Subject is the instance of Go type that will be validated. 
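+	//
+	// Both fields are gojsonschema.JSONLoader values; NewSchemaValidator
+	// below builds them from a schema file name and an arbitrary Go value.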
+	Schema, Subject gojsonschema.JSONLoader
+}
+
+func NewSchemaValidator(t *testing.T, schemaName string, got interface{}) *SchemaValidator {
+	dir, err := os.Getwd()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	schema := gojsonschema.NewReferenceLoader(fmt.Sprintf(
+		"file:///%s",
+		filepath.Join(dir, "schema/", schemaName),
+	))
+
+	marshalled, err := json.Marshal(got)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	subject := gojsonschema.NewStringLoader(string(marshalled))
+
+	return &SchemaValidator{
+		Schema:  schema,
+		Subject: subject,
+	}
+}
+
+// Validate validates a Go object against a JSON schema in a testing
+// environment. If the validation fails, then the test will fail after logging
+// all of the validation errors experienced by the validator.
+func Validate(t *testing.T, schemaName string, got interface{}) {
+	NewSchemaValidator(t, schemaName, got).Assert(t)
+}
+
+// Refute ensures that a particular Go object does not validate against the
+// given JSON schema.
+//
+// If validation against the schema is successful, then the test will fail
+// after logging.
+func Refute(t *testing.T, schemaName string, got interface{}) {
+	NewSchemaValidator(t, schemaName, got).Refute(t)
+}
+
+// Assert performs the validation assertion against the given *testing.T.
+func (v *SchemaValidator) Assert(t *testing.T) {
+	if result, err := gojsonschema.Validate(v.Schema, v.Subject); err != nil {
+		t.Fatal(err)
+	} else if !result.Valid() {
+		for _, err := range result.Errors() {
+			t.Logf("Validation error: %s", err.Description())
+		}
+		t.Fail()
+	}
+}
+
+// Refute refutes that the given subject will validate against a particular
+// schema.
+func (v *SchemaValidator) Refute(t *testing.T) {
+	if result, err := gojsonschema.Validate(v.Schema, v.Subject); err != nil {
+		t.Fatal(err)
+	} else if result.Valid() {
+		t.Fatal("api/schema: expected validation to fail, succeeded")
+	}
+}
diff --git a/api/schema/schemas.go b/api/schema/schemas.go
new file mode 100644
index 00000000..7d4c94d3
--- /dev/null
+++ b/api/schema/schemas.go
@@ -0,0 +1,24 @@
+// Package schema provides a utility for testing API types against a
+// predefined JSON schema.
+//
+// The core philosophy for this package is as follows: when a new API is
+// accepted, JSON Schema files should be added to document the types that are
+// exchanged over this new API. Those files are placed in the `/api/schema`
+// directory, and are used by the schema.Validate function to test that
+// particular instances of these types as represented in Go match the
+// predefined schema that was proposed as a part of the API.
+//
+// For ease of use, this file defines several constants, one for each schema
+// file's name, to easily pass around during tests.
+//
+// As briefly described above, to validate that a Go type matches the schema
+// for a particular API call, one should use the schema.Validate() function.
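+//
+// For example, to check that the Go representation of a lock request still
+// matches the documented wire format (a sketch mirroring the tests in
+// api/lock_api_test.go):
+//
+//	schema.Validate(t, schema.LockRequestSchema, &api.LockRequest{
+//		Path:               "/path/to/lock",
+//		LatestRemoteCommit: "deadbeef",
+//		Committer:          api.Committer{Name: "Jane Doe", Email: "jane@example.com"},
+//	})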
+package schema + +const ( + LockListSchema = "lock_list_schema.json" + LockRequestSchema = "lock_request_schema.json" + LockResponseSchema = "lock_response_schema.json" + UnlockRequestSchema = "unlock_request_schema.json" + UnlockResponseSchema = "unlock_response_schema.json" +) diff --git a/api/schema/unlock_request_schema.json b/api/schema/unlock_request_schema.json new file mode 100644 index 00000000..c4362bb9 --- /dev/null +++ b/api/schema/unlock_request_schema.json @@ -0,0 +1,15 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "force": { + "type": "boolean" + } + }, + "required": ["id", "force"], + "additionalItems": false +} diff --git a/api/schema/unlock_response_schema.json b/api/schema/unlock_response_schema.json new file mode 100644 index 00000000..5ede9024 --- /dev/null +++ b/api/schema/unlock_response_schema.json @@ -0,0 +1,53 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "type": "object", + "oneOf": [ + { + "properties": { + "lock": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "path": { + "type": "string" + }, + "committer": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "email": { + "type": "string" + } + }, + "required": ["name", "email"] + }, + "commit_sha": { + "type": "string" + }, + "locked_at": { + "type": "string" + }, + "unlocked_at": { + "type": "string" + } + }, + "required": ["id", "path", "commit_sha", "locked_at"] + } + }, + "required": ["lock"] + }, + { + "properties": { + "error": { + "type": "string" + } + }, + "required": ["error"] + } + ] +} diff --git a/api/upload_test.go b/api/upload_test.go new file mode 100644 index 00000000..1d24c78c --- /dev/null +++ b/api/upload_test.go @@ -0,0 +1,580 @@ +package api_test // prevent import cycles + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path/filepath" + "strconv" + "testing" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/httputil" + "github.com/github/git-lfs/lfs" + "github.com/github/git-lfs/test" +) + +func TestExistingUpload(t *testing.T) { + SetupTestCredentialsFunc() + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + RestoreCredentialsFunc() + }() + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + tmp := tempdir(t) + defer server.Close() + defer os.RemoveAll(tmp) + + postCalled := false + + mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + if r.Header.Get("Accept") != api.MediaType { + t.Errorf("Invalid Accept") + } + + if r.Header.Get("Content-Type") != api.MediaType { + t.Errorf("Invalid Content-Type") + } + + buf := &bytes.Buffer{} + tee := io.TeeReader(r.Body, buf) + reqObj := &api.ObjectResource{} + err := json.NewDecoder(tee).Decode(reqObj) + t.Logf("request header: %v", r.Header) + t.Logf("request body: %s", buf.String()) + if err != nil { + t.Fatal(err) + } + + if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { + t.Errorf("invalid oid from request: %s", reqObj.Oid) + } + + if reqObj.Size != 4 { + t.Errorf("invalid size from request: %d", reqObj.Size) + } + + obj := &api.ObjectResource{ + Oid: reqObj.Oid, + Size: reqObj.Size, + Actions: map[string]*api.LinkRelation{ + 
"upload": &api.LinkRelation{ + Href: server.URL + "/upload", + Header: map[string]string{"A": "1"}, + }, + "verify": &api.LinkRelation{ + Href: server.URL + "/verify", + Header: map[string]string{"B": "2"}, + }, + }, + } + + by, err := json.Marshal(obj) + if err != nil { + t.Fatal(err) + } + + postCalled = true + head := w.Header() + head.Set("Content-Type", api.MediaType) + head.Set("Content-Length", strconv.Itoa(len(by))) + w.WriteHeader(200) + w.Write(by) + }) + + defer config.Config.ResetConfig() + config.Config.SetConfig("lfs.url", server.URL+"/media") + + oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") + if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { + t.Fatal(err) + } + + oid := filepath.Base(oidPath) + stat, _ := os.Stat(oidPath) + o, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) + if err != nil { + if isDockerConnectionError(err) { + return + } + t.Fatal(err) + } + if o != nil { + t.Errorf("Got an object back") + } + + if !postCalled { + t.Errorf("POST not called") + } + +} + +func TestUploadWithRedirect(t *testing.T) { + SetupTestCredentialsFunc() + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + RestoreCredentialsFunc() + }() + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + tmp := tempdir(t) + defer server.Close() + defer os.RemoveAll(tmp) + + mux.HandleFunc("/redirect/objects", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + w.Header().Set("Location", server.URL+"/redirect2/objects") + w.WriteHeader(307) + }) + + mux.HandleFunc("/redirect2/objects", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + w.Header().Set("Location", server.URL+"/media/objects") + w.WriteHeader(307) + }) + + mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + if r.Header.Get("Accept") != api.MediaType { + t.Errorf("Invalid Accept") + } + + if r.Header.Get("Content-Type") != api.MediaType { + t.Errorf("Invalid Content-Type") + } + + buf := &bytes.Buffer{} + tee := io.TeeReader(r.Body, buf) + reqObj := &api.ObjectResource{} + err := json.NewDecoder(tee).Decode(reqObj) + t.Logf("request header: %v", r.Header) + t.Logf("request body: %s", buf.String()) + if err != nil { + t.Fatal(err) + } + + if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { + t.Errorf("invalid oid from request: %s", reqObj.Oid) + } + + if reqObj.Size != 4 { + t.Errorf("invalid size from request: %d", reqObj.Size) + } + + obj := &api.ObjectResource{ + Actions: map[string]*api.LinkRelation{ + "upload": &api.LinkRelation{ + Href: server.URL + "/upload", + Header: map[string]string{"A": "1"}, + }, + "verify": &api.LinkRelation{ + Href: server.URL + "/verify", + Header: map[string]string{"B": "2"}, + }, + }, + } + + by, err := json.Marshal(obj) + if err != nil { + t.Fatal(err) + } + + head := w.Header() + head.Set("Content-Type", api.MediaType) + head.Set("Content-Length", strconv.Itoa(len(by))) + w.WriteHeader(200) + w.Write(by) + }) + + defer config.Config.ResetConfig() + config.Config.SetConfig("lfs.url", server.URL+"/redirect") + + oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") 
+ if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { + t.Fatal(err) + } + + oid := filepath.Base(oidPath) + stat, _ := os.Stat(oidPath) + o, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) + if err != nil { + if isDockerConnectionError(err) { + return + } + t.Fatal(err) + } + + if o != nil { + t.Fatal("Received an object") + } +} + +func TestSuccessfulUploadWithVerify(t *testing.T) { + SetupTestCredentialsFunc() + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + RestoreCredentialsFunc() + }() + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + tmp := tempdir(t) + defer server.Close() + defer os.RemoveAll(tmp) + + postCalled := false + verifyCalled := false + + mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + if r.Header.Get("Accept") != api.MediaType { + t.Errorf("Invalid Accept") + } + + if r.Header.Get("Content-Type") != api.MediaType { + t.Errorf("Invalid Content-Type") + } + + buf := &bytes.Buffer{} + tee := io.TeeReader(r.Body, buf) + reqObj := &api.ObjectResource{} + err := json.NewDecoder(tee).Decode(reqObj) + t.Logf("request header: %v", r.Header) + t.Logf("request body: %s", buf.String()) + if err != nil { + t.Fatal(err) + } + + if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { + t.Errorf("invalid oid from request: %s", reqObj.Oid) + } + + if reqObj.Size != 4 { + t.Errorf("invalid size from request: %d", reqObj.Size) + } + + obj := &api.ObjectResource{ + Oid: reqObj.Oid, + Size: reqObj.Size, + Actions: map[string]*api.LinkRelation{ + "upload": &api.LinkRelation{ + Href: server.URL + "/upload", + Header: map[string]string{"A": "1"}, + }, + "verify": &api.LinkRelation{ + Href: server.URL + "/verify", + Header: map[string]string{"B": "2"}, + }, + }, + } + + by, err := json.Marshal(obj) + if err != nil { + t.Fatal(err) + } + + postCalled = true + head := w.Header() + head.Set("Content-Type", api.MediaType) + head.Set("Content-Length", strconv.Itoa(len(by))) + w.WriteHeader(202) + w.Write(by) + }) + + mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + if r.Header.Get("B") != "2" { + t.Error("Invalid B") + } + + if r.Header.Get("Content-Type") != api.MediaType { + t.Error("Invalid Content-Type") + } + + buf := &bytes.Buffer{} + tee := io.TeeReader(r.Body, buf) + reqObj := &api.ObjectResource{} + err := json.NewDecoder(tee).Decode(reqObj) + t.Logf("request header: %v", r.Header) + t.Logf("request body: %s", buf.String()) + if err != nil { + t.Fatal(err) + } + + if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { + t.Errorf("invalid oid from request: %s", reqObj.Oid) + } + + if reqObj.Size != 4 { + t.Errorf("invalid size from request: %d", reqObj.Size) + } + + verifyCalled = true + w.WriteHeader(200) + }) + + defer config.Config.ResetConfig() + config.Config.SetConfig("lfs.url", server.URL+"/media") + + oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") + if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { + t.Fatal(err) + } + + oid := filepath.Base(oidPath) + stat, _ := os.Stat(oidPath) + o, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) + if err != 
nil { + if isDockerConnectionError(err) { + return + } + t.Fatal(err) + } + api.VerifyUpload(o) + + if !postCalled { + t.Errorf("POST not called") + } + + if !verifyCalled { + t.Errorf("verify not called") + } + +} + +func TestUploadApiError(t *testing.T) { + SetupTestCredentialsFunc() + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + RestoreCredentialsFunc() + }() + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + tmp := tempdir(t) + defer server.Close() + defer os.RemoveAll(tmp) + + postCalled := false + + mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { + postCalled = true + w.WriteHeader(404) + }) + + defer config.Config.ResetConfig() + config.Config.SetConfig("lfs.url", server.URL+"/media") + + oidPath, _ := lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") + if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { + t.Fatal(err) + } + + oid := filepath.Base(oidPath) + stat, _ := os.Stat(oidPath) + _, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) + if err == nil { + t.Fatal(err) + } + + if errutil.IsFatalError(err) { + t.Fatal("should not panic") + } + + if isDockerConnectionError(err) { + return + } + + if err.Error() != fmt.Sprintf(httputil.GetDefaultError(404), server.URL+"/media/objects") { + t.Fatalf("Unexpected error: %s", err.Error()) + } + + if !postCalled { + t.Errorf("POST not called") + } +} + +func TestUploadVerifyError(t *testing.T) { + SetupTestCredentialsFunc() + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + RestoreCredentialsFunc() + }() + + mux := http.NewServeMux() + server := httptest.NewServer(mux) + tmp := tempdir(t) + defer server.Close() + defer os.RemoveAll(tmp) + + postCalled := false + verifyCalled := false + + mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { + t.Logf("Server: %s %s", r.Method, r.URL) + + if r.Method != "POST" { + w.WriteHeader(405) + return + } + + if r.Header.Get("Accept") != api.MediaType { + t.Errorf("Invalid Accept") + } + + if r.Header.Get("Content-Type") != api.MediaType { + t.Errorf("Invalid Content-Type") + } + + buf := &bytes.Buffer{} + tee := io.TeeReader(r.Body, buf) + reqObj := &api.ObjectResource{} + err := json.NewDecoder(tee).Decode(reqObj) + t.Logf("request header: %v", r.Header) + t.Logf("request body: %s", buf.String()) + if err != nil { + t.Fatal(err) + } + + if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { + t.Errorf("invalid oid from request: %s", reqObj.Oid) + } + + if reqObj.Size != 4 { + t.Errorf("invalid size from request: %d", reqObj.Size) + } + + obj := &api.ObjectResource{ + Oid: reqObj.Oid, + Size: reqObj.Size, + Actions: map[string]*api.LinkRelation{ + "upload": &api.LinkRelation{ + Href: server.URL + "/upload", + Header: map[string]string{"A": "1"}, + }, + "verify": &api.LinkRelation{ + Href: server.URL + "/verify", + Header: map[string]string{"B": "2"}, + }, + }, + } + + by, err := json.Marshal(obj) + if err != nil { + t.Fatal(err) + } + + postCalled = true + head := w.Header() + head.Set("Content-Type", api.MediaType) + head.Set("Content-Length", strconv.Itoa(len(by))) + w.WriteHeader(202) + w.Write(by) + }) + + mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { + verifyCalled = true + w.WriteHeader(404) + }) + + defer config.Config.ResetConfig() + config.Config.SetConfig("lfs.url", server.URL+"/media") + + oidPath, _ := 
lfs.LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") + if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { + t.Fatal(err) + } + + oid := filepath.Base(oidPath) + stat, _ := os.Stat(oidPath) + o, _, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: oid, Size: stat.Size()}, "upload", []string{"basic"}) + if err != nil { + if isDockerConnectionError(err) { + return + } + t.Fatal(err) + } + err = api.VerifyUpload(o) + if err == nil { + t.Fatal("verify should fail") + } + + if errutil.IsFatalError(err) { + t.Fatal("should not panic") + } + + if err.Error() != fmt.Sprintf(httputil.GetDefaultError(404), server.URL+"/verify") { + t.Fatalf("Unexpected error: %s", err.Error()) + } + + if !postCalled { + t.Errorf("POST not called") + } + + if !verifyCalled { + t.Errorf("verify not called") + } + +} diff --git a/api/v1.go b/api/v1.go new file mode 100644 index 00000000..e6e87010 --- /dev/null +++ b/api/v1.go @@ -0,0 +1,165 @@ +package api + +import ( + "net/http" + "net/url" + "path" + + "github.com/github/git-lfs/auth" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/httputil" + + "github.com/rubyist/tracerx" +) + +const ( + MediaType = "application/vnd.git-lfs+json; charset=utf-8" +) + +// doLegacyApiRequest runs the request to the LFS legacy API. +func DoLegacyRequest(req *http.Request) (*http.Response, *ObjectResource, error) { + via := make([]*http.Request, 0, 4) + res, err := httputil.DoHttpRequestWithRedirects(req, via, true) + if err != nil { + return res, nil, err + } + + obj := &ObjectResource{} + err = httputil.DecodeResponse(res, obj) + + if err != nil { + httputil.SetErrorResponseContext(err, res) + return nil, nil, err + } + + return res, obj, nil +} + +type batchRequest struct { + TransferAdapterNames []string `json:"transfers"` + Operation string `json:"operation"` + Objects []*ObjectResource `json:"objects"` +} +type batchResponse struct { + TransferAdapterName string `json:"transfer"` + Objects []*ObjectResource `json:"objects"` +} + +// doApiBatchRequest runs the request to the LFS batch API. If the API returns a +// 401, the repo will be marked as having private access and the request will be +// re-run. When the repo is marked as having private access, credentials will +// be retrieved. +func DoBatchRequest(req *http.Request) (*http.Response, *batchResponse, error) { + res, err := DoRequest(req, config.Config.PrivateAccess(auth.GetOperationForRequest(req))) + + if err != nil { + if res != nil && res.StatusCode == 401 { + return res, nil, errutil.NewAuthError(err) + } + return res, nil, err + } + + resp := &batchResponse{} + err = httputil.DecodeResponse(res, resp) + + if err != nil { + httputil.SetErrorResponseContext(err, res) + } + + return res, resp, err +} + +// DoRequest runs a request to the LFS API, without parsing the response +// body. If the API returns a 401, the repo will be marked as having private +// access and the request will be re-run. When the repo is marked as having +// private access, credentials will be retrieved. 
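+//
+// The useCreds flag is forwarded unchanged to
+// httputil.DoHttpRequestWithRedirects; DoBatchRequest, for example, passes
+// config.Config.PrivateAccess(...) for it.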
+func DoRequest(req *http.Request, useCreds bool) (*http.Response, error) { + via := make([]*http.Request, 0, 4) + return httputil.DoHttpRequestWithRedirects(req, via, useCreds) +} + +func NewRequest(method, oid string) (*http.Request, error) { + objectOid := oid + operation := "download" + if method == "POST" { + if oid != "batch" { + objectOid = "" + operation = "upload" + } + } + endpoint := config.Config.Endpoint(operation) + + res, err := auth.SshAuthenticate(endpoint, operation, oid) + if err != nil { + tracerx.Printf("ssh: attempted with %s. Error: %s", + endpoint.SshUserAndHost, err.Error(), + ) + return nil, err + } + + if len(res.Href) > 0 { + endpoint.Url = res.Href + } + + u, err := ObjectUrl(endpoint, objectOid) + if err != nil { + return nil, err + } + + req, err := httputil.NewHttpRequest(method, u.String(), res.Header) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", MediaType) + return req, nil +} + +func NewBatchRequest(operation string) (*http.Request, error) { + endpoint := config.Config.Endpoint(operation) + + res, err := auth.SshAuthenticate(endpoint, operation, "") + if err != nil { + tracerx.Printf("ssh: %s attempted with %s. Error: %s", + operation, endpoint.SshUserAndHost, err.Error(), + ) + return nil, err + } + + if len(res.Href) > 0 { + endpoint.Url = res.Href + } + + u, err := ObjectUrl(endpoint, "batch") + if err != nil { + return nil, err + } + + req, err := httputil.NewHttpRequest("POST", u.String(), nil) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", MediaType) + if res.Header != nil { + for key, value := range res.Header { + req.Header.Set(key, value) + } + } + + return req, nil +} + +func ObjectUrl(endpoint config.Endpoint, oid string) (*url.URL, error) { + u, err := url.Parse(endpoint.Url) + if err != nil { + return nil, err + } + + u.Path = path.Join(u.Path, "objects") + if len(oid) > 0 { + u.Path = path.Join(u.Path, oid) + } + return u, nil +} diff --git a/api/verify.go b/api/verify.go new file mode 100644 index 00000000..63f5262a --- /dev/null +++ b/api/verify.go @@ -0,0 +1,46 @@ +package api + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "strconv" + + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/httputil" +) + +// VerifyUpload calls the "verify" API link relation on obj if it exists +func VerifyUpload(obj *ObjectResource) error { + + // Do we need to do verify? 
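+	// Servers opt in by returning a "verify" link relation with the object;
+	// if it is absent, there is nothing to call.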
+	if _, ok := obj.Rel("verify"); !ok {
+		return nil
+	}
+
+	req, err := obj.NewRequest("verify", "POST")
+	if err != nil {
+		return errutil.Error(err)
+	}
+
+	by, err := json.Marshal(obj)
+	if err != nil {
+		return errutil.Error(err)
+	}
+
+	req.Header.Set("Content-Type", MediaType)
+	req.Header.Set("Content-Length", strconv.Itoa(len(by)))
+	req.ContentLength = int64(len(by))
+	req.Body = ioutil.NopCloser(bytes.NewReader(by))
+	res, err := DoRequest(req, true)
+	if err != nil {
+		return err
+	}
+
+	httputil.LogTransfer("lfs.data.verify", res)
+	io.Copy(ioutil.Discard, res.Body)
+	res.Body.Close()
+
+	return nil
+}
diff --git a/auth/auth.go b/auth/auth.go
new file mode 100644
index 00000000..99a3adb1
--- /dev/null
+++ b/auth/auth.go
@@ -0,0 +1,3 @@
+// Package auth provides common authentication tools.
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package auth
diff --git a/lfs/credentials.go b/auth/credentials.go
similarity index 66%
rename from lfs/credentials.go
rename to auth/credentials.go
index 3ba6e32f..77410ae6 100644
--- a/lfs/credentials.go
+++ b/auth/credentials.go
@@ -1,37 +1,41 @@
-package lfs
+package auth
 
 import (
 	"bytes"
+	"encoding/base64"
 	"errors"
 	"fmt"
 	"net"
 	"net/http"
 	"net/url"
+	"os"
 	"os/exec"
 	"strings"
 
-	"github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx"
+	"github.com/github/git-lfs/config"
+	"github.com/github/git-lfs/errutil"
+	"github.com/rubyist/tracerx"
 )
 
-// getCreds gets the credentials for LFS API requests and sets the given
+// getCreds gets the credentials for an HTTP request and sets the given
 // request's Authorization header with them using Basic Authentication.
-// 1. Check the LFS URL for authentication. Ex: http://user:pass@example.com
+// 1. Check the URL for authentication. Ex: http://user:pass@example.com
 // 2. Check netrc for authentication.
 // 3. Check the Git remote URL for authentication IF it's the same scheme and
-// host of the LFS URL.
+// host as the URL.
 // 4. Ask 'git credential' to fill in the password from one of the above URLs.
 //
 // This prefers the Git remote URL for checking credentials so that users only
 // have to enter their passwords once for Git and Git LFS. It uses the same
 // URL path that Git does, in case 'useHttpPath' is enabled in the Git config.
-func getCreds(req *http.Request) (Creds, error) { +func GetCreds(req *http.Request) (Creds, error) { if skipCredsCheck(req) { return nil, nil } credsUrl, err := getCredURLForAPI(req) if err != nil { - return nil, Error(err) + return nil, errutil.Error(err) } if credsUrl == nil { @@ -46,8 +50,8 @@ func getCreds(req *http.Request) (Creds, error) { } func getCredURLForAPI(req *http.Request) (*url.URL, error) { - operation := getOperationForHttpRequest(req) - apiUrl, err := url.Parse(Config.Endpoint(operation).Url) + operation := GetOperationForRequest(req) + apiUrl, err := url.Parse(config.Config.Endpoint(operation).Url) if err != nil { return nil, err } @@ -64,8 +68,8 @@ func getCredURLForAPI(req *http.Request) (*url.URL, error) { } credsUrl := apiUrl - if len(Config.CurrentRemote) > 0 { - if u := Config.GitRemoteUrl(Config.CurrentRemote, operation == "upload"); u != "" { + if len(config.Config.CurrentRemote) > 0 { + if u := config.Config.GitRemoteUrl(config.Config.CurrentRemote, operation == "upload"); u != "" { gitRemoteUrl, err := url.Parse(u) if err != nil { return nil, err @@ -100,7 +104,7 @@ func setCredURLFromNetrc(req *http.Request) bool { host = hostname } - machine, err := Config.FindNetrcHost(host) + machine, err := config.Config.FindNetrcHost(host) if err != nil { tracerx.Printf("netrc: error finding match for %q: %s", hostname, err) return false @@ -115,7 +119,7 @@ func setCredURLFromNetrc(req *http.Request) bool { } func skipCredsCheck(req *http.Request) bool { - if Config.NtlmAccess(getOperationForHttpRequest(req)) { + if config.Config.NtlmAccess(GetOperationForRequest(req)) { return false } @@ -155,7 +159,7 @@ func fillCredentials(req *http.Request, u *url.URL) (Creds, error) { return creds, err } -func saveCredentials(creds Creds, res *http.Response) { +func SaveCredentials(creds Creds, res *http.Response) { if creds == nil { return } @@ -185,7 +189,8 @@ func (c Creds) Buffer() *bytes.Buffer { return buf } -type credentialFunc func(Creds, string) (Creds, error) +// Credentials function which will be called whenever credentials are requested +type CredentialFunc func(Creds, string) (Creds, error) func execCredsCommand(input Creds, subCommand string) (Creds, error) { output := new(bytes.Buffer) @@ -207,7 +212,7 @@ func execCredsCommand(input Creds, subCommand string) (Creds, error) { } if _, ok := err.(*exec.ExitError); ok { - if !Config.GetenvBool("GIT_TERMINAL_PROMPT", true) { + if !config.Config.GetenvBool("GIT_TERMINAL_PROMPT", true) { return nil, fmt.Errorf("Change the GIT_TERMINAL_PROMPT env var to be prompted to enter your credentials for %s://%s.", input["protocol"], input["host"]) } @@ -235,4 +240,52 @@ func execCredsCommand(input Creds, subCommand string) (Creds, error) { return creds, nil } -var execCreds credentialFunc = execCredsCommand +func setRequestAuthFromUrl(req *http.Request, u *url.URL) bool { + if !config.Config.NtlmAccess(GetOperationForRequest(req)) && u.User != nil { + if pass, ok := u.User.Password(); ok { + fmt.Fprintln(os.Stderr, "warning: current Git remote contains credentials") + setRequestAuth(req, u.User.Username(), pass) + return true + } + } + + return false +} + +func setRequestAuth(req *http.Request, user, pass string) { + if config.Config.NtlmAccess(GetOperationForRequest(req)) { + return + } + + if len(user) == 0 && len(pass) == 0 { + return + } + + token := fmt.Sprintf("%s:%s", user, pass) + auth := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) + req.Header.Set("Authorization", auth) +} + +var execCreds 
CredentialFunc = execCredsCommand + +// GetCredentialsFunc returns the current credentials function +func GetCredentialsFunc() CredentialFunc { + return execCreds +} + +// SetCredentialsFunc overrides the default credentials function (which is to call git) +// Returns the previous credentials func +func SetCredentialsFunc(f CredentialFunc) CredentialFunc { + oldf := execCreds + execCreds = f + return oldf +} + +// GetOperationForRequest determines the operation type for a http.Request +func GetOperationForRequest(req *http.Request) string { + operation := "download" + if req.Method == "POST" || req.Method == "PUT" { + operation = "upload" + } + return operation +} diff --git a/lfs/credentials_test.go b/auth/credentials_test.go similarity index 86% rename from lfs/credentials_test.go rename to auth/credentials_test.go index 78dec933..7663c259 100644 --- a/lfs/credentials_test.go +++ b/auth/credentials_test.go @@ -1,4 +1,4 @@ -package lfs +package auth import ( "encoding/base64" @@ -8,11 +8,14 @@ import ( "strings" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/bgentry/go-netrc/netrc" + "github.com/bgentry/go-netrc/netrc" + "github.com/github/git-lfs/config" ) func TestGetCredentialsForApi(t *testing.T) { - checkGetCredentials(t, getCreds, []*getCredentialCheck{ + SetupTestCredentialsFunc() + + checkGetCredentials(t, GetCreds, []*getCredentialCheck{ { Desc: "simple", Config: map[string]string{"lfs.url": "https://git-server.com"}, @@ -110,6 +113,8 @@ func TestGetCredentialsForApi(t *testing.T) { SkipAuth: true, }, }) + + RestoreCredentialsFunc() } type fakeNetrc struct{} @@ -122,7 +127,9 @@ func (n *fakeNetrc) FindMachine(host string) *netrc.Machine { } func TestNetrcWithHostAndPort(t *testing.T) { - Config.parsedNetrc = &fakeNetrc{} + SetupTestCredentialsFunc() + + config.Config.SetNetrc(&fakeNetrc{}) u, err := url.Parse("http://some-host:123/foo/bar") if err != nil { t.Fatal(err) @@ -141,10 +148,14 @@ func TestNetrcWithHostAndPort(t *testing.T) { if auth != "Basic YWJjOmRlZg==" { t.Fatalf("bad basic auth: %q", auth) } + + RestoreCredentialsFunc() } func TestNetrcWithHost(t *testing.T) { - Config.parsedNetrc = &fakeNetrc{} + SetupTestCredentialsFunc() + + config.Config.SetNetrc(&fakeNetrc{}) u, err := url.Parse("http://some-host/foo/bar") if err != nil { t.Fatal(err) @@ -163,10 +174,14 @@ func TestNetrcWithHost(t *testing.T) { if auth != "Basic YWJjOmRlZg==" { t.Fatalf("bad basic auth: %q", auth) } + + RestoreCredentialsFunc() } func TestNetrcWithBadHost(t *testing.T) { - Config.parsedNetrc = &fakeNetrc{} + SetupTestCredentialsFunc() + + config.Config.SetNetrc(&fakeNetrc{}) u, err := url.Parse("http://other-host/foo/bar") if err != nil { t.Fatal(err) @@ -185,16 +200,18 @@ func TestNetrcWithBadHost(t *testing.T) { if auth != "" { t.Fatalf("bad basic auth: %q", auth) } + + RestoreCredentialsFunc() } func checkGetCredentials(t *testing.T, getCredsFunc func(*http.Request) (Creds, error), checks []*getCredentialCheck) { - existingRemote := Config.CurrentRemote + existingRemote := config.Config.CurrentRemote for _, check := range checks { t.Logf("Checking %q", check.Desc) - Config.CurrentRemote = check.CurrentRemote + config.Config.CurrentRemote = check.CurrentRemote for key, value := range check.Config { - Config.SetConfig(key, value) + config.Config.SetConfig(key, value) } req, err := http.NewRequest(check.Method, check.Href, nil) @@ -259,8 +276,8 @@ func checkGetCredentials(t *testing.T, getCredsFunc func(*http.Request) (Creds, } } - Config.ResetConfig() - Config.CurrentRemote = 
existingRemote + config.Config.ResetConfig() + config.Config.CurrentRemote = existingRemote } } @@ -285,8 +302,13 @@ func (c *getCredentialCheck) ExpectCreds() bool { len(c.Password) > 0 || len(c.Path) > 0 } +var ( + TestCredentialsFunc CredentialFunc + origCredentialsFunc CredentialFunc +) + func init() { - execCreds = func(input Creds, subCommand string) (Creds, error) { + TestCredentialsFunc = func(input Creds, subCommand string) (Creds, error) { output := make(Creds) for key, value := range input { output[key] = value @@ -298,3 +320,13 @@ func init() { return output, nil } } + +// Override the credentials func for testing +func SetupTestCredentialsFunc() { + origCredentialsFunc = SetCredentialsFunc(TestCredentialsFunc) +} + +// Put the original credentials func back +func RestoreCredentialsFunc() { + SetCredentialsFunc(origCredentialsFunc) +} diff --git a/lfs/ssh.go b/auth/ssh.go similarity index 78% rename from lfs/ssh.go rename to auth/ssh.go index 1595a184..b8fc3b96 100644 --- a/lfs/ssh.go +++ b/auth/ssh.go @@ -1,4 +1,4 @@ -package lfs +package auth import ( "bytes" @@ -8,22 +8,23 @@ import ( "path/filepath" "strings" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/github/git-lfs/config" + "github.com/rubyist/tracerx" ) -type sshAuthResponse struct { +type SshAuthResponse struct { Message string `json:"-"` Href string `json:"href"` Header map[string]string `json:"header"` ExpiresAt string `json:"expires_at"` } -func sshAuthenticate(endpoint Endpoint, operation, oid string) (sshAuthResponse, error) { +func SshAuthenticate(endpoint config.Endpoint, operation, oid string) (SshAuthResponse, error) { // This is only used as a fallback where the Git URL is SSH but server doesn't support a full SSH binary protocol // and therefore we derive a HTTPS endpoint for binaries instead; but check authentication here via SSH - res := sshAuthResponse{} + res := SshAuthResponse{} if len(endpoint.SshUserAndHost) == 0 { return res, nil } @@ -60,7 +61,7 @@ func sshAuthenticate(endpoint Endpoint, operation, oid string) (sshAuthResponse, // Return the executable name for ssh on this machine and the base args // Base args includes port settings, user/host, everything pre the command to execute -func sshGetExeAndArgs(endpoint Endpoint) (exe string, baseargs []string) { +func sshGetExeAndArgs(endpoint config.Endpoint) (exe string, baseargs []string) { if len(endpoint.SshUserAndHost) == 0 { return "", nil } @@ -68,7 +69,13 @@ func sshGetExeAndArgs(endpoint Endpoint) (exe string, baseargs []string) { isPlink := false isTortoise := false - ssh := Config.Getenv("GIT_SSH") + ssh := config.Config.Getenv("GIT_SSH") + cmdArgs := strings.Fields(config.Config.Getenv("GIT_SSH_COMMAND")) + if len(cmdArgs) > 0 { + ssh = cmdArgs[0] + cmdArgs = cmdArgs[1:] + } + if ssh == "" { ssh = "ssh" } else { @@ -81,7 +88,11 @@ func sshGetExeAndArgs(endpoint Endpoint) (exe string, baseargs []string) { isTortoise = strings.EqualFold(basessh, "tortoiseplink") } - args := make([]string, 0, 4) + args := make([]string, 0, 4+len(cmdArgs)) + if len(cmdArgs) > 0 { + args = append(args, cmdArgs...) 
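+		// Any extra words from GIT_SSH_COMMAND become leading arguments,
+		// ahead of the port/batch flags appended below.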
+ } + if isTortoise { // TortoisePlink requires the -batch argument to behave like ssh/plink args = append(args, "-batch") diff --git a/auth/ssh_test.go b/auth/ssh_test.go new file mode 100644 index 00000000..d73878ae --- /dev/null +++ b/auth/ssh_test.go @@ -0,0 +1,208 @@ +package auth + +import ( + "path/filepath" + "testing" + + "github.com/github/git-lfs/config" + "github.com/stretchr/testify/assert" +) + +func TestSSHGetExeAndArgsSsh(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "") + oldGITSSH := config.Config.Getenv("GIT_SSH") + config.Config.Setenv("GIT_SSH", "") + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, "ssh", exe) + assert.Equal(t, []string{"user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsSshCustomPort(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + endpoint.SshPort = "8888" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "") + oldGITSSH := config.Config.Getenv("GIT_SSH") + config.Config.Setenv("GIT_SSH", "") + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, "ssh", exe) + assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsPlink(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "") + oldGITSSH := config.Config.Getenv("GIT_SSH") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") + config.Config.Setenv("GIT_SSH", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsPlinkCustomPort(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + endpoint.SshPort = "8888" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "") + oldGITSSH := config.Config.Getenv("GIT_SSH") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "plink") + config.Config.Setenv("GIT_SSH", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsTortoisePlink(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "") + oldGITSSH := config.Config.Getenv("GIT_SSH") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") + config.Config.Setenv("GIT_SSH", plink) + exe, args := 
sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"-batch", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsTortoisePlinkCustomPort(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + endpoint.SshPort = "8888" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "") + oldGITSSH := config.Config.Getenv("GIT_SSH") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") + config.Config.Setenv("GIT_SSH", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsSshCommandPrecedence(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "sshcmd") + oldGITSSH := config.Config.Getenv("GIT_SSH") + config.Config.Setenv("GIT_SSH", "bad") + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, "sshcmd", exe) + assert.Equal(t, []string{"user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH", oldGITSSH) + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsSshCommandArgs(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "sshcmd --args 1") + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, "sshcmd", exe) + assert.Equal(t, []string{"--args", "1", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsSshCommandCustomPort(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + endpoint.SshPort = "8888" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + config.Config.Setenv("GIT_SSH_COMMAND", "sshcmd") + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, "sshcmd", exe) + assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsPlinkCommand(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") + config.Config.Setenv("GIT_SSH_COMMAND", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsPlinkCommandCustomPort(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + endpoint.SshPort = "8888" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "plink") + 
config.Config.Setenv("GIT_SSH_COMMAND", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsTortoisePlinkCommand(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") + config.Config.Setenv("GIT_SSH_COMMAND", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"-batch", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} + +func TestSSHGetExeAndArgsTortoisePlinkCommandCustomPort(t *testing.T) { + endpoint := config.Config.Endpoint("download") + endpoint.SshUserAndHost = "user@foo.com" + endpoint.SshPort = "8888" + oldGITSSHCommand := config.Config.Getenv("GIT_SSH_COMMAND") + // this will run on non-Windows platforms too but no biggie + plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") + config.Config.Setenv("GIT_SSH_COMMAND", plink) + exe, args := sshGetExeAndArgs(endpoint) + assert.Equal(t, plink, exe) + assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) + + config.Config.Setenv("GIT_SSH_COMMAND", oldGITSSHCommand) +} diff --git a/commands/command_checkout.go b/commands/command_checkout.go index 45adcc90..dfa9e445 100644 --- a/commands/command_checkout.go +++ b/commands/command_checkout.go @@ -6,10 +6,13 @@ import ( "os/exec" "sync" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/progress" + "github.com/rubyist/tracerx" + "github.com/spf13/cobra" ) var ( @@ -116,7 +119,7 @@ func checkoutWithIncludeExclude(include []string, exclude []string) { for _, pointer := range pointers { totalBytes += pointer.Size } - progress := lfs.NewProgressMeter(len(pointers), totalBytes, false) + progress := progress.NewProgressMeter(len(pointers), totalBytes, false, config.Config.Getenv("GIT_LFS_PROGRESS")) progress.Start() totalBytes = 0 for _, pointer := range pointers { @@ -176,7 +179,7 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { // Check the content - either missing or still this pointer (not exist is ok) filepointer, err := lfs.DecodePointerFromFile(pointer.Name) if err != nil && !os.IsNotExist(err) { - if lfs.IsNotAPointerError(err) { + if errutil.IsNotAPointerError(err) { // File has non-pointer content, leave it alone continue } @@ -195,7 +198,7 @@ func checkoutWithChan(in <-chan *lfs.WrappedPointer) { err = lfs.PointerSmudgeToFile(cwdfilepath, pointer.Pointer, false, nil) if err != nil { - if lfs.IsDownloadDeclinedError(err) { + if errutil.IsDownloadDeclinedError(err) { // acceptable error, data not local (fetch not run or include/exclude) LoggedError(err, "Skipped checkout for %v, content not local. 
Use fetch to download.", pointer.Name) } else { diff --git a/commands/command_clean.go b/commands/command_clean.go index d2272f31..a0dea529 100644 --- a/commands/command_clean.go +++ b/commands/command_clean.go @@ -3,8 +3,10 @@ package commands import ( "os" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/progress" + "github.com/spf13/cobra" ) var ( @@ -19,7 +21,7 @@ func cleanCommand(cmd *cobra.Command, args []string) { lfs.InstallHooks(false) var fileName string - var cb lfs.CopyCallback + var cb progress.CopyCallback var file *os.File var fileSize int64 if len(args) > 0 { @@ -48,8 +50,8 @@ func cleanCommand(cmd *cobra.Command, args []string) { defer cleaned.Teardown() } - if lfs.IsCleanPointerError(err) { - os.Stdout.Write(lfs.ErrorGetContext(err, "bytes").([]byte)) + if errutil.IsCleanPointerError(err) { + os.Stdout.Write(errutil.ErrorGetContext(err, "bytes").([]byte)) return } diff --git a/commands/command_clone.go b/commands/command_clone.go index 019a7a6e..245fc56b 100644 --- a/commands/command_clone.go +++ b/commands/command_clone.go @@ -6,9 +6,11 @@ import ( "path/filepath" "strings" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" - "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/localstorage" + "github.com/github/git-lfs/tools" + "github.com/spf13/cobra" ) var ( @@ -17,7 +19,9 @@ var ( Run: cloneCommand, } - cloneFlags git.CloneFlags + cloneFlags git.CloneFlags + cloneIncludeArg string + cloneExcludeArg string ) func cloneCommand(cmd *cobra.Command, args []string) { @@ -37,14 +41,14 @@ func cloneCommand(cmd *cobra.Command, args []string) { // Either the last argument was a relative or local dir, or we have to // derive it from the clone URL clonedir, err := filepath.Abs(args[len(args)-1]) - if err != nil || !lfs.DirExists(clonedir) { + if err != nil || !tools.DirExists(clonedir) { // Derive from clone URL instead base := path.Base(args[len(args)-1]) if strings.HasSuffix(base, ".git") { base = base[:len(base)-4] } clonedir, _ = filepath.Abs(base) - if !lfs.DirExists(clonedir) { + if !tools.DirExists(clonedir) { Exit("Unable to find clone dir at %q", clonedir) } } @@ -58,22 +62,23 @@ func cloneCommand(cmd *cobra.Command, args []string) { defer os.Chdir(cwd) // Also need to derive dirs now - lfs.ResolveDirs() + localstorage.ResolveDirs() requireInRepo() // Now just call pull with default args // Support --origin option to clone if len(cloneFlags.Origin) > 0 { - lfs.Config.CurrentRemote = cloneFlags.Origin + config.Config.CurrentRemote = cloneFlags.Origin } else { - lfs.Config.CurrentRemote = "origin" + config.Config.CurrentRemote = "origin" } + include, exclude := determineIncludeExcludePaths(config.Config, cloneIncludeArg, cloneExcludeArg) if cloneFlags.NoCheckout || cloneFlags.Bare { // If --no-checkout or --bare then we shouldn't check out, just fetch instead - fetchRef("HEAD", nil, nil) + fetchRef("HEAD", include, exclude) } else { - pull(nil, nil) + pull(include, exclude) } } @@ -104,5 +109,9 @@ func init() { cloneCmd.Flags().BoolVarP(&cloneFlags.Verbose, "verbose", "", false, "See 'git clone --help'") cloneCmd.Flags().BoolVarP(&cloneFlags.Ipv4, "ipv4", "", false, "See 'git clone --help'") cloneCmd.Flags().BoolVarP(&cloneFlags.Ipv6, "ipv6", "", false, "See 'git clone --help'") + + cloneCmd.Flags().StringVarP(&cloneIncludeArg, "include", "I", "", "Include a list of 
paths") + cloneCmd.Flags().StringVarP(&cloneExcludeArg, "exclude", "X", "", "Exclude a list of paths") + RootCmd.AddCommand(cloneCmd) } diff --git a/commands/command_env.go b/commands/command_env.go index 2695c608..1d512ea2 100644 --- a/commands/command_env.go +++ b/commands/command_env.go @@ -1,9 +1,10 @@ package commands import ( + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -14,29 +15,29 @@ var ( ) func envCommand(cmd *cobra.Command, args []string) { - lfs.ShowConfigWarnings = true - config := lfs.Config - endpoint := config.Endpoint("download") + config.ShowConfigWarnings = true + cfg := config.Config + endpoint := cfg.Endpoint("download") gitV, err := git.Config.Version() if err != nil { gitV = "Error getting git version: " + err.Error() } - Print(lfs.UserAgent) + Print(config.VersionDesc) Print(gitV) Print("") if len(endpoint.Url) > 0 { - Print("Endpoint=%s (auth=%s)", endpoint.Url, config.EndpointAccess(endpoint)) + Print("Endpoint=%s (auth=%s)", endpoint.Url, cfg.EndpointAccess(endpoint)) if len(endpoint.SshUserAndHost) > 0 { Print(" SSH=%s:%s", endpoint.SshUserAndHost, endpoint.SshPath) } } - for _, remote := range config.Remotes() { - remoteEndpoint := config.RemoteEndpoint(remote, "download") - Print("Endpoint (%s)=%s (auth=%s)", remote, remoteEndpoint.Url, config.EndpointAccess(remoteEndpoint)) + for _, remote := range cfg.Remotes() { + remoteEndpoint := cfg.RemoteEndpoint(remote, "download") + Print("Endpoint (%s)=%s (auth=%s)", remote, remoteEndpoint.Url, cfg.EndpointAccess(remoteEndpoint)) if len(remoteEndpoint.SshUserAndHost) > 0 { Print(" SSH=%s:%s", remoteEndpoint.SshUserAndHost, remoteEndpoint.SshPath) } @@ -47,7 +48,7 @@ func envCommand(cmd *cobra.Command, args []string) { } for _, key := range []string{"filter.lfs.smudge", "filter.lfs.clean"} { - value, _ := lfs.Config.GitConfig(key) + value, _ := cfg.GitConfig(key) Print("git config %s = %q", key, value) } } diff --git a/commands/command_ext.go b/commands/command_ext.go index a69d9f64..da934eb5 100644 --- a/commands/command_ext.go +++ b/commands/command_ext.go @@ -3,8 +3,8 @@ package commands import ( "fmt" - "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/config" + "github.com/spf13/cobra" ) var ( @@ -31,17 +31,17 @@ func extListCommand(cmd *cobra.Command, args []string) { return } - config := lfs.Config + cfg := config.Config for _, key := range args { - ext := config.Extensions()[key] + ext := cfg.Extensions()[key] printExt(ext) } } func printAllExts() { - config := lfs.Config + cfg := config.Config - extensions, err := lfs.SortExtensions(config.Extensions()) + extensions, err := cfg.SortedExtensions() if err != nil { fmt.Println(err) return @@ -51,7 +51,7 @@ func printAllExts() { } } -func printExt(ext lfs.Extension) { +func printExt(ext config.Extension) { Print("Extension: %s", ext.Name) Print(" clean = %s", ext.Clean) Print(" smudge = %s", ext.Smudge) diff --git a/commands/command_fetch.go b/commands/command_fetch.go index 1fc4cbe7..bd17ef16 100644 --- a/commands/command_fetch.go +++ b/commands/command_fetch.go @@ -4,10 +4,12 @@ import ( "fmt" "time" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" - 
"github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/progress" + "github.com/rubyist/tracerx" + "github.com/spf13/cobra" ) var ( @@ -32,14 +34,14 @@ func fetchCommand(cmd *cobra.Command, args []string) { if err := git.ValidateRemote(args[0]); err != nil { Exit("Invalid remote name %q", args[0]) } - lfs.Config.CurrentRemote = args[0] + config.Config.CurrentRemote = args[0] } else { // Actively find the default remote, don't just assume origin defaultRemote, err := git.DefaultRemote() if err != nil { Exit("No default remote") } - lfs.Config.CurrentRemote = defaultRemote + config.Config.CurrentRemote = defaultRemote } if len(args) > 1 { @@ -48,7 +50,7 @@ func fetchCommand(cmd *cobra.Command, args []string) { Panic(err, "Invalid ref argument: %v", args[1:]) } refs = resolvedrefs - } else { + } else if !fetchAllArg { ref, err := git.CurrentRef() if err != nil { Panic(err, "Could not fetch") @@ -64,13 +66,13 @@ func fetchCommand(cmd *cobra.Command, args []string) { if fetchIncludeArg != "" || fetchExcludeArg != "" { Exit("Cannot combine --all with --include or --exclude") } - if len(lfs.Config.FetchIncludePaths()) > 0 || len(lfs.Config.FetchExcludePaths()) > 0 { + if len(config.Config.FetchIncludePaths()) > 0 || len(config.Config.FetchExcludePaths()) > 0 { Print("Ignoring global include / exclude paths to fulfil --all") } success = fetchAll() } else { // !all - includePaths, excludePaths := determineIncludeExcludePaths(fetchIncludeArg, fetchExcludeArg) + includePaths, excludePaths := determineIncludeExcludePaths(config.Config, fetchIncludeArg, fetchExcludeArg) // Fetch refs sequentially per arg order; duplicates in later refs will be ignored for _, ref := range refs { @@ -79,14 +81,14 @@ func fetchCommand(cmd *cobra.Command, args []string) { success = success && s } - if fetchRecentArg || lfs.Config.FetchPruneConfig().FetchRecentAlways { + if fetchRecentArg || config.Config.FetchPruneConfig().FetchRecentAlways { s := fetchRecent(refs, includePaths, excludePaths) success = success && s } } if fetchPruneArg { - verify := lfs.Config.FetchPruneConfig().PruneVerifyRemoteAlways + verify := config.Config.FetchPruneConfig().PruneVerifyRemoteAlways // no dry-run or verbose options in fetch, assume false prune(verify, false, false) } @@ -146,7 +148,7 @@ func fetchPreviousVersions(ref string, since time.Time, include, exclude []strin // Fetch recent objects based on config func fetchRecent(alreadyFetchedRefs []*git.Ref, include, exclude []string) bool { - fetchconf := lfs.Config.FetchPruneConfig() + fetchconf := config.Config.FetchPruneConfig() if fetchconf.FetchRecentRefsDays == 0 && fetchconf.FetchRecentCommitsDays == 0 { return true @@ -162,7 +164,7 @@ func fetchRecent(alreadyFetchedRefs []*git.Ref, include, exclude []string) bool if fetchconf.FetchRecentRefsDays > 0 { Print("Fetching recent branches within %v days", fetchconf.FetchRecentRefsDays) refsSince := time.Now().AddDate(0, 0, -fetchconf.FetchRecentRefsDays) - refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, lfs.Config.CurrentRemote) + refs, err := git.RecentBranches(refsSince, fetchconf.FetchRecentRefsIncludeRemotes, config.Config.CurrentRemote) if err != nil { Panic(err, "Could not scan for recent refs") } @@ -214,7 +216,7 @@ func scanAll() []*lfs.WrappedPointer { // This could be a long process so use the chan version & report progress Print("Scanning for all objects ever referenced...") - spinner := lfs.NewSpinner() + spinner := progress.NewSpinner() var numObjs int64 
pointerchan, err := lfs.ScanRefsToChan("", "", opts) if err != nil { @@ -287,6 +289,8 @@ func fetchAndReportToChan(pointers []*lfs.WrappedPointer, include, exclude []str tracerx.Printf("fetch %v [%v]", p.Name, p.Oid) q.Add(lfs.NewDownloadable(p)) } else { + // Ensure progress matches + q.Skip(p.Size) if !passFilter { tracerx.Printf("Skipping %v [%v], include/exclude filters applied", p.Name, p.Oid) } else { diff --git a/commands/command_fsck.go b/commands/command_fsck.go index 3dd28926..7f620f53 100644 --- a/commands/command_fsck.go +++ b/commands/command_fsck.go @@ -7,9 +7,10 @@ import ( "os" "path/filepath" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -55,7 +56,7 @@ func doFsck() (bool, error) { ok := true for oid, name := range pointerIndex { - path := filepath.Join(lfs.LocalMediaDir, oid[0:2], oid[2:4], oid) + path := lfs.LocalMediaPathReadOnly(oid) Debug("Examining %v (%v)", name, path) @@ -84,7 +85,7 @@ func doFsck() (bool, error) { continue } - badDir := filepath.Join(lfs.LocalGitStorageDir, "lfs", "bad") + badDir := filepath.Join(config.LocalGitStorageDir, "lfs", "bad") if err := os.MkdirAll(badDir, 0755); err != nil { return false, err } diff --git a/commands/command_init.go b/commands/command_init.go index 5b53b24a..65caf8d2 100644 --- a/commands/command_init.go +++ b/commands/command_init.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) // TODO: Remove for Git LFS v2.0 https://github.com/github/git-lfs/issues/839 diff --git a/commands/command_install.go b/commands/command_install.go index 8144565d..692b7124 100644 --- a/commands/command_install.go +++ b/commands/command_install.go @@ -2,7 +2,7 @@ package commands import ( "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( diff --git a/commands/command_lock.go b/commands/command_lock.go new file mode 100644 index 00000000..2a3daf27 --- /dev/null +++ b/commands/command_lock.go @@ -0,0 +1,113 @@ +package commands + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/git" + "github.com/spf13/cobra" +) + +var ( + lockRemote string + lockRemoteHelp = "specify which remote to use when interacting with locks" + + // TODO(taylor): consider making this (and the above flag) a property of + // some parent-command, or another similarly less ugly way of handling + // this + setLockRemoteFor = func(c *config.Configuration) { + c.CurrentRemote = lockRemote + } + + lockCmd = &cobra.Command{ + Use: "lock", + Run: lockCommand, + } +) + +func lockCommand(cmd *cobra.Command, args []string) { + setLockRemoteFor(config.Config) + + if len(args) == 0 { + Print("Usage: git lfs lock <path>") + return + } + + latest, err := git.CurrentRemoteRef() + if err != nil { + Error(err.Error()) + Exit("Unable to determine latest remote ref for branch.") + } + + path, err := lockPath(args[0]) + if err != nil { + Exit(err.Error()) + } + + s, resp := API.Locks.Lock(&api.LockRequest{ + Path: path, + Committer: api.CurrentCommitter(), + LatestRemoteCommit: latest.Sha, + }) + + if _, err := API.Do(s); err != nil { + Error(err.Error()) + Exit("Error communicating with LFS API.") + } + + if len(resp.Err) > 0 { + Error(resp.Err) + Exit("Server unable to 
create lock.") + } + + Print("\n'%s' was locked (%s)", args[0], resp.Lock.Id) +} + +// lockPath relativizes the given filepath such that it is relative to the root +// path of the repository it is contained within, taking into account the +// working directory of the caller. +// +// If the root directory, working directory, or file cannot be +// determined/opened, an error will be returned. If the file in question is +// actually a directory, an error will be returned. Otherwise, the cleaned path +// will be returned. +// +// For example: +// - Working directory: /code/foo/bar/ +// - Repository root: /code/foo/ +// - File to lock: ./baz +// - Resolved path: bar/baz +func lockPath(file string) (string, error) { + repo, err := git.RootDir() + if err != nil { + return "", err + } + + wd, err := os.Getwd() + if err != nil { + return "", err + } + + abs := filepath.Join(wd, file) + path := strings.TrimPrefix(abs, repo) + + if stat, err := os.Stat(abs); err != nil { + return "", err + } else { + if stat.IsDir() { + return "", fmt.Errorf("lfs: cannot lock directory: %s", file) + } + + return path[1:], nil + } +} + +func init() { + lockCmd.Flags().StringVarP(&lockRemote, "remote", "r", config.Config.CurrentRemote, lockRemoteHelp) + + RootCmd.AddCommand(lockCmd) +} diff --git a/commands/command_locks.go b/commands/command_locks.go new file mode 100644 index 00000000..ab8de92d --- /dev/null +++ b/commands/command_locks.go @@ -0,0 +1,102 @@ +package commands + +import ( + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/spf13/cobra" +) + +var ( + locksCmdFlags = new(locksFlags) + locksCmd = &cobra.Command{ + Use: "locks", + Run: locksCommand, + } +) + +func locksCommand(cmd *cobra.Command, args []string) { + setLockRemoteFor(config.Config) + + filters, err := locksCmdFlags.Filters() + if err != nil { + Error(err.Error()) + } + + var locks []api.Lock + + query := &api.LockSearchRequest{Filters: filters} + for { + s, resp := API.Locks.Search(query) + if _, err := API.Do(s); err != nil { + Error(err.Error()) + Exit("Error communicating with LFS API.") + } + + if resp.Err != "" { + Error(resp.Err) + } + + locks = append(locks, resp.Locks...) + + if locksCmdFlags.Limit > 0 && len(locks) > locksCmdFlags.Limit { + locks = locks[:locksCmdFlags.Limit] + break + } + + if resp.NextCursor != "" { + query.Cursor = resp.NextCursor + } else { + break + } + } + + Print("\n%d lock(s) matched query:", len(locks)) + for _, lock := range locks { + Print("%s\t%s <%s>", lock.Path, lock.Committer.Name, lock.Committer.Email) + } +} + +func init() { + locksCmd.Flags().StringVarP(&lockRemote, "remote", "r", config.Config.CurrentRemote, lockRemoteHelp) + + locksCmd.Flags().StringVarP(&locksCmdFlags.Path, "path", "p", "", "filter locks results matching a particular path") + locksCmd.Flags().StringVarP(&locksCmdFlags.Id, "id", "i", "", "filter locks results matching a particular ID") + locksCmd.Flags().IntVarP(&locksCmdFlags.Limit, "limit", "l", 0, "optional limit for number of results to return") + + RootCmd.AddCommand(locksCmd) +} + +// locksFlags wraps up and holds all of the flags that can be given to the +// `git lfs locks` command. +type locksFlags struct { + // Path is an optional filter parameter to filter against the lock's + // path + Path string + // Id is an optional filter parameter used to filter against the lock's + // ID. 
+ Id string + // limit is an optional request parameter sent to the server used to + // limit the number of locks returned. + Limit int +} + +// Filters produces a slice of api.Filter instances based on the internal state +// of this locksFlags instance. The return value of this method is intended to +// be used with the api.LockSearchRequest type. +func (l *locksFlags) Filters() ([]api.Filter, error) { + filters := make([]api.Filter, 0) + + if l.Path != "" { + path, err := lockPath(l.Path) + if err != nil { + return nil, err + } + + filters = append(filters, api.Filter{"path", path}) + } + if l.Id != "" { + filters = append(filters, api.Filter{"id", l.Id}) + } + + return filters, nil +} diff --git a/commands/command_logs.go b/commands/command_logs.go index f7332f4f..8eb477e8 100644 --- a/commands/command_logs.go +++ b/commands/command_logs.go @@ -6,8 +6,9 @@ import ( "os" "path/filepath" - "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" + "github.com/spf13/cobra" ) var ( @@ -60,7 +61,7 @@ func logsShowCommand(cmd *cobra.Command, args []string) { } name := args[0] - by, err := ioutil.ReadFile(filepath.Join(lfs.LocalLogDir, name)) + by, err := ioutil.ReadFile(filepath.Join(config.LocalLogDir, name)) if err != nil { Exit("Error reading log: %s", name) } @@ -70,23 +71,23 @@ } func logsClearCommand(cmd *cobra.Command, args []string) { - err := os.RemoveAll(lfs.LocalLogDir) + err := os.RemoveAll(config.LocalLogDir) if err != nil { - Panic(err, "Error clearing %s", lfs.LocalLogDir) + Panic(err, "Error clearing %s", config.LocalLogDir) } - Print("Cleared %s", lfs.LocalLogDir) + Print("Cleared %s", config.LocalLogDir) } func logsBoomtownCommand(cmd *cobra.Command, args []string) { Debug("Debug message") - err := lfs.Errorf(errors.New("Inner error message!"), "Error!") + err := errutil.Errorf(errors.New("Inner error message!"), "Error!") Panic(err, "Welcome to Boomtown") Debug("Never seen") } func sortedLogs() []string { - fileinfos, err := ioutil.ReadDir(lfs.LocalLogDir) + fileinfos, err := ioutil.ReadDir(config.LocalLogDir) if err != nil { return []string{} } diff --git a/commands/command_ls_files.go b/commands/command_ls_files.go index b4b69510..935acf8f 100644 --- a/commands/command_ls_files.go +++ b/commands/command_ls_files.go @@ -5,7 +5,7 @@ import ( "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( diff --git a/commands/command_pointer.go b/commands/command_pointer.go index b8648af1..b7c35fd9 100644 --- a/commands/command_pointer.go +++ b/commands/command_pointer.go @@ -11,7 +11,7 @@ import ( "os/exec" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( diff --git a/commands/command_pre_push.go b/commands/command_pre_push.go index c2e0784c..58809cf7 100644 --- a/commands/command_pre_push.go +++ b/commands/command_pre_push.go @@ -5,9 +5,10 @@ import ( "os" "strings" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -52,12 +53,12 @@ func prePushCommand(cmd *cobra.Command, args []string) { Exit("Invalid remote name %q", args[0]) } - lfs.Config.CurrentRemote = args[0] + 
config.Config.CurrentRemote = args[0] ctx := newUploadContext(prePushDryRun) scanOpt := lfs.NewScanRefsOptions() scanOpt.ScanMode = lfs.ScanLeftToRemoteMode - scanOpt.RemoteName = lfs.Config.CurrentRemote + scanOpt.RemoteName = config.Config.CurrentRemote // We can be passed multiple lines of refs scanner := bufio.NewScanner(os.Stdin) diff --git a/commands/command_prune.go b/commands/command_prune.go index fd752ff7..70b01513 100644 --- a/commands/command_prune.go +++ b/commands/command_prune.go @@ -7,12 +7,13 @@ import ( "sync" "time" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" - + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" "github.com/github/git-lfs/localstorage" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/progress" + "github.com/rubyist/tracerx" + "github.com/spf13/cobra" ) var ( @@ -35,7 +36,7 @@ func pruneCommand(cmd *cobra.Command, args []string) { } verify := !pruneDoNotVerifyArg && - (lfs.Config.FetchPruneConfig().PruneVerifyRemoteAlways || pruneVerifyArg) + (config.Config.FetchPruneConfig().PruneVerifyRemoteAlways || pruneVerifyArg) prune(verify, pruneDryRunArg, pruneVerboseArg) @@ -121,9 +122,9 @@ func prune(verifyRemote, dryRun, verbose bool) { var verifyc chan string if verifyRemote { - lfs.Config.CurrentRemote = lfs.Config.FetchPruneConfig().PruneRemoteName + config.Config.CurrentRemote = config.Config.FetchPruneConfig().PruneRemoteName // build queue now, no estimates or progress output - verifyQueue = lfs.NewDownloadCheckQueue(0, 0, true) + verifyQueue = lfs.NewDownloadCheckQueue(0, 0) verifiedObjects = lfs.NewStringSetWithCapacity(len(localObjects) / 2) // this channel is filled with oids for which Check() succeeded & Transfer() was called @@ -142,7 +143,7 @@ func prune(verifyRemote, dryRun, verbose bool) { if verifyRemote { tracerx.Printf("VERIFYING: %v", file.Oid) pointer := lfs.NewPointer(file.Oid, file.Size, nil) - verifyQueue.Add(lfs.NewDownloadCheckable(&lfs.WrappedPointer{Pointer: pointer})) + verifyQueue.Add(lfs.NewDownloadable(&lfs.WrappedPointer{Pointer: pointer})) } } } @@ -222,7 +223,7 @@ func pruneCheckErrors(taskErrors []error) { func pruneTaskDisplayProgress(progressChan PruneProgressChan, waitg *sync.WaitGroup) { defer waitg.Done() - spinner := lfs.NewSpinner() + spinner := progress.NewSpinner() localCount := 0 retainCount := 0 verifyCount := 0 @@ -267,7 +268,7 @@ func pruneTaskCollectErrors(outtaskErrors *[]error, errorChan chan error, errorw } func pruneDeleteFiles(prunableObjects []string) { - spinner := lfs.NewSpinner() + spinner := progress.NewSpinner() var problems bytes.Buffer // In case we fail to delete some var deletedFiles int @@ -363,7 +364,7 @@ func pruneTaskGetRetainedCurrentAndRecentRefs(retainChan chan string, errorChan go pruneTaskGetRetainedAtRef(ref.Sha, retainChan, errorChan, waitg) // Now recent - fetchconf := lfs.Config.FetchPruneConfig() + fetchconf := config.Config.FetchPruneConfig() if fetchconf.FetchRecentRefsDays > 0 { pruneRefDays := fetchconf.FetchRecentRefsDays + fetchconf.PruneOffsetDays tracerx.Printf("PRUNE: Retaining non-HEAD refs within %d (%d+%d) days", pruneRefDays, fetchconf.FetchRecentRefsDays, fetchconf.PruneOffsetDays) @@ -404,7 +405,7 @@ func pruneTaskGetRetainedCurrentAndRecentRefs(retainChan chan string, errorChan func pruneTaskGetRetainedUnpushed(retainChan chan string, errorChan chan error, waitg *sync.WaitGroup) { defer waitg.Done() - remoteName := 
lfs.Config.FetchPruneConfig().PruneRemoteName + remoteName := config.Config.FetchPruneConfig().PruneRemoteName refchan, err := lfs.ScanUnpushedToChan(remoteName) if err != nil { @@ -427,7 +428,7 @@ func pruneTaskGetRetainedWorktree(retainChan chan string, errorChan chan error, // Retain other worktree HEADs too // Working copy, branch & maybe commit is different but repo is shared - allWorktreeRefs, err := git.GetAllWorkTreeHEADs(lfs.LocalGitStorageDir) + allWorktreeRefs, err := git.GetAllWorkTreeHEADs(config.LocalGitStorageDir) if err != nil { errorChan <- err return diff --git a/commands/command_pull.go b/commands/command_pull.go index fef9096c..08464639 100644 --- a/commands/command_pull.go +++ b/commands/command_pull.go @@ -3,9 +3,9 @@ package commands import ( "fmt" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" - "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -25,17 +25,17 @@ func pullCommand(cmd *cobra.Command, args []string) { if err := git.ValidateRemote(args[0]); err != nil { Panic(err, fmt.Sprintf("Invalid remote name '%v'", args[0])) } - lfs.Config.CurrentRemote = args[0] + config.Config.CurrentRemote = args[0] } else { // Actively find the default remote, don't just assume origin defaultRemote, err := git.DefaultRemote() if err != nil { Panic(err, "No default remote") } - lfs.Config.CurrentRemote = defaultRemote + config.Config.CurrentRemote = defaultRemote } - pull(determineIncludeExcludePaths(pullIncludeArg, pullExcludeArg)) + pull(determineIncludeExcludePaths(config.Config, pullIncludeArg, pullExcludeArg)) } diff --git a/commands/command_push.go b/commands/command_push.go index 11c2dd98..2eeb6b22 100644 --- a/commands/command_push.go +++ b/commands/command_push.go @@ -4,10 +4,11 @@ import ( "io/ioutil" "os" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/rubyist/tracerx" + "github.com/spf13/cobra" ) var ( @@ -28,7 +29,7 @@ func uploadsBetweenRefs(ctx *uploadContext, left string, right string) { scanOpt := lfs.NewScanRefsOptions() scanOpt.ScanMode = lfs.ScanRefsMode - scanOpt.RemoteName = lfs.Config.CurrentRemote + scanOpt.RemoteName = config.Config.CurrentRemote pointers, err := lfs.ScanRefs(left, right, scanOpt) if err != nil { @@ -39,11 +40,11 @@ func uploadsBetweenRefs(ctx *uploadContext, left string, right string) { } func uploadsBetweenRefAndRemote(ctx *uploadContext, refnames []string) { - tracerx.Printf("Upload refs %v to remote %v", refnames, lfs.Config.CurrentRemote) + tracerx.Printf("Upload refs %v to remote %v", refnames, config.Config.CurrentRemote) scanOpt := lfs.NewScanRefsOptions() scanOpt.ScanMode = lfs.ScanLeftToRemoteMode - scanOpt.RemoteName = lfs.Config.CurrentRemote + scanOpt.RemoteName = config.Config.CurrentRemote if pushAll { scanOpt.ScanMode = lfs.ScanRefsMode @@ -122,7 +123,7 @@ func pushCommand(cmd *cobra.Command, args []string) { Exit("Invalid remote name %q", args[0]) } - lfs.Config.CurrentRemote = args[0] + config.Config.CurrentRemote = args[0] ctx := newUploadContext(pushDryRun) if useStdin { diff --git a/commands/command_smudge.go b/commands/command_smudge.go index 7babcde4..309a57cb 100644 --- a/commands/command_smudge.go +++ b/commands/command_smudge.go @@ -6,8 +6,10 @@ import ( "os" "path/filepath" + 
"github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -60,7 +62,7 @@ func smudgeCommand(cmd *cobra.Command, args []string) { Error(err.Error()) } - cfg := lfs.Config + cfg := config.Config download := lfs.FilenamePassesIncludeExcludeFilter(filename, cfg.FetchIncludePaths(), cfg.FetchExcludePaths()) if smudgeSkip || cfg.GetenvBool("GIT_LFS_SKIP_SMUDGE", false) { @@ -75,7 +77,7 @@ func smudgeCommand(cmd *cobra.Command, args []string) { if err != nil { ptr.Encode(os.Stdout) // Download declined error is ok to skip if we weren't requesting download - if !(lfs.IsDownloadDeclinedError(err) && !download) { + if !(errutil.IsDownloadDeclinedError(err) && !download) { LoggedError(err, "Error downloading object: %s (%s)", filename, ptr.Oid) if !cfg.SkipDownloadErrors() { os.Exit(2) @@ -89,8 +91,8 @@ func smudgeFilename(args []string, err error) string { return args[0] } - if lfs.IsSmudgeError(err) { - return filepath.Base(lfs.ErrorGetContext(err, "FileName").(string)) + if errutil.IsSmudgeError(err) { + return filepath.Base(errutil.ErrorGetContext(err, "FileName").(string)) } return "" diff --git a/commands/command_status.go b/commands/command_status.go index 0c64e2f4..a262dc10 100644 --- a/commands/command_status.go +++ b/commands/command_status.go @@ -5,7 +5,7 @@ import ( "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( diff --git a/commands/command_track.go b/commands/command_track.go index a8f8ed3b..3d0eeee1 100644 --- a/commands/command_track.go +++ b/commands/command_track.go @@ -9,26 +9,34 @@ import ( "strings" "time" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( + prefixBlocklist = []string{ + ".git", ".lfs", + } + trackCmd = &cobra.Command{ Use: "track", Run: trackCommand, } + + trackVerboseLoggingFlag bool + trackDryRunFlag bool ) func trackCommand(cmd *cobra.Command, args []string) { - if lfs.LocalGitDir == "" { + if config.LocalGitDir == "" { Print("Not a git repository.") os.Exit(128) } - if lfs.LocalWorkingDir == "" { + if config.LocalWorkingDir == "" { Print("This operation must be run in a work tree.") os.Exit(128) } @@ -59,9 +67,9 @@ func trackCommand(cmd *cobra.Command, args []string) { } wd, _ := os.Getwd() - relpath, err := filepath.Rel(lfs.LocalWorkingDir, wd) + relpath, err := filepath.Rel(config.LocalWorkingDir, wd) if err != nil { - Exit("Current directory %q outside of git working directory %q.", wd, lfs.LocalWorkingDir) + Exit("Current directory %q outside of git working directory %q.", wd, config.LocalWorkingDir) } ArgsLoop: @@ -73,32 +81,63 @@ ArgsLoop: } } - encodedArg := strings.Replace(pattern, " ", "[[:space:]]", -1) - _, err := attributesFile.WriteString(fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text\n", encodedArg)) - if err != nil { - Print("Error adding path %s", pattern) - continue - } - Print("Tracking %s", pattern) - // Make sure any existing git tracked files have their timestamp updated // so they will now show as modifed // note this is relative to current dir which is how we write .gitattributes // deliberately not done in parallel as a chan because we'll be marking modified + // + // NOTE: `git ls-files` does not do well with leading 
slashes. + // Since all `git-lfs track` calls are relative to the root of + // the repository, the leading slash is simply removed for its + // implicit counterpart. + if trackVerboseLoggingFlag { + Print("Searching for files matching pattern: %s", pattern) + } gittracked, err := git.GetTrackedFiles(pattern) if err != nil { LoggedError(err, "Error getting git tracked files") continue } + if trackVerboseLoggingFlag { + Print("Found %d files previously added to Git matching pattern: %s", len(gittracked), pattern) + } now := time.Now() + + var matchedBlocklist bool for _, f := range gittracked { - err := os.Chtimes(f, now, now) + if forbidden := blocklistItem(f); forbidden != "" { + Print("Pattern %s matches forbidden file %s. If you would like to track %s, modify .gitattributes manually.", pattern, f, f) + matchedBlocklist = true + } + + } + if matchedBlocklist { + continue + } + + if !trackDryRunFlag { + encodedArg := strings.Replace(pattern, " ", "[[:space:]]", -1) + _, err := attributesFile.WriteString(fmt.Sprintf("%s filter=lfs diff=lfs merge=lfs -text\n", encodedArg)) if err != nil { - LoggedError(err, "Error marking %q modified", f) + Print("Error adding path %s", pattern) continue } } + Print("Tracking %s", pattern) + for _, f := range gittracked { + if trackVerboseLoggingFlag || trackDryRunFlag { + Print("Git LFS: touching %s", f) + } + + if !trackDryRunFlag { + err := os.Chtimes(f, now, now) + if err != nil { + LoggedError(err, "Error marking %q modified", f) + continue + } + } + } } } @@ -122,7 +161,7 @@ func findPaths() []mediaPath { line := scanner.Text() if strings.Contains(line, "filter=lfs") { fields := strings.Fields(line) - relfile, _ := filepath.Rel(lfs.LocalWorkingDir, path) + relfile, _ := filepath.Rel(config.LocalWorkingDir, path) pattern := fields[0] if reldir := filepath.Dir(relfile); len(reldir) > 0 { pattern = filepath.Join(reldir, pattern) @@ -139,12 +178,12 @@ func findPaths() []mediaPath { func findAttributeFiles() []string { paths := make([]string, 0) - repoAttributes := filepath.Join(lfs.LocalGitDir, "info", "attributes") + repoAttributes := filepath.Join(config.LocalGitDir, "info", "attributes") if info, err := os.Stat(repoAttributes); err == nil && !info.IsDir() { paths = append(paths, repoAttributes) } - filepath.Walk(lfs.LocalWorkingDir, func(path string, info os.FileInfo, err error) error { + filepath.Walk(config.LocalWorkingDir, func(path string, info os.FileInfo, err error) error { if err != nil { return err } @@ -180,6 +219,23 @@ func needsTrailingLinebreak(filename string) bool { return !strings.HasSuffix(string(buf[0:bytesRead]), "\n") } +// blocklistItem returns the name of the blocklist item preventing the given +// file-name from being tracked, or an empty string, if there is none. 
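The blocklist check described in the comment above is a prefix match on the base name, which is why it also refuses names like `.gitattributes`. A self-contained sketch (the helper body is copied from `blocklistItem`, which follows):

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

var prefixBlocklist = []string{".git", ".lfs"}

// Same logic as commands.blocklistItem: return the blocklist prefix
// that forbids tracking the given file name, or "" if it is allowed.
func blocklistItem(name string) string {
	base := filepath.Base(name)

	for _, p := range prefixBlocklist {
		if strings.HasPrefix(base, p) {
			return p
		}
	}

	return ""
}

func main() {
	fmt.Println(blocklistItem("foo/.gitattributes")) // ".git" -- forbidden
	fmt.Println(blocklistItem("foo/bar.dat"))        // ""     -- allowed
}
```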
+func blocklistItem(name string) string { + base := filepath.Base(name) + + for _, p := range prefixBlocklist { + if strings.HasPrefix(base, p) { + return p + } + } + + return "" +} + func init() { + trackCmd.Flags().BoolVarP(&trackVerboseLoggingFlag, "verbose", "v", false, "log which files are being tracked and modified") + trackCmd.Flags().BoolVarP(&trackDryRunFlag, "dry-run", "d", false, "preview results of running `git lfs track`") + RootCmd.AddCommand(trackCmd) } diff --git a/commands/command_uninit.go b/commands/command_uninit.go index bcb43116..08ee27e6 100644 --- a/commands/command_uninit.go +++ b/commands/command_uninit.go @@ -4,7 +4,7 @@ import ( "fmt" "os" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) // TODO: Remove for Git LFS v2.0 https://github.com/github/git-lfs/issues/839 diff --git a/commands/command_uninstall.go b/commands/command_uninstall.go index 9f2258bc..026bb99c 100644 --- a/commands/command_uninstall.go +++ b/commands/command_uninstall.go @@ -2,7 +2,7 @@ package commands import ( "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( diff --git a/commands/command_unlock.go b/commands/command_unlock.go new file mode 100644 index 00000000..b16e5cbf --- /dev/null +++ b/commands/command_unlock.go @@ -0,0 +1,107 @@ +package commands + +import ( + "errors" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/spf13/cobra" +) + +var ( + // errNoMatchingLocks is an error returned when no matching locks were + // able to be resolved + errNoMatchingLocks = errors.New("lfs: no matching locks found") + // errLockAmbiguous is an error returned when multiple matching locks + // were found + errLockAmbiguous = errors.New("lfs: multiple locks found; ambiguous") + + unlockCmdFlags unlockFlags + unlockCmd = &cobra.Command{ + Use: "unlock", + Run: unlockCommand, + } +) + +// unlockFlags holds the flags given to the `git lfs unlock` command +type unlockFlags struct { + // Id is the Id of the lock that is being unlocked. + Id string + // Force specifies whether or not the `lfs unlock` command was invoked + // with "--force", signifying the user's intent to break another + // individual's lock(s). + Force bool +} + +func unlockCommand(cmd *cobra.Command, args []string) { + setLockRemoteFor(config.Config) + + var id string + if len(args) != 0 { + path, err := lockPath(args[0]) + if err != nil { + Error(err.Error()) + } + + if id, err = lockIdFromPath(path); err != nil { + Error(err.Error()) + } + } else if unlockCmdFlags.Id != "" { + id = unlockCmdFlags.Id + } else { + Error("Usage: git lfs unlock (--id my-lock-id | <path>)") + } + + s, resp := API.Locks.Unlock(id, unlockCmdFlags.Force) + + if _, err := API.Do(s); err != nil { + Error(err.Error()) + Exit("Error communicating with LFS API.") + } + + if len(resp.Err) > 0 { + Error(resp.Err) + Exit("Server unable to unlock lock.") + } + + Print("'%s' was unlocked (%s)", args[0], resp.Lock.Id) +} + +// lockIdFromPath makes a call to the LFS API and resolves the ID for the lock +// located at the given path. +// +// If the API call failed, an error will be returned. If multiple locks matched +// the given path (should not happen during real-world usage), an error will be +// returned. If no locks matched the given path, an error will be returned. 
+// +// If the API call is successful, and only one lock matches the given filepath, +// then its ID will be returned, along with a value of "nil" for the error. +func lockIdFromPath(path string) (string, error) { + s, resp := API.Locks.Search(&api.LockSearchRequest{ + Filters: []api.Filter{ + {"path", path}, + }, + }) + + if _, err := API.Do(s); err != nil { + return "", err + } + + switch len(resp.Locks) { + case 0: + return "", errNoMatchingLocks + case 1: + return resp.Locks[0].Id, nil + default: + return "", errLockAmbiguous + } +} + +func init() { + unlockCmd.Flags().StringVarP(&lockRemote, "remote", "r", config.Config.CurrentRemote, lockRemoteHelp) + + unlockCmd.Flags().StringVarP(&unlockCmdFlags.Id, "id", "i", "", "unlock a lock by its ID") + unlockCmd.Flags().BoolVarP(&unlockCmdFlags.Force, "force", "f", false, "forcibly break another user's lock(s)") + + RootCmd.AddCommand(unlockCmd) +} diff --git a/commands/command_untrack.go b/commands/command_untrack.go index 5ac688fb..bd4127bc 100644 --- a/commands/command_untrack.go +++ b/commands/command_untrack.go @@ -6,8 +6,9 @@ import ( "os" "strings" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -20,11 +21,11 @@ var ( // untrackCommand takes a list of paths as an argument, and removes each path from the // default attributes file (.gitattributes), if it exists. func untrackCommand(cmd *cobra.Command, args []string) { - if lfs.LocalGitDir == "" { + if config.LocalGitDir == "" { Print("Not a git repository.") os.Exit(128) } - if lfs.LocalWorkingDir == "" { + if config.LocalWorkingDir == "" { Print("This operation must be run in a work tree.") os.Exit(128) } diff --git a/commands/command_update.go b/commands/command_update.go index 6f67662c..5b5d8c34 100644 --- a/commands/command_update.go +++ b/commands/command_update.go @@ -3,9 +3,10 @@ package commands import ( "regexp" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/spf13/cobra" ) var ( @@ -24,7 +25,7 @@ func updateCommand(cmd *cobra.Command, args []string) { requireInRepo() lfsAccessRE := regexp.MustCompile(`\Alfs\.(.*)\.access\z`) - for key, value := range lfs.Config.AllGitConfig() { + for key, value := range config.Config.AllGitConfig() { matches := lfsAccessRE.FindStringSubmatch(key) if len(matches) < 2 { continue diff --git a/commands/command_version.go b/commands/command_version.go index 98dcc40a..3f584d5b 100644 --- a/commands/command_version.go +++ b/commands/command_version.go @@ -1,8 +1,8 @@ package commands import ( - "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/httputil" + "github.com/spf13/cobra" ) var ( @@ -15,7 +15,7 @@ var ( ) func versionCommand(cmd *cobra.Command, args []string) { - Print(lfs.UserAgent) + Print(httputil.UserAgent) if lovesComics { Print("Nothing may see Gah Lak Tus and survive!") diff --git a/commands/commands.go b/commands/commands.go index 4a2f2286..a324b898 100644 --- a/commands/commands.go +++ b/commands/commands.go @@ -11,15 +11,23 @@ import ( "strings" "time" + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + 
"github.com/github/git-lfs/tools" + "github.com/spf13/cobra" ) // Populate man pages //go:generate go run ../docs/man/mangen.go var ( + // API is a package-local instance of the API client for use within + // various command implementations. + API = api.NewClient(nil) + Debugging = false ErrorBuffer = &bytes.Buffer{} ErrorWriter = io.MultiWriter(os.Stderr, ErrorBuffer) @@ -58,10 +66,10 @@ func Exit(format string, args ...interface{}) { } func ExitWithError(err error) { - if Debugging || lfs.IsFatalError(err) { + if Debugging || errutil.IsFatalError(err) { Panic(err, err.Error()) } else { - if inner := lfs.GetInnerError(err); inner != nil { + if inner := errutil.GetInnerError(err); inner != nil { Error(inner.Error()) } Exit(err.Error()) @@ -112,9 +120,17 @@ func PipeCommand(name string, args ...string) error { } func requireStdin(msg string) { - stat, _ := os.Stdin.Stat() - if (stat.Mode() & os.ModeCharDevice) != 0 { - Error("Cannot read from STDIN. %s", msg) + var out string + + stat, err := os.Stdin.Stat() + if err != nil { + out = fmt.Sprintf("Cannot read from STDIN. %s (%s)", msg, err) + } else if (stat.Mode() & os.ModeCharDevice) != 0 { + out = fmt.Sprintf("Cannot read from STDIN. %s", msg) + } + + if len(out) > 0 { + Error(out) os.Exit(1) } } @@ -139,11 +155,11 @@ func logPanic(loggedError error) string { now := time.Now() name := now.Format("20060102T150405.999999999") - full := filepath.Join(lfs.LocalLogDir, name+".log") + full := filepath.Join(config.LocalLogDir, name+".log") - if err := os.MkdirAll(lfs.LocalLogDir, 0755); err != nil { + if err := os.MkdirAll(config.LocalLogDir, 0755); err != nil { full = "" - fmt.Fprintf(fmtWriter, "Unable to log panic to %s: %s\n\n", lfs.LocalLogDir, err.Error()) + fmt.Fprintf(fmtWriter, "Unable to log panic to %s: %s\n\n", config.LocalLogDir, err.Error()) } else if file, err := os.Create(full); err != nil { filename := full full = "" @@ -168,7 +184,7 @@ func logPanicToWriter(w io.Writer, loggedError error) { gitV = "Error getting git version: " + err.Error() } - fmt.Fprintln(w, lfs.UserAgent) + fmt.Fprintln(w, config.VersionDesc) fmt.Fprintln(w, gitV) // log the command that was run @@ -192,7 +208,7 @@ func logPanicToWriter(w io.Writer, loggedError error) { } w.Write(err.Stack()) } else { - w.Write(lfs.Stack()) + w.Write(errutil.Stack()) } fmt.Fprintln(w, "\nENV:") @@ -208,28 +224,9 @@ type ErrorWithStack interface { Stack() []byte } -// determineIncludeExcludePaths is a common function to take the string arguments -// for include/exclude and derive slices either from these options or from the -// common global config -func determineIncludeExcludePaths(includeArg, excludeArg string) (include, exclude []string) { - var includePaths, excludePaths []string - if len(includeArg) > 0 { - for _, inc := range strings.Split(includeArg, ",") { - inc = strings.TrimSpace(inc) - includePaths = append(includePaths, inc) - } - } else { - includePaths = lfs.Config.FetchIncludePaths() - } - if len(excludeArg) > 0 { - for _, ex := range strings.Split(excludeArg, ",") { - ex = strings.TrimSpace(ex) - excludePaths = append(excludePaths, ex) - } - } else { - excludePaths = lfs.Config.FetchExcludePaths() - } - return includePaths, excludePaths +func determineIncludeExcludePaths(config *config.Configuration, includeArg, excludeArg string) (include, exclude []string) { + return tools.CleanPathsDefault(includeArg, ",", config.FetchIncludePaths()), + tools.CleanPathsDefault(excludeArg, ",", config.FetchExcludePaths()) } func printHelp(commandName string) { diff --git 
a/commands/commands_test.go b/commands/commands_test.go new file mode 100644 index 00000000..4bee0db1 --- /dev/null +++ b/commands/commands_test.go @@ -0,0 +1,29 @@ +package commands + +import ( + "testing" + + "github.com/github/git-lfs/config" + "github.com/stretchr/testify/assert" +) + +var ( + cfg = config.NewFromValues(map[string]string{ + "lfs.fetchinclude": "/default/include", + "lfs.fetchexclude": "/default/exclude", + }) +) + +func TestDetermineIncludeExcludePathsReturnsCleanedPaths(t *testing.T) { + i, e := determineIncludeExcludePaths(cfg, "/some/include", "/some/exclude") + + assert.Equal(t, []string{"/some/include"}, i) + assert.Equal(t, []string{"/some/exclude"}, e) +} + +func TestDetermineIncludeExcludePathsReturnsDefaultsWhenAbsent(t *testing.T) { + i, e := determineIncludeExcludePaths(cfg, "", "") + + assert.Equal(t, []string{"/default/include"}, i) + assert.Equal(t, []string{"/default/exclude"}, e) +} diff --git a/commands/uploader.go b/commands/uploader.go index 12ae87ac..cafe446c 100644 --- a/commands/uploader.go +++ b/commands/uploader.go @@ -3,6 +3,7 @@ package commands import ( "os" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/lfs" ) @@ -87,13 +88,13 @@ func (c *uploadContext) checkMissing(missing []*lfs.WrappedPointer, missingSize return } - checkQueue := lfs.NewDownloadCheckQueue(numMissing, missingSize, true) + checkQueue := lfs.NewDownloadCheckQueue(numMissing, missingSize) // this channel is filled with oids for which Check() succeeded & Transfer() was called transferc := checkQueue.Watch() for _, p := range missing { - checkQueue.Add(lfs.NewDownloadCheckable(p)) + checkQueue.Add(lfs.NewDownloadable(p)) } done := make(chan int) @@ -127,8 +128,8 @@ func upload(c *uploadContext, unfiltered []*lfs.WrappedPointer) { for _, p := range pointers { u, err := lfs.NewUploadable(p.Oid, p.Name) if err != nil { - if lfs.IsCleanPointerError(err) { - Exit(uploadMissingErr, p.Oid, p.Name, lfs.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid) + if errutil.IsCleanPointerError(err) { + Exit(uploadMissingErr, p.Oid, p.Name, errutil.ErrorGetContext(err, "pointer").(*lfs.Pointer).Oid) } else { ExitWithError(err) } @@ -141,10 +142,10 @@ func upload(c *uploadContext, unfiltered []*lfs.WrappedPointer) { q.Wait() for _, err := range q.Errors() { - if Debugging || lfs.IsFatalError(err) { + if Debugging || errutil.IsFatalError(err) { LoggedError(err, err.Error()) } else { - if inner := lfs.GetInnerError(err); inner != nil { + if inner := errutil.GetInnerError(err); inner != nil { Error(inner.Error()) } Error(err.Error()) diff --git a/lfs/config.go b/config/config.go similarity index 81% rename from lfs/config.go rename to config/config.go index a0acd0e8..5c53f4a9 100644 --- a/lfs/config.go +++ b/config/config.go @@ -1,18 +1,21 @@ -package lfs +// Package config collects together all configuration settings +// NOTE: Subject to change, do not rely on this package from outside git-lfs source +package config import ( + "bytes" "fmt" - "net/http" "os" "path/filepath" "strconv" "strings" "sync" + "github.com/ThomsonReutersEikon/go-ntlm/ntlm" + "github.com/bgentry/go-netrc/netrc" "github.com/github/git-lfs/git" - "github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm" - "github.com/github/git-lfs/vendor/_nuts/github.com/bgentry/go-netrc/netrc" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/github/git-lfs/tools" + "github.com/rubyist/tracerx" ) var ( @@ -44,16 +47,13 @@ type FetchPruneConfig struct { } type 
Configuration struct { - CurrentRemote string - httpClients map[string]*HttpClient - httpClientsMutex sync.Mutex - redirectingHttpClient *http.Client - ntlmSession ntlm.ClientSession - envVars map[string]string - envVarsMutex sync.Mutex - isTracingHttp bool - isDebuggingHttp bool - isLoggingStats bool + CurrentRemote string + NtlmSession ntlm.ClientSession + envVars map[string]string + envVarsMutex sync.Mutex + IsTracingHttp bool + IsDebuggingHttp bool + IsLoggingStats bool loading sync.Mutex // guards initialization of gitConfig and remotes gitConfig map[string]string @@ -72,12 +72,35 @@ func NewConfig() *Configuration { CurrentRemote: defaultRemote, envVars: make(map[string]string), } - c.isTracingHttp = c.GetenvBool("GIT_CURL_VERBOSE", false) - c.isDebuggingHttp = c.GetenvBool("LFS_DEBUG_HTTP", false) - c.isLoggingStats = c.GetenvBool("GIT_LOG_STATS", false) + c.IsTracingHttp = c.GetenvBool("GIT_CURL_VERBOSE", false) + c.IsDebuggingHttp = c.GetenvBool("LFS_DEBUG_HTTP", false) + c.IsLoggingStats = c.GetenvBool("GIT_LOG_STATS", false) return c } +// NewFromValues returns a new *config.Configuration instance as if it had +// been read from the .gitconfig specified by the "gitconfig" parameter. +// +// NOTE: this method should only be called during testing. +func NewFromValues(gitconfig map[string]string) *Configuration { + config := &Configuration{ + gitConfig: make(map[string]string, 0), + } + + buf := bytes.NewBuffer([]byte{}) + for k, v := range gitconfig { + fmt.Fprintf(buf, "%s=%s\n", k, v) + } + + config.readGitConfig( + string(buf.Bytes()), + map[string]bool{}, + false, + ) + + return config +} + func (c *Configuration) Getenv(key string) string { c.envVarsMutex.Lock() defer c.envVarsMutex.Unlock() @@ -104,6 +127,27 @@ func (c *Configuration) Setenv(key, value string) error { return os.Setenv(key, value) } +func (c *Configuration) GetAllEnv() map[string]string { + c.envVarsMutex.Lock() + defer c.envVarsMutex.Unlock() + + ret := make(map[string]string) + for k, v := range c.envVars { + ret[k] = v + } + return ret +} + +func (c *Configuration) SetAllEnv(env map[string]string) { + c.envVarsMutex.Lock() + defer c.envVarsMutex.Unlock() + + c.envVars = make(map[string]string) + for k, v := range env { + c.envVars[k] = v + } +} + // GetenvBool parses a boolean environment variable and returns the result as a bool. // If the environment variable is unset, empty, or if the parsing fails, // the value of def (default) is returned instead. 
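To make the GetenvBool contract above concrete, here is a rough standalone equivalent; note it is only an approximation (the real method reads through the Configuration's cached envVars rather than calling os.Getenv directly, and its parsing may differ from strconv.ParseBool):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// getenvBool approximates Configuration.GetenvBool: unset, empty, or
// unparseable values all fall back to the supplied default.
func getenvBool(key string, def bool) bool {
	s := os.Getenv(key)
	if len(s) == 0 {
		return def
	}

	b, err := strconv.ParseBool(s)
	if err != nil {
		return def
	}
	return b
}

func main() {
	os.Setenv("GIT_LFS_SKIP_SMUDGE", "1")
	fmt.Println(getenvBool("GIT_LFS_SKIP_SMUDGE", false)) // true
	fmt.Println(getenvBool("SOME_UNSET_VAR", true))       // true (default wins)
}
```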
@@ -183,6 +227,22 @@ func (c *Configuration) ConcurrentTransfers() int { return uploads } +// BasicTransfersOnly returns whether to only allow "basic" HTTP transfers +// Default is false, including if the lfs.basictransfersonly is invalid +func (c *Configuration) BasicTransfersOnly() bool { + value, ok := c.GitConfig("lfs.basictransfersonly") + if !ok || len(value) == 0 { + return false + } + + basicOnly, err := parseConfigBool(value) + if err != nil { + return false + } + + return basicOnly +} + func (c *Configuration) BatchTransfer() bool { value, ok := c.GitConfig("lfs.batch") if !ok || len(value) == 0 { @@ -233,6 +293,11 @@ func (c *Configuration) FindNetrcHost(host string) (*netrc.Machine, error) { return c.parsedNetrc.FindMachine(host), nil } +// Manually override the netrc config +func (c *Configuration) SetNetrc(n netrcfinder) { + c.parsedNetrc = n +} + func (c *Configuration) EndpointAccess(e Endpoint) string { key := fmt.Sprintf("lfs.%s.access", e.Url) if v, ok := c.GitConfig(key); ok && len(v) > 0 { @@ -271,6 +336,7 @@ func (c *Configuration) FetchIncludePaths() []string { c.loadGitConfig() return c.fetchIncludePaths } + func (c *Configuration) FetchExcludePaths() []string { c.loadGitConfig() return c.fetchExcludePaths @@ -318,6 +384,11 @@ func (c *Configuration) Extensions() map[string]Extension { return c.extensions } +// SortedExtensions gets the list of extensions ordered by Priority +func (c *Configuration) SortedExtensions() ([]Extension, error) { + return SortExtensions(c.Extensions()) +} + // GitConfigInt parses a git config value and returns it as an integer. func (c *Configuration) GitConfigInt(key string, def int) int { s, _ := c.GitConfig(key) @@ -558,15 +629,12 @@ func (c *Configuration) readGitConfig(output string, uniqRemotes map[string]bool c.gitConfig[key] = value - if len(keyParts) == 2 && keyParts[0] == "lfs" && keyParts[1] == "fetchinclude" { - for _, inc := range strings.Split(value, ",") { - inc = strings.TrimSpace(inc) - c.fetchIncludePaths = append(c.fetchIncludePaths, inc) - } - } else if len(keyParts) == 2 && keyParts[0] == "lfs" && keyParts[1] == "fetchexclude" { - for _, ex := range strings.Split(value, ",") { - ex = strings.TrimSpace(ex) - c.fetchExcludePaths = append(c.fetchExcludePaths, ex) + if len(keyParts) == 2 && keyParts[0] == "lfs" { + switch keyParts[1] { + case "fetchinclude": + c.fetchIncludePaths = tools.CleanPaths(value, ",") + case "fetchexclude": + c.fetchExcludePaths = tools.CleanPaths(value, ",") } } } @@ -587,3 +655,39 @@ var safeKeys = []string{ "lfs.gitprotocol", "lfs.url", } + +// only used for tests +func (c *Configuration) SetConfig(key, value string) { + if c.loadGitConfig() { + c.loading.Lock() + c.origConfig = make(map[string]string) + for k, v := range c.gitConfig { + c.origConfig[k] = v + } + c.loading.Unlock() + } + + c.gitConfig[key] = value +} + +func (c *Configuration) ClearConfig() { + if c.loadGitConfig() { + c.loading.Lock() + c.origConfig = make(map[string]string) + for k, v := range c.gitConfig { + c.origConfig[k] = v + } + c.loading.Unlock() + } + + c.gitConfig = make(map[string]string) +} + +func (c *Configuration) ResetConfig() { + c.loading.Lock() + c.gitConfig = make(map[string]string) + for k, v := range c.origConfig { + c.gitConfig[k] = v + } + c.loading.Unlock() +} diff --git a/lfs/config_netrc.go b/config/config_netrc.go similarity index 85% rename from lfs/config_netrc.go rename to config/config_netrc.go index 3a0a1497..4c3ff19b 100644 --- a/lfs/config_netrc.go +++ b/config/config_netrc.go @@ -1,10 +1,10 
@@ -package lfs +package config import ( "os" "path/filepath" - "github.com/github/git-lfs/vendor/_nuts/github.com/bgentry/go-netrc/netrc" + "github.com/bgentry/go-netrc/netrc" ) type netrcfinder interface { diff --git a/lfs/config_nix.go b/config/config_nix.go similarity index 76% rename from lfs/config_nix.go rename to config/config_nix.go index 4d071780..daaface5 100644 --- a/lfs/config_nix.go +++ b/config/config_nix.go @@ -1,5 +1,5 @@ // +build !windows -package lfs +package config var netrcBasename = ".netrc" diff --git a/lfs/config_test.go b/config/config_test.go similarity index 92% rename from lfs/config_test.go rename to config/config_test.go index 748909a3..42387510 100644 --- a/lfs/config_test.go +++ b/config/config_test.go @@ -1,9 +1,9 @@ -package lfs +package config import ( "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) func TestEndpointDefaultsToOrigin(t *testing.T) { @@ -336,6 +336,35 @@ func TestConcurrentTransfersNegativeValue(t *testing.T) { assert.Equal(t, 3, n) } +func TestBasicTransfersOnlySetValue(t *testing.T) { + config := &Configuration{ + gitConfig: map[string]string{ + "lfs.basictransfersonly": "true", + }, + } + + b := config.BasicTransfersOnly() + assert.Equal(t, true, b) +} + +func TestBasicTransfersOnlyDefault(t *testing.T) { + config := &Configuration{} + + b := config.BasicTransfersOnly() + assert.Equal(t, false, b) +} + +func TestBasicTransfersOnlyInvalidValue(t *testing.T) { + config := &Configuration{ + gitConfig: map[string]string{ + "lfs.basictransfersonly": "wat", + }, + } + + b := config.BasicTransfersOnly() + assert.Equal(t, false, b) +} + func TestBatch(t *testing.T) { tests := map[string]bool{ "": true, @@ -363,7 +392,7 @@ func TestBatchAbsentIsTrue(t *testing.T) { config := &Configuration{} v := config.BatchTransfer() - assert.Equal(t, true, v) + assert.True(t, v) } func TestAccessConfig(t *testing.T) { @@ -438,8 +467,8 @@ func TestAccessAbsentConfig(t *testing.T) { config := &Configuration{} assert.Equal(t, "none", config.Access("download")) assert.Equal(t, "none", config.Access("upload")) - assert.Equal(t, false, config.PrivateAccess("download")) - assert.Equal(t, false, config.PrivateAccess("upload")) + assert.False(t, config.PrivateAccess("download")) + assert.False(t, config.PrivateAccess("upload")) } func TestLoadValidExtension(t *testing.T) { @@ -481,10 +510,10 @@ func TestFetchPruneConfigDefault(t *testing.T) { assert.Equal(t, 7, fp.FetchRecentRefsDays) assert.Equal(t, 0, fp.FetchRecentCommitsDays) assert.Equal(t, 3, fp.PruneOffsetDays) - assert.Equal(t, true, fp.FetchRecentRefsIncludeRemotes) + assert.True(t, fp.FetchRecentRefsIncludeRemotes) assert.Equal(t, 3, fp.PruneOffsetDays) assert.Equal(t, "origin", fp.PruneRemoteName) - assert.Equal(t, false, fp.PruneVerifyRemoteAlways) + assert.False(t, fp.PruneVerifyRemoteAlways) } func TestFetchPruneConfigCustom(t *testing.T) { @@ -502,31 +531,18 @@ func TestFetchPruneConfigCustom(t *testing.T) { assert.Equal(t, 12, fp.FetchRecentRefsDays) assert.Equal(t, 9, fp.FetchRecentCommitsDays) - assert.Equal(t, false, fp.FetchRecentRefsIncludeRemotes) + assert.False(t, fp.FetchRecentRefsIncludeRemotes) assert.Equal(t, 30, fp.PruneOffsetDays) assert.Equal(t, "upstream", fp.PruneRemoteName) - assert.Equal(t, true, fp.PruneVerifyRemoteAlways) + assert.True(t, fp.PruneVerifyRemoteAlways) } -// only used for tests -func (c *Configuration) SetConfig(key, value string) { - if c.loadGitConfig() { - c.loading.Lock() - c.origConfig = 
make(map[string]string) - for k, v := range c.gitConfig { - c.origConfig[k] = v - } - c.loading.Unlock() - } +func TestFetchIncludeExcludesAreCleaned(t *testing.T) { + config := NewFromValues(map[string]string{ + "lfs.fetchinclude": "/path/to/clean/", + "lfs.fetchexclude": "/other/path/to/clean/", + }) - c.gitConfig[key] = value -} - -func (c *Configuration) ResetConfig() { - c.loading.Lock() - c.gitConfig = make(map[string]string) - for k, v := range c.origConfig { - c.gitConfig[k] = v - } - c.loading.Unlock() + assert.Equal(t, []string{"/path/to/clean"}, config.FetchIncludePaths()) + assert.Equal(t, []string{"/other/path/to/clean"}, config.FetchExcludePaths()) } diff --git a/lfs/config_windows.go b/config/config_windows.go similarity index 76% rename from lfs/config_windows.go rename to config/config_windows.go index a9dfbe70..f4ee656e 100644 --- a/lfs/config_windows.go +++ b/config/config_windows.go @@ -1,4 +1,4 @@ // +build windows -package lfs +package config var netrcBasename = "_netrc" diff --git a/lfs/endpoint.go b/config/endpoint.go similarity index 93% rename from lfs/endpoint.go rename to config/endpoint.go index 56ab38da..f95fbd31 100644 --- a/lfs/endpoint.go +++ b/config/endpoint.go @@ -1,4 +1,4 @@ -package lfs +package config import ( "fmt" @@ -143,16 +143,3 @@ func endpointFromGitUrl(u *url.URL, c *Configuration) Endpoint { u.Scheme = c.GitProtocol() return Endpoint{Url: u.String()} } - -func ObjectUrl(endpoint Endpoint, oid string) (*url.URL, error) { - u, err := url.Parse(endpoint.Url) - if err != nil { - return nil, err - } - - u.Path = path.Join(u.Path, "objects") - if len(oid) > 0 { - u.Path = path.Join(u.Path, oid) - } - return u, nil -} diff --git a/config/extension.go b/config/extension.go new file mode 100644 index 00000000..1b5004e4 --- /dev/null +++ b/config/extension.go @@ -0,0 +1,39 @@ +package config + +import ( + "fmt" + "sort" +) + +// An Extension describes how to manipulate files during smudge and clean. +// Extensions are parsed from the Git config. 
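Extension ordering matters because clean/smudge filters are chained by priority, which the SortExtensions helper below enforces. A small usage sketch (the extension names and `%f` command strings are illustrative only):

```go
package main

import (
	"fmt"

	"github.com/github/git-lfs/config"
)

func main() {
	// Two extensions with distinct priorities sort into ascending order;
	// a duplicate priority would make SortExtensions return an error.
	exts := map[string]config.Extension{
		"bar": {Name: "bar", Clean: "bar-clean %f", Smudge: "bar-smudge %f", Priority: 1},
		"foo": {Name: "foo", Clean: "foo-clean %f", Smudge: "foo-smudge %f", Priority: 0},
	}

	sorted, err := config.SortExtensions(exts)
	if err != nil {
		panic(err)
	}

	for _, ext := range sorted {
		fmt.Println(ext.Name) // prints "foo" then "bar"
	}
}
```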
+type Extension struct { + Name string + Clean string + Smudge string + Priority int +} + +// SortExtensions sorts a map of extensions in ascending order by Priority +func SortExtensions(m map[string]Extension) ([]Extension, error) { + pMap := make(map[int]Extension) + priorities := make([]int, 0, len(m)) + for n, ext := range m { + p := ext.Priority + if _, exist := pMap[p]; exist { + err := fmt.Errorf("duplicate priority %d on %s", p, n) + return nil, err + } + pMap[p] = ext + priorities = append(priorities, p) + } + + sort.Ints(priorities) + + result := make([]Extension, len(priorities)) + for i, p := range priorities { + result[i] = pMap[p] + } + + return result, nil +} diff --git a/lfs/extension_test.go b/config/extension_test.go similarity index 84% rename from lfs/extension_test.go rename to config/extension_test.go index fe8f6ab6..56b3a03f 100644 --- a/lfs/extension_test.go +++ b/config/extension_test.go @@ -1,9 +1,9 @@ -package lfs +package config import ( "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) func TestSortExtensions(t *testing.T) { @@ -32,7 +32,7 @@ func TestSortExtensions(t *testing.T) { sorted, err := SortExtensions(m) - assert.Equal(t, err, nil) + assert.Nil(t, err) for i, ext := range sorted { name := names[i] @@ -61,6 +61,6 @@ func TestSortExtensionsDuplicatePriority(t *testing.T) { sorted, err := SortExtensions(m) - assert.NotEqual(t, err, nil) - assert.Equal(t, len(sorted), 0) + assert.NotNil(t, err) + assert.Empty(t, sorted) } diff --git a/config/filesystem.go b/config/filesystem.go new file mode 100644 index 00000000..f89d5454 --- /dev/null +++ b/config/filesystem.go @@ -0,0 +1,102 @@ +package config + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/github/git-lfs/git" + "github.com/github/git-lfs/tools" + "github.com/rubyist/tracerx" ) + +var ( + LocalWorkingDir string + LocalGitDir string // parent of index / config / hooks etc + LocalGitStorageDir string // parent of objects/lfs (may be same as LocalGitDir but may not) + LocalReferenceDir string // alternative local media dir (relative to clone reference repo) + LocalLogDir string +) + +// Determines the LocalWorkingDir, LocalGitDir etc +func ResolveGitBasicDirs() { + var err error + LocalGitDir, LocalWorkingDir, err = git.GitAndRootDirs() + if err == nil { + // Make sure we've fully evaluated symlinks, failure to do so consistently + // can cause discrepancies + LocalGitDir = tools.ResolveSymlinks(LocalGitDir) + LocalWorkingDir = tools.ResolveSymlinks(LocalWorkingDir) + + LocalGitStorageDir = resolveGitStorageDir(LocalGitDir) + LocalReferenceDir = resolveReferenceDir(LocalGitStorageDir) + + } else { + errMsg := err.Error() + tracerx.Printf("Error running 'git rev-parse': %s", errMsg) + if !strings.Contains(errMsg, "Not a git repository") { + fmt.Fprintf(os.Stderr, "Error: %s\n", errMsg) + } + } +} + +func resolveReferenceDir(gitStorageDir string) string { + cloneReferencePath := filepath.Join(gitStorageDir, "objects", "info", "alternates") + if tools.FileExists(cloneReferencePath) { + buffer, err := ioutil.ReadFile(cloneReferencePath) + if err == nil { + path := strings.TrimSpace(string(buffer[:])) + referenceLfsStoragePath := filepath.Join(filepath.Dir(path), "lfs", "objects") + if tools.DirExists(referenceLfsStoragePath) { + return referenceLfsStoragePath + } + } + } + return "" +} + +// From a git dir, get the location that objects are to be stored (we will store lfs alongside) +// Sometimes 
there is an additional level of redirect on the .git folder by way of a commondir file +// before you find object storage, e.g. 'git worktree' uses this. It redirects to gitdir either by GIT_DIR +// (during setup) or .git/git-dir: (during use), but this only contains the index etc, the objects +// are found in another git dir via 'commondir'. +func resolveGitStorageDir(gitDir string) string { + commondirpath := filepath.Join(gitDir, "commondir") + if tools.FileExists(commondirpath) && !tools.DirExists(filepath.Join(gitDir, "objects")) { + // no git-dir: prefix in commondir + storage, err := processGitRedirectFile(commondirpath, "") + if err == nil { + return storage + } + } + return gitDir +} + +func processGitRedirectFile(file, prefix string) (string, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return "", err + } + + contents := string(data) + var dir string + if len(prefix) > 0 { + if !strings.HasPrefix(contents, prefix) { + // Prefix required & not found + return "", nil + } + dir = strings.TrimSpace(contents[len(prefix):]) + } else { + dir = strings.TrimSpace(contents) + } + + if !filepath.IsAbs(dir) { + // The .git file contains a relative path. + // Create an absolute path based on the directory the .git file is located in. + dir = filepath.Join(filepath.Dir(file), dir) + } + + return dir, nil +} diff --git a/config/version.go b/config/version.go new file mode 100644 index 00000000..a7a9a5a4 --- /dev/null +++ b/config/version.go @@ -0,0 +1,28 @@ +package config + +import ( + "fmt" + "runtime" + "strings" +) + +var ( + GitCommit string + Version = "1.2.0" + VersionDesc string +) + +func init() { + gitCommit := "" + if len(GitCommit) > 0 { + gitCommit = "; git " + GitCommit + } + VersionDesc = fmt.Sprintf("git-lfs/%s (GitHub; %s %s; go %s%s)", + Version, + runtime.GOOS, + runtime.GOARCH, + strings.Replace(runtime.Version(), "go", "", 1), + gitCommit, + ) + +} diff --git a/debian/changelog b/debian/changelog index 8377e3b0..9f4c706e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +git-lfs (1.2.1) stable; urgency=low + + * New upstream version + + -- Stephen Gelman Thu, 2 Jun 2016 14:29:00 +0000 + git-lfs (1.2.0) stable; urgency=low * New upstream version diff --git a/debian/rules b/debian/rules index 85555c35..c8b2ea42 100755 --- a/debian/rules +++ b/debian/rules @@ -1,7 +1,7 @@ #!/usr/bin/make -f export DH_OPTIONS -export GO15VENDOREXPERIMENT=0 +export GO15VENDOREXPERIMENT=1 #dh_golang doesn't do this for you ifeq ($(DEB_HOST_ARCH), i386) @@ -12,8 +12,11 @@ endif BUILD_DIR := obj-$(DEB_HOST_GNU_TYPE) export DH_GOPKG := github.com/github/git-lfs -export DH_GOLANG_EXCLUDES := test +# DH_GOLANG_EXCLUDES typically incorporates vendor exclusions from script/test +export DH_GOLANG_EXCLUDES := test github.com/olekukonko/ts/* github.com/xeipuuv/gojsonschema/* github.com/technoweenie/go-contentaddressable/* github.com/spf13/cobra/* export PATH := $(CURDIR)/$(BUILD_DIR)/bin:$(PATH) +# by-default, dh_golang only copies *.go and other source - this upsets a bunch of vendor test routines +export DH_GOLANG_INSTALL_ALL := 1 %: dh $@ --buildsystem=golang --with=golang @@ -25,7 +28,7 @@ override_dh_clean: override_dh_auto_build: dh_auto_build - #dh_golang doesn't do anything here in deb 8, and it's needed in both + #dh_golang doesn't do anything here in deb 8, and it's needed in both if [ "$(DEB_HOST_GNU_TYPE)" != "$(DEB_BUILD_GNU_TYPE)" ]; then\ cp -rf $(BUILD_DIR)/bin/*/* $(BUILD_DIR)/bin/; \ cp -rf $(BUILD_DIR)/pkg/*/* $(BUILD_DIR)/pkg/; \ diff --git 
a/docker/README.md b/docker/README.md
index f4bb12a2..57aff1d8 100644
--- a/docker/README.md
+++ b/docker/README.md
@@ -47,7 +47,7 @@ To only run certain docker images, supply them as arguments, e.g.
 
     ./docker/run_docker.bsh debian_7
     ./docker/run_docker.bsh centos_7 debian_8
-    ./docker/run_docker.bsh centos_{5,6,7}
+    ./docker/run_docker.bsh centos_{6,7}
 
 And only those images will be run.
 
@@ -104,7 +104,7 @@ will be extracted from the images and saved in the `./repo` directory.
     ./docker/run_dockers.bsh centos_6_env centos_6
 
 This isn't all that important anymore, unless you want ruby2 and the gems used to
-make the man pages for Centos 5/6 where ruby2 is not natively available. Calling
+make the man pages for CentOS 6 where ruby2 is not natively available. Calling
 the environment building images only needs to be done once, they should remain
 in the `./repo` directory afterwards.
 
@@ -262,19 +262,9 @@ also work with 4096 bit RSA signing keys.
 
 CentOS will **not** work with subkeys[3]. CentOS 6 and 7 will work with 4096
 bit RSA signing keys
 
-CentOS 5 will **not** work with v4 signatures. The rpms will be so unrecognizable
-that it can't even be installed with --nogpgcheck. It should work with RSA on v3.
-However, it does not. It builds v3 correctly, but for some reason the GPG check
-fails for RSA. CentOS 5 will **not** work with 2048 bit DSA keys... I suspect
-2048 is too big for it to fathom. CentOS 5 **will** work with 1024 bit DSA keys.
-
 You can make a 4096 RSA key for Debian and CentOS 6/7 (4 for step 1 above, and
-4096 for step 2) and a 1024 DSA key for CentOS 5 (3 for step 1 above, and 1024
-for step 2. And be sure to make the key in a CentOS 5 docker.). And only have two
-keys... Or optionally a 4096 RSA subkey for Debain
-[1]. Or a key for each distro. Dealers choice. You should have least two since
-1024 bit isn't that great and you are only using it for CentOS 5 because nothing
-else works.
+4096 for step 2). And only have two keys... Or optionally a 4096 RSA subkey for
+Debian [1]. Or a key for each distro. Dealer's choice.
 
 [1] https://www.digitalocean.com/community/tutorials/how-to-use-reprepro-for-a-secure-package-repository-on-ubuntu-14-04
diff --git a/docker/run_dockers.bsh b/docker/run_dockers.bsh
index daccaa0e..c9a13de8 100755
--- a/docker/run_dockers.bsh
+++ b/docker/run_dockers.bsh
@@ -2,7 +2,7 @@
 # Usage:
 #   ./run_dockers.bsh - Run all the docker images
-#   ./run_dockers.bsh centos_5 centos_7 - Run only CentOS 5 & 7 image
+#   ./run_dockers.bsh centos_6 centos_7 - Run only CentOS 6 & 7 image
 #   ./run_dockers.bsh centos_6 -- bash #Runs bash in the CentOS 6 docker
 #
 # Special Environmet Variables
@@ -54,7 +54,7 @@ while [[ $# > 0 ]]; do
 done
 
 if [[ ${#IMAGES[@]} == 0 ]]; then
-  IMAGES=(centos_5 centos_6 centos_7 debian_7 debian_8)
+  IMAGES=(centos_6 centos_7 debian_7 debian_8)
 fi
 
 mkdir -p "${PACKAGE_DIR}"
@@ -104,4 +104,4 @@ for IMAGE_NAME in "${IMAGES[@]}"; do
 done
 
-echo "Docker run completed successfully!" \ No newline at end of file
+echo "Docker run completed successfully!"
diff --git a/docker/test_dockers.bsh b/docker/test_dockers.bsh index 17cd724b..60da28c6 100755 --- a/docker/test_dockers.bsh +++ b/docker/test_dockers.bsh @@ -4,4 +4,4 @@ CUR_DIR=$(dirname "${BASH_SOURCE[0]}") -${CUR_DIR}/run_dockers.bsh centos_5_test centos_6_test centos_7_test debian_7_test debian_8_test "${@}" +${CUR_DIR}/run_dockers.bsh centos_6_test centos_7_test debian_7_test debian_8_test "${@}" diff --git a/docs/api/README.md b/docs/api/README.md index 141b3558..b076a23e 100644 --- a/docs/api/README.md +++ b/docs/api/README.md @@ -7,12 +7,12 @@ flow might look like: push`) objects. 2. The client contacts the Git LFS API to get information about transferring the objects. -3. The client then transfers the objects through the storage API. +3. The client then transfers the objects through the transfer API. ## HTTP API The Git LFS HTTP API is responsible for authenticating the user requests, and -returning the proper info for the Git LFS client to use the storage API. By +returning the proper info for the Git LFS client to use the transfer API. By default, API endpoint is based on the current Git remote. For example: ``` @@ -26,11 +26,17 @@ Git LFS endpoint: https://git-server.com/user/repo.git/info/lfs The [specification](/docs/spec.md) describes how clients can configure the Git LFS API endpoint manually. -The [original v1 API][v1] is used for Git LFS v0.5.x. An experimental [v1 -batch API][batch] is in the works for v0.6.x. +The [legacy v1 API][legacy] was used for Git LFS v0.5.x. From 0.6.x the +[batch API][batch] should always be used where available. + +[legacy]: ./v1/http-v1-legacy.md +[batch]: ./v1/http-v1-batch.md + +From v1.3 there are [optional extensions to the batch API][batch v1.3] for more +flexible transfers. + +[batch v1.3]: ./v1.3/http-v1.3-batch.md -[v1]: ./http-v1-original.md -[batch]: ./http-v1-batch.md ### Authentication @@ -81,43 +87,19 @@ HTTPS is strongly encouraged for all production Git LFS servers. If your Git LFS server authenticates with NTLM then you must provide your credentials to `git-credential` in the form `username:DOMAIN\user password:password`. -### Hypermedia +## Transfer API -The Git LFS API uses hypermedia hints to instruct the client what to do next. -These links are included in a `_links` property. Possible relations for objects -include: - -* `self` - This points to the object's canonical API URL. -* `download` - Follow this link with a GET and the optional header values to -download the object content. -* `upload` - Upload the object content to this link with a PUT. -* `verify` - Optional link for the client to POST after an upload. If -included, the client assumes this step is required after uploading an object. -See the "Verification" section below for more. - -Link relations specify the `href`, and optionally a collection of header values -to set for the request. These are optional, and depend on the backing object -store that the Git LFS API is using. - -The Git LFS client will automatically send the same credentials to the followed -link relation as Basic Authentication IF: - -* The url scheme, host, and port all match the Git LFS API endpoint's. -* The link relation does not specify an Authorization header. - -If the host name is different, the Git LFS API needs to send enough information -through the href query or header values to authenticate the request. - -The Git LFS client expects a 200 or 201 response from these hypermedia requests. -Any other response code is treated as an error. 
-
-## Storage API
-
-The Storage API is a generic API for directly uploading and downloading objects.
+The transfer API is a generic API for directly uploading and downloading objects.
 Git LFS servers can offload object storage to cloud services like S3, or
 implemented natively in the Git LFS server. The only requirement is that
 hypermedia objects from the Git LFS API return the correct headers so clients
-can access the storage API properly.
+can access the transfer API properly.
+
+As of v1.3 there can be multiple ways files can be uploaded or downloaded; see
+the [v1.3 API doc](v1.3/http-v1.3-batch.md) for details. The following section
+describes the basic transfer method, which is the default.
+
+### The basic transfer API
 
 The client downloads objects through individual GET requests. The URL and any
 special headers are provided by a "download" hypermedia link:
@@ -125,7 +107,7 @@ special headers are provided by a "download" hypermedia link:
 ```
 # the hypermedia object from the Git LFS API
 # {
-#   "_links": {
+#   "actions": {
 #     "download": {
 #       "href": "https://storage-server.com/OID",
 #       "header": {
@@ -152,7 +134,7 @@ are provided by an "upload" hypermedia link:
 ```
 # the hypermedia object from the Git LFS API
 # {
-#   "_links": {
+#   "actions": {
 #     "upload": {
 #       "href": "https://storage-server.com/OID",
 #       "header": {
diff --git a/docs/api/v1.3/http-v1.3-batch-request-schema.json b/docs/api/v1.3/http-v1.3-batch-request-schema.json
new file mode 100644
index 00000000..d63446b4
--- /dev/null
+++ b/docs/api/v1.3/http-v1.3-batch-request-schema.json
@@ -0,0 +1,33 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema",
+  "title": "Git LFS HTTPS Batch API v1.3 Request",
+  "type": "object",
+  "properties": {
+    "transfers": {
+      "type": "array",
+      "items": {
+        "type": "string"
+      }
+    },
+    "operation": {
+      "type": "string"
+    },
+    "objects": {
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "oid": {
+            "type": "string"
+          },
+          "size": {
+            "type": "number"
+          }
+        },
+        "required": ["oid", "size"],
+        "additionalProperties": false
+      }
+    }
+  },
+  "required": ["objects", "operation"]
+}
diff --git a/docs/api/v1.3/http-v1.3-batch-response-schema.json b/docs/api/v1.3/http-v1.3-batch-response-schema.json
new file mode 100644
index 00000000..4b01a36c
--- /dev/null
+++ b/docs/api/v1.3/http-v1.3-batch-response-schema.json
@@ -0,0 +1,79 @@
+{
+  "$schema": "http://json-schema.org/draft-04/schema",
+  "title": "Git LFS HTTPS Batch API v1.3 Response",
+  "type": "object",
+
+  "definitions": {
+    "action": {
+      "type": "object",
+      "properties": {
+        "href": {
+          "type": "string"
+        },
+        "header": {
+          "type": "object",
+          "additionalProperties": true
+        },
+        "expires_at": {
+          "type": "string"
+        }
+      },
+      "required": ["href"],
+      "additionalProperties": false
+    }
+  },
+
+  "properties": {
+    "transfer": {
+      "type": "string"
+    },
+    "objects": {
+      "type": "array",
+      "items": {
+        "type": "object",
+        "properties": {
+          "oid": {
+            "type": "string"
+          },
+          "size": {
+            "type": "number"
+          },
+          "actions": {
+            "type": "object",
+            "properties": {
+              "download": { "$ref": "#/definitions/action" },
+              "upload": { "$ref": "#/definitions/action" },
+              "verify": { "$ref": "#/definitions/action" }
+            },
+            "additionalProperties": false
+          },
+          "error": {
+            "type": "object",
+            "properties": {
+              "code": {
+                "type": "number"
+              },
+              "message": {
+                "type": "string"
+              }
+            },
+            "required": ["code", "message"],
+            "additionalProperties": false
+          }
+        },
+        "required": ["oid", "size"],
+        "additionalProperties": false
+      }
+    },
+    "message": {
"type": "string" + }, + "request_id": { + "type": "string" + }, + "documentation_url": { + "type": "string" + }, + }, + "required": ["objects"] +} diff --git a/docs/api/v1.3/http-v1.3-batch.md b/docs/api/v1.3/http-v1.3-batch.md new file mode 100644 index 00000000..165a4996 --- /dev/null +++ b/docs/api/v1.3/http-v1.3-batch.md @@ -0,0 +1,98 @@ +# Git LFS v1.3 Batch API + +The Git LFS Batch API extends the [batch v1 API](../v1/http-v1-batch.md), adding +optional fields to the request and response to negotiate transfer methods. + +Only the differences from the v1 API will be listed here, everything else is +unchanged. + +## POST /objects/batch +### Request changes + +The v1.3 request adds an additional optional top-level field, `transfers`, +which is an array of strings naming the transfer methods this client supports. +The transfer methods are in decreasing order of preference. + +The default transfer method which simply uploads and downloads using simple HTTP +`PUT` and `GET`, named "basic", is always supported and is implied. + +Example request: + +``` +> POST https://git-lfs-server.com/objects/batch HTTP/1.1 +> Accept: application/vnd.git-lfs+json +> Content-Type: application/vnd.git-lfs+json +> Authorization: Basic ... (if authentication is needed) +> +> { +> "operation": "upload", +> "transfers": [ "tus.io", "basic" ], +> "objects": [ +> { +> "oid": "1111111", +> "size": 123 +> } +> ] +> } +> +``` + +In the example above `"basic"` is included for illustration but is actually +unnecessary since it is always the fallback. The client is indicating that it is +able to upload using the resumable `"tus.io"` method, should the server support +that. The server may include a chosen method in the response, which must be +one of those listed, or `"basic"`. + +### Response changes + +If the server understands the new optional `transfers` field in the request, it +should determine which of the named transfer methods it also supports, and +include the chosen one in the response in the new `transfer` field. If only +`"basic"` is supported, the field is optional since that is the default. + +If the server supports more than one of the named transfer methods, it should +pick the first one it supports, since the client will list them in order of +preference. + +Example response to the previous request if the server also supports `tus.io`: + +``` +< HTTP/1.1 200 Ok +< Content-Type: application/vnd.git-lfs+json +< +< { +< "transfer": "tus.io", +< "objects": [ +< { +< "oid": "1111111", +< "size": 123, +< "actions": { +< "upload": { +< "href": "https://some-tus-io-upload.com", +< "header": { +< "Key": "value" +< } +< }, +< "verify": { +< "href": "https://some-callback.com", +< "header": { +< "Key": "value" +< } +< } +< } +> } +< ] +< } +``` + +Apart from naming the chosen transfer method in `transfer`, the server should +also return upload / download links in the `href` field which are compatible +with the method chosen. If the server supports more than one method (and it's +advisable that the server implement at least `"basic` in all cases in addition +to more sophisticated methods, to support older clients), the `href` is likely +to be different for each. 
+ +## Updated schemas + +* [Batch request](./http-v1.3-batch-request-schema.json) +* [Batch response](./http-v1.3-batch-response-schema.json) diff --git a/docs/api/http-v1-batch-request-schema.json b/docs/api/v1/http-v1-batch-request-schema.json similarity index 88% rename from docs/api/http-v1-batch-request-schema.json rename to docs/api/v1/http-v1-batch-request-schema.json index b321fcb7..f4c79f6a 100644 --- a/docs/api/http-v1-batch-request-schema.json +++ b/docs/api/v1/http-v1-batch-request-schema.json @@ -23,6 +23,5 @@ } } }, - "required": ["objects", "operation"], - "additionalProperties": false + "required": ["objects", "operation"] } diff --git a/docs/api/http-v1-batch-response-schema.json b/docs/api/v1/http-v1-batch-response-schema.json similarity index 96% rename from docs/api/http-v1-batch-response-schema.json rename to docs/api/v1/http-v1-batch-response-schema.json index 478af7f0..afb83904 100644 --- a/docs/api/http-v1-batch-response-schema.json +++ b/docs/api/v1/http-v1-batch-response-schema.json @@ -72,6 +72,5 @@ "type": "string" }, }, - "required": ["objects"], - "additionalProperties": false + "required": ["objects"] } diff --git a/docs/api/http-v1-batch.md b/docs/api/v1/http-v1-batch.md similarity index 79% rename from docs/api/http-v1-batch.md rename to docs/api/v1/http-v1-batch.md index a69f68de..cae50c95 100644 --- a/docs/api/http-v1-batch.md +++ b/docs/api/v1/http-v1-batch.md @@ -1,12 +1,12 @@ # Git LFS v1 Batch API -The Git LFS Batch API works like the [original v1 API][v1], but uses a single +The Git LFS Batch API works like the [legacy v1 API][v1], but uses a single endpoint that accepts multiple OIDs. All requests should have the following: Accept: application/vnd.git-lfs+json Content-Type: application/vnd.git-lfs+json -[v1]: ./http-v1-original.md +[v1]: ./http-v1-legacy.md This is a newer API introduced in Git LFS v0.5.2, and made the default in Git LFS v0.6.0. The client automatically detects if the server does not @@ -21,7 +21,7 @@ manually through the Git config: ## Authentication -The Batch API authenticates the same as the original v1 API with one exception: +The Batch API authenticates the same as the legacy v1 API with one exception: The client will attempt to make requests without any authentication. This slight change allows anonymous access to public Git LFS objects. The client stores the result of this in the `lfs..access` config setting, where @@ -285,3 +285,65 @@ track usage. Some server errors may trigger the client to retry requests, such as 500, 502, 503, and 504. + +## Extended upload & download protocols + +By default it is assumed that all transfers (uploads & downloads) will be +performed via a singular HTTP resource and that the URLs provided in the +response are implemented as such. In this case each object is uploaded or +downloaded in its entirety through that one URL. + +However, in order to support more advanced transfer features such as resuming, +chunking or delegation to other services, the client can indicate in the request +its ability to handle other transfer mechanisms. + +Here's a possible example: + +```json +{ + "operation": "upload", + "accept-transfers": "tus,resumable.js", + "objects": [ + { + "oid": "1111111", + "size": 123 + } + ] +} +``` + +The `accept-transfers` field is a comma-separated list of identifiers which the +client is able to support, in order of preference. 
In this hypothetical example
+the client is indicating it is able to support resumable uploads using either
+the tus.io protocol or the resumable.js protocol. It is implicit that basic
+HTTP resources are always supported regardless of the presence or content of
+this item.
+
+If the server is able to support one of the extended transfer mechanisms, it can
+provide resources specific to that mechanism in the response, with an indicator
+of which one it picked:
+
+```json
+{
+  "transfer": "tus",
+  "objects": [
+    {
+      "oid": "1111111",
+      "size": 123,
+      "actions": {
+        "upload": {
+          "href": "https://my.tus.server.com/files/1111111"
+        }
+      }
+    }
+  ]
+}
+```
+
+In this case the server has chosen [tus.io](http://tus.io); the underlying
+transport is still HTTP, so the `href` is still a web URL, but the exact
+sequence of calls and the headers sent & received are different from a single
+resource upload. Other transfers may use other protocols.
+
+__Note__: these API features are provided for future extension and the examples
+shown may not represent actual features present in the current client.
diff --git a/docs/api/http-v1-original.md b/docs/api/v1/http-v1-legacy.md
similarity index 98%
rename from docs/api/http-v1-original.md
rename to docs/api/v1/http-v1-legacy.md
index a7c147a8..a6f12bbc 100644
--- a/docs/api/http-v1-original.md
+++ b/docs/api/v1/http-v1-legacy.md
@@ -1,6 +1,6 @@
-# Git LFS v1 Original API
+# Git LFS v1 Legacy API
 
-This describes the original API for Git LFS v0.5.x. It's already deprecated by
+This describes the legacy API for Git LFS v0.5.x. It's already deprecated by
 the [batch API][batch]. All requests should have:
 
     Accept: application/vnd.git-lfs+json
diff --git a/docs/man/git-lfs-clone.1.ronn b/docs/man/git-lfs-clone.1.ronn
index 4b8ed09c..ac257d46 100644
--- a/docs/man/git-lfs-clone.1.ronn
+++ b/docs/man/git-lfs-clone.1.ronn
@@ -19,6 +19,23 @@ downloads performed by 'git lfs pull'.
 
 All options supported by 'git clone'
 
+* `-I` `--include=`:
+  See [INCLUDE AND EXCLUDE]
+
+* `-X` `--exclude=`:
+  See [INCLUDE AND EXCLUDE]
+
+## INCLUDE AND EXCLUDE
+
+You can configure Git LFS to only fetch objects to satisfy references in certain
+paths of the repo, and/or to exclude certain paths of the repo, to reduce the
+time you spend downloading things you do not use.
+
+In lfsconfig, set lfs.fetchinclude and lfs.fetchexclude to comma-separated lists
+of paths to include/exclude in the fetch (wildcard matching as per gitignore).
+Only paths which are matched by fetchinclude and not matched by fetchexclude
+will have objects fetched for them.
+
 ## SEE ALSO
 
 git-clone(1), git-lfs-pull(1).
diff --git a/docs/man/git-lfs-config.5.ronn b/docs/man/git-lfs-config.5.ronn
index 2bc6a803..080969d7 100644
--- a/docs/man/git-lfs-config.5.ronn
+++ b/docs/man/git-lfs-config.5.ronn
@@ -26,6 +26,12 @@ lfs option can be scoped inside the configuration for a remote.
 
   The number of concurrent uploads/downloads. Default 3.
 
+* `lfs.basictransfersonly`
+
+  If set to true, only basic HTTP upload/download transfers will be used,
+  ignoring any more advanced transfers that the client/server may support.
+  This is primarily to work around bugs or incompatibilities.
+
 * `lfs.batch`
 
   Whether to use the batch API instead of requesting objects individually.
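A rough sketch of how those comma-separated include/exclude values can be parsed, consistent with the `TestFetchIncludeExcludesAreCleaned` test near the top of this diff (the helper name `splitFetchPaths` is hypothetical, and the fragment relies only on the standard `strings` package; the real logic lives in the `config` package):

```go
// splitFetchPaths splits a comma-separated lfs.fetchinclude /
// lfs.fetchexclude value into individual paths, trimming whitespace and any
// trailing "/" so that "/path/to/clean/" is treated the same as
// "/path/to/clean".
func splitFetchPaths(value string) []string {
	var paths []string
	for _, p := range strings.Split(value, ",") {
		p = strings.TrimSpace(p)
		if p == "" {
			continue
		}
		paths = append(paths, strings.TrimSuffix(p, "/"))
	}
	return paths
}
```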
diff --git a/docs/man/git-lfs-logs.1.ronn b/docs/man/git-lfs-logs.1.ronn index 6d959b8d..7102bf6f 100644 --- a/docs/man/git-lfs-logs.1.ronn +++ b/docs/man/git-lfs-logs.1.ronn @@ -5,14 +5,22 @@ git-lfs-logs(1) - Show errors from the git-lfs command `git lfs logs`
`git lfs logs`
-`git lfs logs` --clear
-`git lfs logs` --boomtown
+`git lfs logs clear`
+`git lfs logs boomtown`
## DESCRIPTION
 
 Display errors from the git-lfs command. Any time it crashes, the details are
 saved to ".git/lfs/logs".
 
+## COMMANDS
+
+* `clear`:
+  Clears all of the existing logged errors.
+
+* `boomtown`:
+  Triggers a dummy exception.
+
 ## OPTIONS
 
 Without any options, `git lfs logs` simply shows the list of error logs.
@@ -20,12 +28,6 @@ Without any options, `git lfs logs` simply shows the list of error logs.
 * :
   Shows the specified error log. Use "last" to show the most recent error.
 
-* `--clear`:
-  Clears all of the existing logged errors.
-
-* `--boomtown`:
-  Triggers a dummy exception.
-
 ## SEE ALSO
 
 Part of the git-lfs(1) suite.
diff --git a/docs/man/git-lfs-track.1.ronn b/docs/man/git-lfs-track.1.ronn
index 9c9ef83b..458c9c80 100644
--- a/docs/man/git-lfs-track.1.ronn
+++ b/docs/man/git-lfs-track.1.ronn
@@ -3,7 +3,7 @@ git-lfs-track(1) - View or add Git LFS paths to Git attributes
 
 ## SYNOPSIS
 
-`git lfs track` [...]
+`git lfs track` [options] [...]
 
 ## DESCRIPTION
 
@@ -11,6 +11,22 @@ Start tracking the given path(s) through Git LFS. The  argument
 can be a pattern or a file path. If no paths are provided, simply list the
 currently-tracked paths.
 
+## OPTIONS
+
+* `--verbose` `-v`:
+  If enabled, have `git lfs track` log files which it will touch. Disabled by
+  default.
+
+* `--dry-run` `-d`:
+  If enabled, have `git lfs track` log all actions it would normally take
+  (adding entries to .gitattributes, touching files on disk, etc) without
+  performing any mutative operations on the disk.
+
+  `git lfs track --dry-run [files]` also implicitly mimics the behavior of
+  passing `--verbose`, and will log in greater detail what it is doing.
+
+  Disabled by default.
+
 ## EXAMPLES
 
 * List the paths that Git LFS is currently tracking:
diff --git a/docs/proposals/locking.md b/docs/proposals/locking.md
new file mode 100644
index 00000000..50575819
--- /dev/null
+++ b/docs/proposals/locking.md
@@ -0,0 +1,315 @@
+# Locking feature proposal
+
+We need the ability to lock files to discourage (we can never prevent) parallel
+editing of binary files which will result in an unmergeable situation. This is
+not a common theme in git (for obvious reasons, it conflicts with its
+distributed, parallel nature), but is a requirement of any binary management
+system, since files are very often completely unmergeable, and no-one likes
+having to throw their work away & do it again.
+
+## What not to do: single branch model
+
+The simplest way to organise locking is to require that binary files are only
+ever edited on a single branch, so that editing such a file can follow a
+simple sequence:
+
+1. File starts out read-only locally
+2. User locks the file, user is required to have the latest version locally from
+   the 'main' branch
+3. User edits file & commits 1 or more times
+4. User pushes these commits to the main branch
+5. File is unlocked (and made read only locally again)
+
+## A more usable approach: multi-branch model
+
+In practice teams need to work on more than one branch, and sometimes that work
+will have corresponding binary edits.
+
+It's important to remember that the core requirement is to prevent *unintended
+parallel edits of an unmergeable file*.
+
+One way to address this would be to say that locking a file locks it across all
+branches, and that the lock is only released when the branch containing the
+edit is merged back into a 'primary' branch.
The problem is that although that allows
+branching and also prevents merge conflicts, it forces merging of feature
+branches before a further edit can be made by someone else.
+
+An alternative is that locking a file locks it across all branches, but when the
+lock is released, further locks on that file can only be taken on a descendant
+of the latest edit that has been made, whichever branch it is on. That means
+a change to the rules of the lock sequence, namely:
+
+1. File starts out read-only locally
+2. User tries to lock a file. This is only allowed if:
+   * The file is not already locked by anyone else, AND
+   * One of the following is true:
+     * The user has, or agrees to check out, a descendant of the latest commit
+       that was made for that file, whatever branch that was on, OR
+     * The user stays on their current commit but resets the locked file to the
+       state of the latest commit (making it modified locally, and
+       also cherry-picking changes for that file in practice).
+3. User edits file & commits 1 or more times, on any branch they like
+4. User pushes the commits
+5. File is unlocked if:
+   * the latest commit to that file has been pushed (on any branch), and
+   * the file is not locally edited
+
+This means that long-running branches can be maintained but that editing of a
+binary file must always incorporate the latest binary edits. This means that if
+this system is always respected, there is only ever one linear stream of
+development for this binary file, even though that 'thread' may wind its way
+across many different branches in the process.
+
+This does mean that no-one's changes are accidentally lost, but it does mean
+that we are either making new branches dependent on others, OR we're
+cherry-picking changes to individual files across branches. This does change
+the traditional git workflow, but importantly it achieves the core requirement
+of never *accidentally* losing anyone's changes. How changes are threaded
+across branches is always under the user's control.
+
+## Breaking the rules
+We must allow the user to break the rules if they know what they are doing.
+Locking is there to prevent unintended binary merge conflicts, but sometimes you
+might want to intentionally create one, with the full knowledge that you're
+going to have to manually merge the result (or more likely, pick one side and
+discard the other) later down the line. There are 2 cases of rule breaking to
+support:
+
+1. **Break someone else's lock**
+   People lock files and forget they've locked them, then go on holiday, or
+   worse, leave the company. You can't be stuck not being able to edit that
+   file, so you must be able to forcibly break someone else's lock. Ideally this
+   should result in some kind of notification to the original locker (might need
+   to be a special value-add on BB/Stash). This effectively removes the other
+   person's lock and is likely to cause them problems if they had edited and try
+   to push next time.
+
+2. **Allow a parallel lock**
+   Actually similar to breaking someone else's lock, except it lets you take
+   another lock on a file in parallel, leaving their lock in place too, and
+   knowing that you're going to have to resolve the merge problem later. You
+   could handle this just by manually making files read/write, then using 'force
+   push' to override hooks that prevent pushing when not locked.
However, by
+explicitly registering a parallel lock (possible form: 'git lfs lock
+--force') this could be recorded and communicated to anyone else with a lock,
+letting them know about possible merge issues down the line.
+
+## Detailed feature points
+|No | Feature | Notes
+|---|---------|------------------
+|1 |Lock server must be available at same API URL|
+|2 |Identify unmergeable files as subset of lfs files|`git lfs track -b` ?
+|3 |Make unmergeable files read-only on checkout|Perform in smudge filter
+|4 |Lock a file
  • Check with server which must atomically check/set
  • Check person requesting the lock is checked out on a commit which is a descendant of the last edit of that file (locally or on server, although last lock shouldn't have been released until push anyway), or allow --force to break rule
  • Record lock on server
  • Make file read/write locally if success
|`git lfs lock `? +|5 |Release a lock
  • Check if locally modified, if so must discard
  • Check if user has more recent commit of this file than server, if so must push first
  • Release lock on server atomically
  • Make local file read-only
|`git lfs unlock `?
+|6 |Break a lock, i.e. override someone else's lock and take it yourself.
  • Release lock on server atomically
  • Proceed as per 'Lock a file'
  • Notify original lock holder HOW?
|`git lfs lock -break `? +|7 |Release lock on reset (maybe). Configurable option / prompt? May be resetting just to start editing again| +|8 |Release lock on push (maybe, if unmodified). See above| +|9 |Cater for read-only binary files when merging locally
  • Because files are read-only this might prevent merge from working when actually it's valid.
  • Always fine to merge the latest version of a binary file to anywhere else
  • Fine to merge the non-latest version if user is aware that this may cause merge problems (see Breaking the rules)
  • Therefore this feature is about dealing with the read-only flag and issuing a warning if not the latest
| +|10 |List current locks
  • That the current user has
  • That anyone has
  • Potentially scoped to folder
|`git lfs lock --list [paths...]`
+|11 |Reject a push containing a binary file currently locked by someone else|pre-receive hook on server, allow --force to override (i.e. existing parameter to git push)
+
+## Implementation details
+### Types
+To make implementing locking on the lfs-test-server as well as other servers
+in the future easier, it makes sense to create a `lock` package that can be
+depended upon from any server. This will go along with Steve's refactor which
+touches the `lfs` package quite a bit.
+
+Below are enumerated some of the types that will presumably land in this
+sub-package.
+
+```go
+// Lock represents a single lock against a particular path.
+//
+// Locks returned from the API may or may not be currently active, according to
+// the Active() method.
+type Lock struct {
+	// Id is the unique identifier corresponding to this particular Lock. It
+	// must be consistent with the local copy, and the server's copy.
+	Id string `json:"id"`
+	// Path is an absolute path to the file that is locked as a part of this
+	// lock.
+	Path string `json:"path"`
+	// Committer is the author who initiated this lock.
+	Committer struct {
+		Name  string `json:"name"`
+		Email string `json:"email"`
+	} `json:"creator"`
+	// CommitSHA is the commit that this Lock was created against. It is
+	// strictly equal to the SHA of the minimum commit negotiated in order
+	// to create this lock.
+	CommitSHA string `json:"commit_sha"`
+	// LockedAt is a required parameter that represents the instant in time
+	// that this lock was created. For most server implementations, this
+	// should be set to the instant at which the lock was initially
+	// received.
+	LockedAt time.Time `json:"locked_at"`
+	// UnlockedAt is an optional parameter that represents the instant in
+	// time that the lock stopped being active. If the lock is still active,
+	// the server can either a) not send this field, or b) send the
+	// zero-value of time.Time.
+	UnlockedAt time.Time `json:"unlocked_at,omitempty"`
+}
+
+// Active returns whether or not the given lock is still active against the file
+// that it is protecting.
+func (l *Lock) Active() bool {
+	return l.UnlockedAt.IsZero()
+}
+```
+
+### Proposed Commands
+
+#### `git lfs lock `
+
+The `lock` command will be used in accordance with the multi-branch flow as
+proposed above to request that a lock be granted for the specific path passed
+as an argument to the command.
+
+```go
+// LockRequest encapsulates the payload sent across the API when a client would
+// like to obtain a lock against a particular path on a given remote.
+type LockRequest struct {
+	// Path is the path that the client would like to obtain a lock against.
+	Path string `json:"path"`
+	// LatestRemoteCommit is the SHA of the last known commit from the
+	// remote that we are trying to create the lock against, as found in
+	// `.git/refs/origin/`.
+	LatestRemoteCommit string `json:"latest_remote_commit"`
+	// Committer is the individual that wishes to obtain the lock.
+	Committer struct {
+		// Name is the name of the individual who would like to obtain the
+		// lock, for instance: "Rick Olson".
+		Name string `json:"name"`
+		// Email is the email associated with the individual who would
+		// like to obtain the lock, for instance: "rick@github.com".
+		Email string `json:"email"`
+	} `json:"committer"`
+}
+```
+
+```go
+// LockResponse encapsulates the information sent over the API in response to
+// a `LockRequest`.
+type LockResponse struct {
+	// Lock is the Lock that was optionally created in response to the
+	// payload that was sent (see above). If the lock already exists, then
+	// the existing lock is sent in this field instead, and the author of
+	// that lock remains the same, meaning that the client failed to obtain
+	// that lock. An HTTP status of "409 - Conflict" is used here.
+	//
+	// If the lock was unable to be created, this field will hold the
+	// zero-value of Lock and the Err field will provide a more detailed set
+	// of information.
+	//
+	// If an error was experienced in creating this lock, then the
+	// zero-value of Lock should be sent here instead.
+	Lock Lock `json:"lock"`
+	// CommitNeeded holds the minimum commit SHA that the client must have
+	// to obtain the lock.
+	CommitNeeded string `json:"commit_needed"`
+	// Err is the optional error that was encountered while trying to create
+	// the above lock.
+	Err error `json:"error,omitempty"`
+}
+```
+
+
+#### `git lfs unlock `
+
+The `unlock` command is responsible for releasing the lock against a particular
+file. The command takes a `` argument which the LFS client will have to
+internally resolve into an Id to unlock.
+
+The API associated with this command can also be used on the server to remove
+existing locks after a push.
+
+```go
+// An UnlockRequest is sent by the client over the API when they wish to remove
+// a lock associated with the given Id.
+type UnlockRequest struct {
+	// Id is the identifier of the lock that the client wishes to remove.
+	Id string `json:"id"`
+}
+```
+
+```go
+// UnlockResult is the result sent back from the API when asked to remove a
+// lock.
+type UnlockResult struct {
+	// Lock is the lock corresponding to the asked-about lock in the
+	// `UnlockPayload` (see above). If no matching lock was found, this
+	// field will take the zero-value of Lock, and Err will be non-nil.
+	Lock Lock `json:"lock"`
+	// Err is an optional field which holds any error that was experienced
+	// while removing the lock.
+	Err error `json:"error,omitempty"`
+}
+```
+
+Clients can determine whether or not their lock was removed by calling the
+`Active()` method on the returned Lock, if `UnlockResult.Err` is nil.
+
+#### `git lfs locks (-r |-b )|(-i id)`
+
+For many operations, the LFS client will need to have knowledge of existing
+locks on the server. Additionally, the client should not have to self-sort/index
+this (potentially) large set. To remove this need, both the `locks` command and
+the corresponding API method take several filters.
+
+Clients should turn the flag-values that were passed during the command
+invocation into `Filter`s as described below, and batch them up into the
+`Filters` field in the `LockListRequest`.
+
+```go
+// Property is a constant-type that narrows fields pertaining to the server's
+// Locks.
+type Property string
+
+const (
+	Branch Property = "branch"
+	Id     Property = "id"
+	// (etc) ...
+)
+
+// LockListRequest encapsulates the request sent to the server when the client
+// would like a list of locks that match the given criteria.
+type LockListRequest struct {
+	// Filters is the set of filters to query against. If the client wishes
+	// to obtain a list of all locks, an empty array should be passed here.
+	Filters []struct {
+		// Prop is the property to search against.
+		Prop Property `json:"prop"`
+		// Value is the value that the property must take.
+		Value string `json:"value"`
+	} `json:"filters"`
+	// Cursor is an optional field used to tell the server which lock was
+	// seen last, if scanning through multiple pages of results.
+	//
+	// Servers must return a list of locks sorted in reverse chronological
+	// order, so the Cursor provides a consistent method of viewing all
+	// locks, even if more were created between two requests.
+	Cursor string `json:"cursor,omitempty"`
+	// Limit is the maximum number of locks to return in a single page.
+	Limit int `json:"limit"`
+}
+```
+
+```go
+// LockList encapsulates a set of Locks.
+type LockList struct {
+	// Locks is the set of locks returned back, typically matching the query
+	// parameters sent in the LockListRequest call. If no locks were matched
+	// from a given query, then `Locks` will be represented as an empty
+	// array.
+	Locks []Lock `json:"locks"`
+	// NextCursor returns the Id of the Lock the client should update its
+	// cursor to, if there are multiple pages of results for a particular
+	// `LockListRequest`.
+	NextCursor string `json:"next_cursor,omitempty"`
+	// Err populates any error that was encountered during the search. If no
+	// error was encountered and the operation was successful, then a value
+	// of nil will be passed here.
+	Err error `json:"error,omitempty"`
+}
+```
diff --git a/docs/proposals/locking_api.md b/docs/proposals/locking_api.md
new file mode 100644
index 00000000..ad469085
--- /dev/null
+++ b/docs/proposals/locking_api.md
@@ -0,0 +1,180 @@
+# Locking API proposal
+
+## POST /locks
+
+| Method  | Accept                         | Content-Type                   | Authorization |
+|---------|--------------------------------|--------------------------------|---------------|
+| `POST`  | `application/vnd.git-lfs+json` | `application/vnd.git-lfs+json` | Basic         |
+
+### Request
+
+```
+> POST https://git-lfs-server.com/locks
+> Accept: application/vnd.git-lfs+json
+> Authorization: Basic
+> Content-Type: application/vnd.git-lfs+json
+>
+> {
+>   path: "/path/to/file",
+>   remote: "origin",
+>   latest_remote_commit: "d3adbeef",
+>   committer: {
+>     name: "Jane Doe",
+>     email: "jane@example.com"
+>   }
+> }
+```
+
+### Response
+
+* **Successful response**
+```
+< HTTP/1.1 201 Created
+< Content-Type: application/vnd.git-lfs+json
+<
+< {
+<   lock: {
+<     id: "some-uuid",
+<     path: "/path/to/file",
+<     committer: {
+<       name: "Jane Doe",
+<       email: "jane@example.com"
+<     },
+<     commit_sha: "d3adbeef",
+<     locked_at: "2016-05-17T15:49:06+00:00"
+<   }
+< }
+```
+
+* **Bad request: minimum commit not met**
+```
+< HTTP/1.1 400 Bad request
+< Content-Type: application/vnd.git-lfs+json
+<
+< {
+<   "commit_needed": "other_sha"
+< }
+```
+
+* **Bad request: lock already present**
+```
+< HTTP/1.1 409 Conflict
+< Content-Type: application/vnd.git-lfs+json
+<
+< {
+<   lock: {
+<     /* the previously created lock */
+<   },
+<   error: "already created lock"
+< }
+```
+
+* **Bad response: server error**
+```
+< HTTP/1.1 500 Internal server error
+< Content-Type: application/vnd.git-lfs+json
+<
+< {
+<   error: "unable to create lock"
+< }
+```
+
+## POST /locks/:id/unlock
+
+| Method  | Accept                         | Content-Type | Authorization |
+|---------|--------------------------------|--------------|---------------|
+| `POST`  | `application/vnd.git-lfs+json` | None         | Basic         |
+
+### Request
+
+```
+> POST https://git-lfs-server.com/locks/:id/unlock
+> Accept: application/vnd.git-lfs+json
+> Authorization: Basic
+```
+
+### Response
+
+* **Success: unlocked**
+```
+< HTTP/1.1 200 Ok
+< Content-Type: application/vnd.git-lfs+json
+<
+< {
+<   lock: {
+<     id:
"some-uuid", +< path: "/path/to/file", +< committer: { +< name: "Jane Doe", +< email: "jane@example.com" +< }, +< commit_sha: "d3adbeef", +< locked_at: "2016-05-17T15:49:06+00:00", +< unlocked_at: "2016-05-17T15:49:06+00:00" +< } +< } +} +``` + +* **Bad response: server error** +``` +< HTTP/1.1 500 Internal error +< Content-Type: application/vnd.git-lfs+json +< +< { +< error: "github/git-lfs: internal server error" +< } +``` + +## GET /locks + +| Method | Accept | Content-Type | Authorization | +|--------|-------------------------------|--------------|---------------| +| `GET` | `application/vnd.git-lfs+json | None | Basic | + +### Request + +``` +> GET https://git-lfs-server.com/locks?filters...&cursor=&limit= +> Accept: application/vnd.git-lfs+json +> Authorization: Basic +``` + +### Response + +* **Success: locks found** + +Note: no matching locks yields a payload of `locks: []`, and a status of 200. + +``` +< HTTP/1.1 200 Ok +< Content-Type: application/vnd.git-lfs+json +< +< { +< locks: [ +< { +< id: "some-uuid", +< path: "/path/to/file", +< committer": { +< name: "Jane Doe", +< email: "jane@example.com" +< }, +< commit_sha: "1ec245f", +< locked_at: "2016-05-17T15:49:06+00:00" +< } +< ], +< next_cursor: "optional-next-id", +< error: "optional error" +< } +``` + +* **Bad response: some locks may have matched, but the server encountered an error** +``` +< HTTP/1.1 500 Internal error +< Content-Type: application/vnd.git-lfs+json +< +< { +< locks: [], +< error: "github/git-lfs: internal server error" +< } +``` diff --git a/docs/proposals/transfer_adapters.md b/docs/proposals/transfer_adapters.md new file mode 100644 index 00000000..e801e83b --- /dev/null +++ b/docs/proposals/transfer_adapters.md @@ -0,0 +1,91 @@ +# Transfer adapters for resumable upload / download + +## Concept + +To allow the uploading and downloading of LFS content to be implemented in more +ways than the current simple HTTP GET/PUT approach. Features that could be +supported by opening this up to other protocols might include: + + - Resumable transfers + - Block-level de-duplication + - Delegation to 3rd party services like Dropbox / Google Drive / OneDrive + - Non-HTTP services + +## API extensions + +See the [API documentation](../http-v1-batch.md) for specifics. All changes +are optional extras so there are no breaking changes to the API. + +The current HTTP GET/PUT system will remain the default. When a version of the +git-lfs client supports alternative transfer mechanisms, it notifies the server +in the API request using the `accept-transfers` field. + +If the server also supports one of the mechanisms the client advertised, it may +select one and alter the upload / download URLs to point at resources +compatible with this transfer mechanism. It must also indicate the chosen +transfer mechanism in the response using the `transfer` field. + +The URLs provided in this case may not be HTTP, they may be custom protocols. +It is up to each individual transfer mechanism to define how URLs are used. + +## Client extensions + +### Phase 1: refactoring & abstraction + +1. Introduce a new concept of 'transfer adapter'. +2. Adapters can provide either upload or download support, or both. This is + necessary because some mechanisms are unidirectional, e.g. HTTP Content-Range + is download only, tus.io is upload only. +3. Refactor our current HTTP GET/PUT mechanism to be the default implementation + for both upload & download +4. 
The LFS core will pass oids to transfer to this adapter in bulk, and receive
+   events back from the adapter for transfer progress, and file completion.
+5. Each adapter is responsible for its own parallelism, but should respect the
+   `lfs.concurrenttransfers` setting. For example the default (current) approach
+   will parallelise on files (oids), but others may parallelise in other ways,
+   e.g. downloading multiple parts of the same file at once.
+6. Each adapter should store its own temporary files. On file completion it must
+   notify the core, which in the case of a download is then responsible for
+   moving a completed file into permanent storage.
+7. Update the core to have a registry of available transfer mechanisms which it
+   passes to the API, and can recognise a chosen one in the response. Default
+   to our refactored original.
+
+### Phase 2: basic resumable downloads
+
+1. Add a client transfer adapter for [HTTP Range headers](https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35)
+2. Add a range request reference implementation to our integration test server
+
+### Phase 3: basic resumable uploads
+
+1. Add a client transfer adapter for [tus.io](http://tus.io) (upload only)
+2. Add a tus.io reference implementation to our integration test server
+
+### Phase 4: external transfer adapters
+
+Ideally we should allow people to add other transfer implementations so that
+we don't have to implement everything, or bloat the git-lfs binary with every
+custom system possible.
+
+Because Go is statically linked it's not possible to extend client functionality
+at runtime through loading libraries, so instead I propose allowing an external
+process to be invoked, and communicated with via a defined stream protocol. This
+protocol will be logically identical to the internal adapters; the core passing
+oids and receiving back progress and completion notifications; just that the
+implementation will be in an external process and the messages will be
+serialised over streams.
+
+Only one process will be launched and will remain for the entire period of all
+transfers. Like internal adapters, the external process will be responsible for
+its own parallelism and temporary storage, so internally they can (should) do
+multiple transfers at once.
+
+1. Build a generic 'external' adapter which can invoke a named process and
+   communicate with it using the standard stream protocol (probably just over
+   stdout / stdin)
+2. Establish a configuration for external adapters; minimum is an identifier
+   (client and server must agree on what that is) and a path to invoke
+3. Implement a small test process in Go which simply wraps the default HTTP
+   mechanism in an external process, to prove the approach (not in release)
+
+
diff --git a/lfs/errors.go b/errutil/errors.go
similarity index 91%
rename from lfs/errors.go
rename to errutil/errors.go
index ee596644..8a6c351a 100644
--- a/lfs/errors.go
+++ b/errutil/errors.go
@@ -1,4 +1,6 @@
-package lfs
+// Package errutil provides common error handling tools
+// NOTE: Subject to change, do not rely on this package from outside git-lfs source
+package errutil
 
 // The LFS error system provides a simple wrapper around Go errors and the
 // ability to inspect errors.
It is strongly influenced by Dave Cheney's post
@@ -39,9 +41,9 @@ package lfs
 // Example:
 //
 //   err := lfs.SomeFunction()
-//   lfs.ErrorSetContext(err, "foo", "bar")
-//   lfs.ErrorGetContext(err, "foo") // => "bar"
-//   lfs.ErrorDelContext(err, "foo")
+//   errutil.ErrorSetContext(err, "foo", "bar")
+//   errutil.ErrorGetContext(err, "foo") // => "bar"
+//   errutil.ErrorDelContext(err, "foo")
 //
 // Wrapped errors also contain the stack from the point at which they are
 // called. The stack is accessed via ErrorStack(). Calling ErrorStack() on a
@@ -175,6 +177,17 @@ func IsBadPointerKeyError(err error) bool {
 	return false
 }
 
+// If an error is a bad pointer error of any type, returns NotAPointerError
+func StandardizeBadPointerError(err error) error {
+	if IsBadPointerKeyError(err) {
+		badErr := err.(badPointerKeyError)
+		if badErr.Expected == "version" {
+			return NewNotAPointerError(err)
+		}
+	}
+	return err
+}
+
 // IsDownloadDeclinedError indicates that the smudge operation should not download.
 // TODO: I don't really like using errors to control that flow, it should be refactored.
 func IsDownloadDeclinedError(err error) bool {
@@ -369,7 +382,7 @@ func (e fatalError) Fatal() bool {
 	return true
 }
 
-func newFatalError(err error) error {
+func NewFatalError(err error) error {
 	return fatalError{newWrappedError(err, "Fatal error")}
 }
 
@@ -387,7 +400,7 @@ func (e notImplementedError) NotImplemented() bool {
 	return true
 }
 
-func newNotImplementedError(err error) error {
+func NewNotImplementedError(err error) error {
 	return notImplementedError{newWrappedError(err, "Not implemented")}
 }
 
@@ -405,7 +418,7 @@ func (e authError) AuthError() bool {
 	return true
 }
 
-func newAuthError(err error) error {
+func NewAuthError(err error) error {
 	return authError{newWrappedError(err, "Authentication required")}
 }
 
@@ -423,7 +436,7 @@ func (e invalidPointerError) InvalidPointer() bool {
 	return true
 }
 
-func newInvalidPointerError(err error) error {
+func NewInvalidPointerError(err error) error {
 	return invalidPointerError{newWrappedError(err, "Invalid pointer")}
 }
 
@@ -441,7 +454,7 @@ func (e invalidRepoError) InvalidRepo() bool {
 	return true
 }
 
-func newInvalidRepoError(err error) error {
+func NewInvalidRepoError(err error) error {
 	return invalidRepoError{newWrappedError(err, "Not in a git repository")}
 }
 
@@ -459,7 +472,7 @@ func (e smudgeError) SmudgeError() bool {
 	return true
 }
 
-func newSmudgeError(err error, oid, filename string) error {
+func NewSmudgeError(err error, oid, filename string) error {
 	e := smudgeError{newWrappedError(err, "Smudge error")}
 	ErrorSetContext(e, "OID", oid)
 	ErrorSetContext(e, "FileName", filename)
@@ -480,7 +493,7 @@ func (e cleanPointerError) CleanPointerError() bool {
 	return true
 }
 
-func newCleanPointerError(err error, pointer *Pointer, bytes []byte) error {
+func NewCleanPointerError(err error, pointer interface{}, bytes []byte) error {
 	e := cleanPointerError{newWrappedError(err, "Clean pointer error")}
 	ErrorSetContext(e, "pointer", pointer)
 	ErrorSetContext(e, "bytes", bytes)
@@ -501,7 +514,7 @@ func (e notAPointerError) NotAPointerError() bool {
 	return true
 }
 
-func newNotAPointerError(err error) error {
+func NewNotAPointerError(err error) error {
 	return notAPointerError{newWrappedError(err, "Not a valid Git LFS pointer file.")}
 }
 
@@ -519,7 +532,7 @@ func (e badPointerKeyError) BadPointerKeyError() bool {
 	return true
 }
 
-func newBadPointerKeyError(expected, actual string) error {
+func NewBadPointerKeyError(expected, actual string) error {
 	err := fmt.Errorf("Error parsing LFS Pointer.
Expected key %s, got %s", expected, actual) return badPointerKeyError{expected, actual, newWrappedError(err, "")} } @@ -538,7 +551,7 @@ func (e downloadDeclinedError) DownloadDeclinedError() bool { return true } -func newDownloadDeclinedError(err error) error { +func NewDownloadDeclinedError(err error) error { return downloadDeclinedError{newWrappedError(err, "File missing and download is not allowed")} } @@ -556,7 +569,7 @@ func (e retriableError) RetriableError() bool { return true } -func newRetriableError(err error) error { +func NewRetriableError(err error) error { return retriableError{newWrappedError(err, "")} } diff --git a/lfs/errors_test.go b/errutil/errors_test.go similarity index 91% rename from lfs/errors_test.go rename to errutil/errors_test.go index a1db0566..45b93cc4 100644 --- a/lfs/errors_test.go +++ b/errutil/errors_test.go @@ -1,4 +1,4 @@ -package lfs +package errutil import ( "errors" @@ -16,7 +16,7 @@ func TestChecksHandleGoErrors(t *testing.T) { func TestCheckHandlesWrappedErrors(t *testing.T) { err := errors.New("Go error") - fatal := newFatalError(err) + fatal := NewFatalError(err) if !IsFatalError(fatal) { t.Error("expected error to be fatal") @@ -26,8 +26,8 @@ func TestCheckHandlesWrappedErrors(t *testing.T) { func TestBehaviorWraps(t *testing.T) { err := errors.New("Go error") - fatal := newFatalError(err) - ni := newNotImplementedError(fatal) + fatal := NewFatalError(err) + ni := NewNotImplementedError(fatal) if !IsNotImplementedError(ni) { t.Error("expected erro to be not implemeted") @@ -54,7 +54,7 @@ func TestContextOnGoErrors(t *testing.T) { } func TestContextOnWrappedErrors(t *testing.T) { - err := newFatalError(errors.New("Go error")) + err := NewFatalError(errors.New("Go error")) ErrorSetContext(err, "foo", "bar") @@ -80,7 +80,7 @@ func TestStack(t *testing.T) { t.Error("expected to get no stack from a Go error") } - s = ErrorStack(newFatalError(errors.New("Go error"))) + s = ErrorStack(NewFatalError(errors.New("Go error"))) if len(s) == 0 { t.Error("expected to get a stack from a wrapped error") } diff --git a/git-lfs.go b/git-lfs.go index b6a50258..0c9a9f36 100644 --- a/git-lfs.go +++ b/git-lfs.go @@ -8,6 +8,7 @@ import ( "syscall" "github.com/github/git-lfs/commands" + "github.com/github/git-lfs/httputil" "github.com/github/git-lfs/lfs" ) @@ -32,12 +33,12 @@ func main() { }() commands.Run() - lfs.LogHttpStats() + httputil.LogHttpStats() once.Do(clearTempObjects) } func clearTempObjects() { if err := lfs.ClearTempObjects(); err != nil { - fmt.Fprintf(os.Stderr, "Error opening %q to clear old temp files: %s\n", lfs.LocalObjectTempDir, err) + fmt.Fprintf(os.Stderr, "Error clearing old temp files: %s\n", err) } } diff --git a/git/git.go b/git/git.go index 1f981f61..2edaa997 100644 --- a/git/git.go +++ b/git/git.go @@ -1,4 +1,5 @@ // Package git contains various commands that shell out to git +// NOTE: Subject to change, do not rely on this package from outside git-lfs source package git import ( @@ -15,7 +16,7 @@ import ( "time" "github.com/github/git-lfs/subprocess" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/rubyist/tracerx" ) type RefType int @@ -66,12 +67,21 @@ func ResolveRef(ref string) (*Ref, error) { if err != nil { return nil, err } - lines := strings.Split(outp, "\n") - if len(lines) <= 1 { + if outp == "" { return nil, fmt.Errorf("Git can't resolve ref: %q", ref) } + lines := strings.Split(outp, "\n") fullref := &Ref{Sha: lines[0]} + + if len(lines) == 1 { + // ref is a sha1 and has no symbolic-full-name 
+ fullref.Name = lines[0] // fullref.Sha + fullref.Type = RefTypeOther + return fullref, nil + } + + // parse the symbolic-full-name fullref.Type, fullref.Name = ParseRefToTypeAndName(lines[1]) return fullref, nil } @@ -115,7 +125,8 @@ func RemoteForCurrentBranch() (string, error) { return remote, nil } -// RemoteRefForCurrentBranch returns the full remote ref (remote/remotebranch) that the current branch is tracking +// RemoteRefNameForCurrentBranch returns the full remote ref (refs/remotes/{remote}/{remotebranch}) +// that the current branch is tracking. func RemoteRefNameForCurrentBranch() (string, error) { ref, err := CurrentRef() if err != nil { @@ -133,7 +144,7 @@ func RemoteRefNameForCurrentBranch() (string, error) { remotebranch := RemoteBranchForLocalBranch(ref.Name) - return remote + "/" + remotebranch, nil + return fmt.Sprintf("refs/remotes/%s/%s", remote, remotebranch), nil } // RemoteForBranch returns the remote name that a given local branch is tracking (blank if none) @@ -183,9 +194,13 @@ func LocalRefs() ([]*Ref, error) { if err != nil { return nil, fmt.Errorf("Failed to call git show-ref: %v", err) } - cmd.Start() var refs []*Ref + + if err := cmd.Start(); err != nil { + return refs, err + } + scanner := bufio.NewScanner(outp) for scanner.Scan() { line := strings.TrimSpace(scanner.Text()) @@ -895,13 +910,16 @@ func RemoteRefs(remoteName string) ([]*Ref, error) { // Both pattern and the results are relative to the current working directory, not // the root of the repository func GetTrackedFiles(pattern string) ([]string, error) { + safePattern := sanitizePattern(pattern) + sanitized := len(safePattern) < len(pattern) + var ret []string cmd := subprocess.ExecCommand("git", "-c", "core.quotepath=false", // handle special chars in filenames "ls-files", "--cached", // include things which are staged but not committed right now "--", // no ambiguous patterns - pattern) + safePattern) outp, err := cmd.StdoutPipe() if err != nil { @@ -911,8 +929,23 @@ func GetTrackedFiles(pattern string) ([]string, error) { scanner := bufio.NewScanner(outp) for scanner.Scan() { line := scanner.Text() + + // If the given pattern was sanitized, then skip all files which + // are not direct descendants of the repository's root. + if sanitized && filepath.Dir(line) != "." { + continue + } + ret = append(ret, strings.TrimSpace(line)) } return ret, cmd.Wait() } + +func sanitizePattern(pattern string) string { + if strings.HasPrefix(pattern, "/") { + return pattern[1:] + } + + return pattern } diff --git a/git/git_test.go b/git/git_test.go index 1306cb2c..62752947 100644 --- a/git/git_test.go +++ b/git/git_test.go @@ -10,7 +10,7 @@ import ( .
"github.com/github/git-lfs/git" "github.com/github/git-lfs/test" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) func TestCurrentRefAndCurrentRemoteRef(t *testing.T) { @@ -51,26 +51,30 @@ func TestCurrentRefAndCurrentRemoteRef(t *testing.T) { outputs := repo.AddCommits(inputs) // last commit was on branch3 ref, err := CurrentRef() - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, &Ref{"branch3", RefTypeLocalBranch, outputs[3].Sha}, ref) test.RunGitCommand(t, true, "checkout", "master") ref, err = CurrentRef() - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, &Ref{"master", RefTypeLocalBranch, outputs[2].Sha}, ref) // Check remote repo.AddRemote("origin") test.RunGitCommand(t, true, "push", "-u", "origin", "master:someremotebranch") ref, err = CurrentRemoteRef() - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, &Ref{"origin/someremotebranch", RefTypeRemoteBranch, outputs[2].Sha}, ref) refname, err := RemoteRefNameForCurrentBranch() - assert.Equal(t, nil, err) - assert.Equal(t, "origin/someremotebranch", refname) + assert.Nil(t, err) + assert.Equal(t, "refs/remotes/origin/someremotebranch", refname) remote, err := RemoteForCurrentBranch() - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, "origin", remote) + + ref, err = ResolveRef(outputs[2].Sha) + assert.Nil(t, err) + assert.Equal(t, &Ref{outputs[2].Sha, RefTypeOther, outputs[2].Sha}, ref) } func TestRecentBranches(t *testing.T) { @@ -262,19 +266,26 @@ func TestWorkTrees(t *testing.T) { } func TestVersionCompare(t *testing.T) { - assert.Equal(t, true, IsVersionAtLeast("2.6.0", "2.6.0")) - assert.Equal(t, true, IsVersionAtLeast("2.6.0", "2.6")) - assert.Equal(t, true, IsVersionAtLeast("2.6.0", "2")) - assert.Equal(t, true, IsVersionAtLeast("2.6.10", "2.6.5")) - assert.Equal(t, true, IsVersionAtLeast("2.8.1", "2.7.2")) + assert.True(t, IsVersionAtLeast("2.6.0", "2.6.0")) + assert.True(t, IsVersionAtLeast("2.6.0", "2.6")) + assert.True(t, IsVersionAtLeast("2.6.0", "2")) + assert.True(t, IsVersionAtLeast("2.6.10", "2.6.5")) + assert.True(t, IsVersionAtLeast("2.8.1", "2.7.2")) - assert.Equal(t, false, IsVersionAtLeast("1.6.0", "2")) - assert.Equal(t, false, IsVersionAtLeast("2.5.0", "2.6")) - assert.Equal(t, false, IsVersionAtLeast("2.5.0", "2.5.1")) - assert.Equal(t, false, IsVersionAtLeast("2.5.2", "2.5.10")) + assert.False(t, IsVersionAtLeast("1.6.0", "2")) + assert.False(t, IsVersionAtLeast("2.5.0", "2.6")) + assert.False(t, IsVersionAtLeast("2.5.0", "2.5.1")) + assert.False(t, IsVersionAtLeast("2.5.2", "2.5.10")) } func TestGitAndRootDirs(t *testing.T) { + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + }() + git, root, err := GitAndRootDirs() if err != nil { t.Fatal(err) @@ -314,25 +325,25 @@ func TestGetTrackedFiles(t *testing.T) { repo.AddCommits(inputs) tracked, err := GetTrackedFiles("*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) // for direct comparison fulllist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt"} assert.Equal(t, fulllist, tracked) tracked, err = GetTrackedFiles("*file*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) sublist := []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt"} assert.Equal(t, sublist, tracked) tracked, err 
= GetTrackedFiles("folder1/*") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"folder1/anotherfile.txt", "folder1/file10.txt"} assert.Equal(t, sublist, tracked) tracked, err = GetTrackedFiles("folder2/*") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"folder2/folder3/deep.txt", "folder2/something.txt"} assert.Equal(t, sublist, tracked) @@ -340,17 +351,23 @@ func TestGetTrackedFiles(t *testing.T) { // relative dir os.Chdir("folder1") tracked, err = GetTrackedFiles("*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) sublist = []string{"anotherfile.txt", "file10.txt"} assert.Equal(t, sublist, tracked) os.Chdir("..") + // absolute paths only includes matches in repo root + tracked, err = GetTrackedFiles("/*.txt") + assert.Nil(t, err) + sort.Strings(tracked) + assert.Equal(t, []string{"file1.txt", "file2.txt", "file3.txt", "file4.txt"}, tracked) + // Test includes staged but uncommitted files ioutil.WriteFile("z_newfile.txt", []byte("Hello world"), 0644) test.RunGitCommand(t, true, "add", "z_newfile.txt") tracked, err = GetTrackedFiles("*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) fulllist = append(fulllist, "z_newfile.txt") assert.Equal(t, fulllist, tracked) @@ -358,21 +375,21 @@ func TestGetTrackedFiles(t *testing.T) { // Test includes modified files (not staged) ioutil.WriteFile("file1.txt", []byte("Modifications"), 0644) tracked, err = GetTrackedFiles("*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, fulllist, tracked) // Test includes modified files (staged) test.RunGitCommand(t, true, "add", "file1.txt") tracked, err = GetTrackedFiles("*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) assert.Equal(t, fulllist, tracked) // Test excludes deleted files (not committed) test.RunGitCommand(t, true, "rm", "file2.txt") tracked, err = GetTrackedFiles("*.txt") - assert.Equal(t, nil, err) + assert.Nil(t, err) sort.Strings(tracked) deletedlist := []string{"file1.txt", "file3.txt", "file4.txt", "folder1/anotherfile.txt", "folder1/file10.txt", "folder2/folder3/deep.txt", "folder2/something.txt", "z_newfile.txt"} assert.Equal(t, deletedlist, tracked) @@ -380,12 +397,38 @@ func TestGetTrackedFiles(t *testing.T) { } func TestLocalRefs(t *testing.T) { + repo := test.NewRepo(t) + repo.Pushd() + defer func() { + repo.Popd() + repo.Cleanup() + }() + + repo.AddCommits([]*test.CommitInput{ + { + Files: []*test.FileInput{ + {Filename: "file1.txt", Size: 20}, + }, + }, + { + NewBranch: "branch", + ParentBranches: []string{"master"}, + Files: []*test.FileInput{ + {Filename: "file1.txt", Size: 20}, + }, + }, + }) + + test.RunGitCommand(t, true, "tag", "v1") + refs, err := LocalRefs() if err != nil { t.Fatal(err) } + actual := make(map[string]bool) for _, r := range refs { + t.Logf("REF: %s", r.Name) switch r.Type { case RefTypeHEAD: t.Errorf("Local HEAD ref: %v", r) @@ -393,6 +436,22 @@ func TestLocalRefs(t *testing.T) { t.Errorf("Stash or unknown ref: %v", r) case RefTypeRemoteBranch, RefTypeRemoteTag: t.Errorf("Remote ref: %v", r) + default: + actual[r.Name] = true } } + + expected := []string{"master", "branch", "v1"} + found := 0 + for _, refname := range expected { + if actual[refname] { + found += 1 + } else { + t.Errorf("could not find ref %q", refname) + } + } + + if found != len(expected) { + t.Errorf("Unexpected local refs: %v", actual) + } } diff --git a/glide.lock b/glide.lock new 
file mode 100644 index 00000000..4eaaa5a9 --- /dev/null +++ b/glide.lock @@ -0,0 +1,40 @@ +hash: 275cba8ff30fe108a79618e8ad803f600136572260dc78a7d879bd248b3bdbdb +updated: 2016-05-25T10:47:36.829238548-06:00 +imports: +- name: github.com/bgentry/go-netrc + version: 9fd32a8b3d3d3f9d43c341bfe098430e07609480 + subpackages: + - netrc +- name: github.com/cheggaaa/pb + version: bd14546a551971ae7f460e6d6e527c5b56cd38d7 +- name: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- name: github.com/kr/pretty + version: 088c856450c08c03eb32f7a6c221e6eefaa10e6f +- name: github.com/kr/pty + version: 5cf931ef8f76dccd0910001d74a58a7fca84a83d +- name: github.com/kr/text + version: 6807e777504f54ad073ecef66747de158294b639 +- name: github.com/olekukonko/ts + version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8 +- name: github.com/rubyist/tracerx + version: d7bcc0bc315bed2a841841bee5dbecc8d7d7582f +- name: github.com/spf13/cobra + version: c55cdf33856a08e4822738728b41783292812889 +- name: github.com/spf13/pflag + version: 580b9be06c33d8ba9dcc8757ea56b7642472c2f5 +- name: github.com/stretchr/testify + version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d +- name: github.com/technoweenie/go-contentaddressable + version: 38171def3cd15e3b76eb156219b3d48704643899 +- name: github.com/ThomsonReutersEikon/go-ntlm + version: b00ec39bbdd04f845950f4dbb4fd0a2c3155e830 + subpackages: + - ntlm +- name: github.com/xeipuuv/gojsonpointer + version: e0fe6f68307607d540ed8eac07a342c33fa1b54a +- name: github.com/xeipuuv/gojsonreference + version: e02fc20de94c78484cd5ffb007f8af96be030a45 +- name: github.com/xeipuuv/gojsonschema + version: d5336c75940ef31c9ceeb0ae64cf92944bccb4ee +devImports: [] diff --git a/glide.yaml b/glide.yaml new file mode 100644 index 00000000..671f084c --- /dev/null +++ b/glide.yaml @@ -0,0 +1,39 @@ +package: github.com/github/git-lfs +import: +- package: github.com/bgentry/go-netrc + version: 9fd32a8b3d3d3f9d43c341bfe098430e07609480 + subpackages: + - netrc +- package: github.com/cheggaaa/pb + version: bd14546a551971ae7f460e6d6e527c5b56cd38d7 +- package: github.com/kr/pretty + version: 088c856450c08c03eb32f7a6c221e6eefaa10e6f +- package: github.com/kr/pty + version: 5cf931ef8f76dccd0910001d74a58a7fca84a83d +- package: github.com/kr/text + version: 6807e777504f54ad073ecef66747de158294b639 +- package: github.com/inconshreveable/mousetrap + version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +- package: github.com/olekukonko/ts + version: ecf753e7c962639ab5a1fb46f7da627d4c0a04b8 +- package: github.com/rubyist/tracerx + version: d7bcc0bc315bed2a841841bee5dbecc8d7d7582f +- package: github.com/spf13/cobra + version: c55cdf33856a08e4822738728b41783292812889 +- package: github.com/spf13/pflag + version: 580b9be06c33d8ba9dcc8757ea56b7642472c2f5 +- package: github.com/stretchr/testify + version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d +- package: github.com/ThomsonReutersEikon/go-ntlm + version: b00ec39bbdd04f845950f4dbb4fd0a2c3155e830 + # go-ntlm includes a util/ directory, which was removed + subpackages: + - ntlm +- package: github.com/technoweenie/go-contentaddressable + version: 38171def3cd15e3b76eb156219b3d48704643899 +- package: github.com/xeipuuv/gojsonpointer + version: e0fe6f68307607d540ed8eac07a342c33fa1b54a +- package: github.com/xeipuuv/gojsonreference + version: e02fc20de94c78484cd5ffb007f8af96be030a45 +- package: github.com/xeipuuv/gojsonschema + version: d5336c75940ef31c9ceeb0ae64cf92944bccb4ee diff --git a/lfs/certs.go b/httputil/certs.go similarity index 83% rename 
from lfs/certs.go rename to httputil/certs.go index d98288a6..03b5cbb3 100644 --- a/lfs/certs.go +++ b/httputil/certs.go @@ -1,4 +1,4 @@ -package lfs +package httputil import ( "crypto/x509" @@ -6,19 +6,20 @@ import ( "io/ioutil" "path/filepath" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/github/git-lfs/config" + "github.com/rubyist/tracerx" ) // isCertVerificationDisabledForHost returns whether SSL certificate verification // has been disabled for the given host, or globally func isCertVerificationDisabledForHost(host string) bool { - hostSslVerify, _ := Config.GitConfig(fmt.Sprintf("http.https://%v/.sslverify", host)) + hostSslVerify, _ := config.Config.GitConfig(fmt.Sprintf("http.https://%v/.sslverify", host)) if hostSslVerify == "false" { return true } - globalSslVerify, _ := Config.GitConfig("http.sslverify") - if globalSslVerify == "false" || Config.GetenvBool("GIT_SSL_NO_VERIFY", false) { + globalSslVerify, _ := config.Config.GitConfig("http.sslverify") + if globalSslVerify == "false" || config.Config.GetenvBool("GIT_SSL_NO_VERIFY", false) { return true } @@ -47,25 +48,25 @@ func appendRootCAsForHostFromGitconfig(pool *x509.CertPool, host string) *x509.C // Accumulate certs from all these locations: // GIT_SSL_CAINFO first - if cafile, ok := Config.envVars["GIT_SSL_CAINFO"]; ok { + if cafile := config.Config.Getenv("GIT_SSL_CAINFO"); len(cafile) > 0 { return appendCertsFromFile(pool, cafile) } // http.<url>.sslcainfo // we know we have simply "host" or "host:port" key := fmt.Sprintf("http.https://%v/.sslcainfo", host) - if cafile, ok := Config.GitConfig(key); ok { + if cafile, ok := config.Config.GitConfig(key); ok { return appendCertsFromFile(pool, cafile) } // http.sslcainfo - if cafile, ok := Config.GitConfig("http.sslcainfo"); ok { + if cafile, ok := config.Config.GitConfig("http.sslcainfo"); ok { return appendCertsFromFile(pool, cafile) } // GIT_SSL_CAPATH - if cadir, ok := Config.envVars["GIT_SSL_CAPATH"]; ok { + if cadir := config.Config.Getenv("GIT_SSL_CAPATH"); len(cadir) > 0 { return appendCertsFromFilesInDir(pool, cadir) } // http.sslcapath - if cadir, ok := Config.GitConfig("http.sslcapath"); ok { + if cadir, ok := config.Config.GitConfig("http.sslcapath"); ok { return appendCertsFromFilesInDir(pool, cadir) } diff --git a/lfs/certs_darwin.go b/httputil/certs_darwin.go similarity index 95% rename from lfs/certs_darwin.go rename to httputil/certs_darwin.go index d05d40a6..75eb88a6 100644 --- a/lfs/certs_darwin.go +++ b/httputil/certs_darwin.go @@ -1,4 +1,4 @@ -package lfs +package httputil import ( "crypto/x509" @@ -6,7 +6,7 @@ import ( "strings" "github.com/github/git-lfs/subprocess" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/rubyist/tracerx" ) func appendRootCAsForHostFromPlatform(pool *x509.CertPool, host string) *x509.CertPool { diff --git a/lfs/certs_freebsd.go b/httputil/certs_freebsd.go similarity index 90% rename from lfs/certs_freebsd.go rename to httputil/certs_freebsd.go index 81d44827..e665ccfa 100644 --- a/lfs/certs_freebsd.go +++ b/httputil/certs_freebsd.go @@ -1,4 +1,4 @@ -package lfs +package httputil import "crypto/x509" diff --git a/lfs/certs_linux.go b/httputil/certs_linux.go similarity index 90% rename from lfs/certs_linux.go rename to httputil/certs_linux.go index 81d44827..e665ccfa 100644 --- a/lfs/certs_linux.go +++ b/httputil/certs_linux.go @@ -1,4 +1,4 @@ -package lfs +package httputil import "crypto/x509" diff --git a/lfs/certs_test.go b/httputil/certs_test.go similarity
index 53% rename from lfs/certs_test.go rename to httputil/certs_test.go index fb5157af..5d98af80 100644 --- a/lfs/certs_test.go +++ b/httputil/certs_test.go @@ -1,13 +1,13 @@ -package lfs +package httputil import ( - "crypto/x509" "io/ioutil" "os" "path/filepath" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/github/git-lfs/config" + "github.com/stretchr/testify/assert" ) var testCert = `-----BEGIN CERTIFICATE----- @@ -35,163 +35,152 @@ NpW5Hh0w4/5iIetCkJ0= -----END CERTIFICATE-----` func TestCertFromSSLCAInfoConfig(t *testing.T) { + defer config.Config.ResetConfig() tempfile, err := ioutil.TempFile("", "testcert") - assert.Equal(t, nil, err, "Error creating temp cert file") + assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) - assert.Equal(t, nil, err, "Error writing temp cert file") + assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() - oldGitConfig := Config.gitConfig - defer func() { - Config.gitConfig = oldGitConfig - }() - - Config.gitConfig = map[string]string{"http.https://git-lfs.local/.sslcainfo": tempfile.Name()} + config.Config.ClearConfig() + config.Config.SetConfig("http.https://git-lfs.local/.sslcainfo", tempfile.Name()) // Should match pool := getRootCAsForHost("git-lfs.local") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) // Shouldn't match pool = getRootCAsForHost("wronghost.com") - assert.Equal(t, (*x509.CertPool)(nil), pool) + assert.Nil(t, pool) // Ports have to match pool = getRootCAsForHost("git-lfs.local:8443") - assert.Equal(t, (*x509.CertPool)(nil), pool) + assert.Nil(t, pool) // Now use global sslcainfo - Config.gitConfig = map[string]string{"http.sslcainfo": tempfile.Name()} + config.Config.ClearConfig() + config.Config.SetConfig("http.sslcainfo", tempfile.Name()) // Should match anything pool = getRootCAsForHost("git-lfs.local") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("wronghost.com") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("git-lfs.local:8443") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) } func TestCertFromSSLCAInfoEnv(t *testing.T) { tempfile, err := ioutil.TempFile("", "testcert") - assert.Equal(t, nil, err, "Error creating temp cert file") + assert.Nil(t, err, "Error creating temp cert file") defer os.Remove(tempfile.Name()) _, err = tempfile.WriteString(testCert) - assert.Equal(t, nil, err, "Error writing temp cert file") + assert.Nil(t, err, "Error writing temp cert file") tempfile.Close() - oldEnv := Config.envVars + oldEnv := config.Config.GetAllEnv() defer func() { - Config.envVars = oldEnv + config.Config.SetAllEnv(oldEnv) }() - - Config.envVars = map[string]string{"GIT_SSL_CAINFO": tempfile.Name()} + config.Config.SetAllEnv(map[string]string{"GIT_SSL_CAINFO": tempfile.Name()}) // Should match any host at all pool := getRootCAsForHost("git-lfs.local") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("wronghost.com") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("notthisone.com:8888") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) } func TestCertFromSSLCAPathConfig(t *testing.T) { + defer config.Config.ResetConfig() tempdir, err := ioutil.TempDir("", "testcertdir") - assert.Equal(t, nil, err, "Error 
creating temp cert dir") + assert.Nil(t, err, "Error creating temp cert dir") defer os.RemoveAll(tempdir) err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) - assert.Equal(t, nil, err, "Error creating cert file") + assert.Nil(t, err, "Error creating cert file") - oldGitConfig := Config.gitConfig - defer func() { - Config.gitConfig = oldGitConfig - }() - - Config.gitConfig = map[string]string{"http.sslcapath": tempdir} + config.Config.ClearConfig() + config.Config.SetConfig("http.sslcapath", tempdir) // Should match any host at all pool := getRootCAsForHost("git-lfs.local") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("wronghost.com") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("notthisone.com:8888") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) } func TestCertFromSSLCAPathEnv(t *testing.T) { tempdir, err := ioutil.TempDir("", "testcertdir") - assert.Equal(t, nil, err, "Error creating temp cert dir") + assert.Nil(t, err, "Error creating temp cert dir") defer os.RemoveAll(tempdir) err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644) - assert.Equal(t, nil, err, "Error creating cert file") + assert.Nil(t, err, "Error creating cert file") - oldEnv := Config.envVars + oldEnv := config.Config.GetAllEnv() defer func() { - Config.envVars = oldEnv + config.Config.SetAllEnv(oldEnv) }() - - Config.envVars = map[string]string{"GIT_SSL_CAPATH": tempdir} + config.Config.SetAllEnv(map[string]string{"GIT_SSL_CAPATH": tempdir}) // Should match any host at all pool := getRootCAsForHost("git-lfs.local") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("wronghost.com") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) pool = getRootCAsForHost("notthisone.com:8888") - assert.NotEqual(t, (*x509.CertPool)(nil), pool) + assert.NotNil(t, pool) } func TestCertVerifyDisabledGlobalEnv(t *testing.T) { - assert.Equal(t, false, isCertVerificationDisabledForHost("anyhost.com")) + assert.False(t, isCertVerificationDisabledForHost("anyhost.com")) - oldEnv := Config.envVars + oldEnv := config.Config.GetAllEnv() defer func() { - Config.envVars = oldEnv + config.Config.SetAllEnv(oldEnv) }() - Config.envVars = map[string]string{"GIT_SSL_NO_VERIFY": "1"} + config.Config.SetAllEnv(map[string]string{"GIT_SSL_NO_VERIFY": "1"}) - assert.Equal(t, true, isCertVerificationDisabledForHost("anyhost.com")) + assert.True(t, isCertVerificationDisabledForHost("anyhost.com")) } func TestCertVerifyDisabledGlobalConfig(t *testing.T) { + defer config.Config.ResetConfig() - assert.Equal(t, false, isCertVerificationDisabledForHost("anyhost.com")) + assert.False(t, isCertVerificationDisabledForHost("anyhost.com")) - oldGitConfig := Config.gitConfig - defer func() { - Config.gitConfig = oldGitConfig - }() - Config.gitConfig = map[string]string{"http.sslverify": "false"} + config.Config.ClearConfig() + config.Config.SetConfig("http.sslverify", "false") - assert.Equal(t, true, isCertVerificationDisabledForHost("anyhost.com")) + assert.True(t, isCertVerificationDisabledForHost("anyhost.com")) } func TestCertVerifyDisabledHostConfig(t *testing.T) { + defer config.Config.ResetConfig() - assert.Equal(t, false, isCertVerificationDisabledForHost("specifichost.com")) - assert.Equal(t, false, isCertVerificationDisabledForHost("otherhost.com")) + assert.False(t, 
isCertVerificationDisabledForHost("specifichost.com")) + assert.False(t, isCertVerificationDisabledForHost("otherhost.com")) - oldGitConfig := Config.gitConfig - defer func() { - Config.gitConfig = oldGitConfig - }() - Config.gitConfig = map[string]string{"http.https://specifichost.com/.sslverify": "false"} + config.Config.ClearConfig() + config.Config.SetConfig("http.https://specifichost.com/.sslverify", "false") - assert.Equal(t, true, isCertVerificationDisabledForHost("specifichost.com")) - assert.Equal(t, false, isCertVerificationDisabledForHost("otherhost.com")) + assert.True(t, isCertVerificationDisabledForHost("specifichost.com")) + assert.False(t, isCertVerificationDisabledForHost("otherhost.com")) } diff --git a/lfs/certs_windows.go b/httputil/certs_windows.go similarity index 92% rename from lfs/certs_windows.go rename to httputil/certs_windows.go index bf678198..c762dba9 100644 --- a/lfs/certs_windows.go +++ b/httputil/certs_windows.go @@ -1,4 +1,4 @@ -package lfs +package httputil import "crypto/x509" diff --git a/lfs/http.go b/httputil/http.go similarity index 63% rename from lfs/http.go rename to httputil/http.go index cf90e9e1..26f894e4 100644 --- a/lfs/http.go +++ b/httputil/http.go @@ -1,4 +1,6 @@ -package lfs +// Package httputil provides additional helper functions for http services +// NOTE: Subject to change, do not rely on this package from outside git-lfs source +package httputil import ( "bufio" @@ -16,34 +18,38 @@ import ( "sync" "time" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/github/git-lfs/config" + "github.com/rubyist/tracerx" ) -type transferStats struct { +type httpTransferStats struct { HeaderSize int BodySize int Start time.Time Stop time.Time } -type transfer struct { - requestStats *transferStats - responseStats *transferStats +type httpTransfer struct { + requestStats *httpTransferStats + responseStats *httpTransferStats } var ( // TODO should use some locks - transfers = make(map[*http.Response]*transfer) - transferBuckets = make(map[string][]*http.Response) - transfersLock sync.Mutex - transferBucketsLock sync.Mutex + httpTransfers = make(map[*http.Response]*httpTransfer) + httpTransferBuckets = make(map[string][]*http.Response) + httpTransfersLock sync.Mutex + httpTransferBucketsLock sync.Mutex + httpClients map[string]*HttpClient + httpClientsMutex sync.Mutex + UserAgent string ) func LogTransfer(key string, res *http.Response) { - if Config.isLoggingStats { - transferBucketsLock.Lock() - transferBuckets[key] = append(transferBuckets[key], res) - transferBucketsLock.Unlock() + if config.Config.IsLoggingStats { + httpTransferBucketsLock.Lock() + httpTransferBuckets[key] = append(httpTransferBuckets[key], res) + httpTransferBucketsLock.Unlock() } } @@ -72,7 +78,7 @@ func (c *HttpClient) Do(req *http.Request) (*http.Response, error) { cresp := countingResponse(res) res.Body = cresp - if Config.isLoggingStats { + if config.Config.IsLoggingStats { reqHeaderSize := 0 resHeaderSize := 0 @@ -84,29 +90,29 @@ func (c *HttpClient) Do(req *http.Request) (*http.Response, error) { resHeaderSize = len(dump) } - reqstats := &transferStats{HeaderSize: reqHeaderSize, BodySize: crc.Count} + reqstats := &httpTransferStats{HeaderSize: reqHeaderSize, BodySize: crc.Count} // Response body size cannot be figured until it is read. Do not rely on a Content-Length // header because it may not exist or be -1 in the case of chunked responses. 
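The comment above is the key constraint behind the CountingReadCloser that follows: the true body size is only known once the stream has been drained, so it is accumulated during Read and finalized at io.EOF. A minimal, self-contained sketch of that counting-reader idea (illustrative names, not the exact git-lfs types):

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// countingReadCloser counts the body bytes actually read, since
// Content-Length may be missing or -1 for chunked responses.
type countingReadCloser struct {
	io.ReadCloser
	count int
}

func (c *countingReadCloser) Read(p []byte) (int, error) {
	n, err := c.ReadCloser.Read(p)
	c.count += n // once Read returns io.EOF, count holds the final body size
	return n, err
}

func main() {
	body := ioutil.NopCloser(strings.NewReader("hello world"))
	crc := &countingReadCloser{ReadCloser: body}
	io.Copy(ioutil.Discard, crc) // drain the stream
	fmt.Println(crc.count)       // 11
}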
- resstats := &transferStats{HeaderSize: resHeaderSize, Start: start} - t := &transfer{requestStats: reqstats, responseStats: resstats} - transfersLock.Lock() - transfers[res] = t - transfersLock.Unlock() + resstats := &httpTransferStats{HeaderSize: resHeaderSize, Start: start} + t := &httpTransfer{requestStats: reqstats, responseStats: resstats} + httpTransfersLock.Lock() + httpTransfers[res] = t + httpTransfersLock.Unlock() } return res, err } -// HttpClient returns a new HttpClient for the given host (which may be "host:port") -func (c *Configuration) HttpClient(host string) *HttpClient { - c.httpClientsMutex.Lock() - defer c.httpClientsMutex.Unlock() +// NewHttpClient returns a new HttpClient for the given host (which may be "host:port") +func NewHttpClient(c *config.Configuration, host string) *HttpClient { + httpClientsMutex.Lock() + defer httpClientsMutex.Unlock() - if c.httpClients == nil { - c.httpClients = make(map[string]*HttpClient) + if httpClients == nil { + httpClients = make(map[string]*HttpClient) } - if client, ok := c.httpClients[host]; ok { + if client, ok := httpClients[host]; ok { return client } @@ -132,14 +138,14 @@ func (c *Configuration) HttpClient(host string) *HttpClient { } client := &HttpClient{ - &http.Client{Transport: tr, CheckRedirect: checkRedirect}, + &http.Client{Transport: tr, CheckRedirect: CheckRedirect}, } - c.httpClients[host] = client + httpClients[host] = client return client } -func checkRedirect(req *http.Request, via []*http.Request) error { +func CheckRedirect(req *http.Request, via []*http.Request) error { if len(via) >= 3 { return errors.New("stopped after 3 redirects") } @@ -164,9 +170,9 @@ func checkRedirect(req *http.Request, via []*http.Request) error { var tracedTypes = []string{"json", "text", "xml", "html"} func traceHttpRequest(req *http.Request) { - tracerx.Printf("HTTP: %s", traceHttpReq(req)) + tracerx.Printf("HTTP: %s", TraceHttpReq(req)) - if Config.isTracingHttp == false { + if config.Config.IsTracingHttp == false { return } @@ -185,7 +191,7 @@ func traceHttpResponse(res *http.Response) { tracerx.Printf("HTTP: %d", res.StatusCode) - if Config.isTracingHttp == false { + if config.Config.IsTracingHttp == false { return } @@ -208,7 +214,7 @@ func traceHttpDump(direction string, dump []byte) { for scanner.Scan() { line := scanner.Text() - if !Config.isDebuggingHttp && strings.HasPrefix(strings.ToLower(line), "authorization: basic") { + if !config.Config.IsDebuggingHttp && strings.HasPrefix(strings.ToLower(line), "authorization: basic") { fmt.Fprintf(os.Stderr, "%s Authorization: Basic * * * * *\n", direction) } else { fmt.Fprintf(os.Stderr, "%s %s\n", direction, line) @@ -226,8 +232,8 @@ func isTraceableContent(h http.Header) bool { return false } -func countingRequest(req *http.Request) *countingReadCloser { - return &countingReadCloser{ +func countingRequest(req *http.Request) *CountingReadCloser { + return &CountingReadCloser{ request: req, ReadCloser: req.Body, isTraceableType: isTraceableContent(req.Header), @@ -235,8 +241,8 @@ func countingRequest(req *http.Request) *countingReadCloser { } } -func countingResponse(res *http.Response) *countingReadCloser { - return &countingReadCloser{ +func countingResponse(res *http.Response) *CountingReadCloser { + return &CountingReadCloser{ response: res, ReadCloser: res.Body, isTraceableType: isTraceableContent(res.Header), @@ -244,7 +250,7 @@ func countingResponse(res *http.Response) *countingReadCloser { } } -type countingReadCloser struct { +type CountingReadCloser struct { Count int 
request *http.Request response *http.Response @@ -253,7 +259,7 @@ type countingReadCloser struct { io.ReadCloser } -func (c *countingReadCloser) Read(b []byte) (int, error) { +func (c *CountingReadCloser) Read(b []byte) (int, error) { n, err := c.ReadCloser.Read(b) if err != nil && err != io.EOF { return n, err @@ -267,31 +273,31 @@ func (c *countingReadCloser) Read(b []byte) (int, error) { tracerx.Printf("HTTP: %s", chunk) } - if Config.isTracingHttp { + if config.Config.IsTracingHttp { fmt.Fprint(os.Stderr, chunk) } } - if err == io.EOF && Config.isLoggingStats { - // This transfer is done, we're checking it this way so we can also - // catch transfers where the caller forgets to Close() the Body. + if err == io.EOF && config.Config.IsLoggingStats { + // This httpTransfer is done, we're checking it this way so we can also + // catch httpTransfers where the caller forgets to Close() the Body. if c.response != nil { - transfersLock.Lock() - if transfer, ok := transfers[c.response]; ok { - transfer.responseStats.BodySize = c.Count - transfer.responseStats.Stop = time.Now() + httpTransfersLock.Lock() + if httpTransfer, ok := httpTransfers[c.response]; ok { + httpTransfer.responseStats.BodySize = c.Count + httpTransfer.responseStats.Stop = time.Now() } - transfersLock.Unlock() + httpTransfersLock.Unlock() } } return n, err } // LogHttpStats is intended to be called after all HTTP operations for the -// command have finished. It dumps k/v logs, one line per transfer into +// command have finished. It dumps k/v logs, one line per httpTransfer into // a log file with the current timestamp. func LogHttpStats() { - if !Config.isLoggingStats { + if !config.Config.IsLoggingStats { return } @@ -301,11 +307,11 @@ func LogHttpStats() { return } - fmt.Fprintf(file, "concurrent=%d batch=%v time=%d version=%s\n", Config.ConcurrentTransfers(), Config.BatchTransfer(), time.Now().Unix(), Version) + fmt.Fprintf(file, "concurrent=%d batch=%v time=%d version=%s\n", config.Config.ConcurrentTransfers(), config.Config.BatchTransfer(), time.Now().Unix(), config.Version) - for key, responses := range transferBuckets { + for key, responses := range httpTransferBuckets { for _, response := range responses { - stats := transfers[response] + stats := httpTransfers[response] fmt.Fprintf(file, "key=%s reqheader=%d reqbody=%d resheader=%d resbody=%d restime=%d status=%d url=%s\n", key, stats.requestStats.HeaderSize, @@ -322,7 +328,7 @@ func LogHttpStats() { } func statsLogFile() (*os.File, error) { - logBase := filepath.Join(LocalLogDir, "http") + logBase := filepath.Join(config.LocalLogDir, "http") if err := os.MkdirAll(logBase, 0755); err != nil { return nil, err } @@ -330,3 +336,11 @@ func statsLogFile() (*os.File, error) { logFile := fmt.Sprintf("http-%d.log", time.Now().Unix()) return os.Create(filepath.Join(logBase, logFile)) } + +func TraceHttpReq(req *http.Request) string { + return fmt.Sprintf("%s %s", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0]) +} + +func init() { + UserAgent = config.VersionDesc +} diff --git a/lfs/ntlm.go b/httputil/ntlm.go similarity index 88% rename from lfs/ntlm.go rename to httputil/ntlm.go index 32358ab4..712b2638 100644 --- a/lfs/ntlm.go +++ b/httputil/ntlm.go @@ -1,4 +1,4 @@ -package lfs +package httputil import ( "bytes" @@ -12,12 +12,14 @@ import ( "strings" "sync/atomic" - "github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm" + "github.com/ThomsonReutersEikon/go-ntlm/ntlm" + "github.com/github/git-lfs/auth" +
"github.com/github/git-lfs/config" ) -func (c *Configuration) ntlmClientSession(creds Creds) (ntlm.ClientSession, error) { - if c.ntlmSession != nil { - return c.ntlmSession, nil +func ntlmClientSession(c *config.Configuration, creds auth.Creds) (ntlm.ClientSession, error) { + if c.NtlmSession != nil { + return c.NtlmSession, nil } splits := strings.Split(creds["username"], "\\") @@ -33,17 +35,17 @@ func (c *Configuration) ntlmClientSession(creds Creds) (ntlm.ClientSession, erro } session.SetUserInfo(splits[1], creds["password"], strings.ToUpper(splits[0])) - c.ntlmSession = session + c.NtlmSession = session return session, nil } -func DoNTLMRequest(request *http.Request, retry bool) (*http.Response, error) { +func doNTLMRequest(request *http.Request, retry bool) (*http.Response, error) { handReq, err := cloneRequest(request) if err != nil { return nil, err } - res, err := Config.HttpClient(handReq.Host).Do(handReq) + res, err := NewHttpClient(config.Config, handReq.Host).Do(handReq) if err != nil && res == nil { return nil, err } @@ -51,7 +53,7 @@ func DoNTLMRequest(request *http.Request, retry bool) (*http.Response, error) { //If the status is 401 then we need to re-authenticate, otherwise it was successful if res.StatusCode == 401 { - creds, err := getCreds(request) + creds, err := auth.GetCreds(request) if err != nil { return nil, err } @@ -78,10 +80,10 @@ func DoNTLMRequest(request *http.Request, retry bool) (*http.Response, error) { //If the status is 401 then we need to re-authenticate if res.StatusCode == 401 && retry == true { - return DoNTLMRequest(challengeReq, false) + return doNTLMRequest(challengeReq, false) } - saveCredentials(creds, res) + auth.SaveCredentials(creds, res) return res, nil } @@ -90,7 +92,7 @@ func DoNTLMRequest(request *http.Request, retry bool) (*http.Response, error) { func negotiate(request *http.Request, message string) ([]byte, error) { request.Header.Add("Authorization", message) - res, err := Config.HttpClient(request.Host).Do(request) + res, err := NewHttpClient(config.Config, request.Host).Do(request) if res == nil && err != nil { return nil, err @@ -107,13 +109,13 @@ func negotiate(request *http.Request, message string) ([]byte, error) { return ret, nil } -func challenge(request *http.Request, challengeBytes []byte, creds Creds) (*http.Response, error) { +func challenge(request *http.Request, challengeBytes []byte, creds auth.Creds) (*http.Response, error) { challenge, err := ntlm.ParseChallengeMessage(challengeBytes) if err != nil { return nil, err } - session, err := Config.ntlmClientSession(creds) + session, err := ntlmClientSession(config.Config, creds) if err != nil { return nil, err } @@ -126,7 +128,7 @@ func challenge(request *http.Request, challengeBytes []byte, creds Creds) (*http authMsg := base64.StdEncoding.EncodeToString(authenticate.Bytes()) request.Header.Add("Authorization", "NTLM "+authMsg) - return Config.HttpClient(request.Host).Do(request) + return NewHttpClient(config.Config, request.Host).Do(request) } func parseChallengeResponse(response *http.Response) ([]byte, error) { diff --git a/lfs/ntlm_test.go b/httputil/ntlm_test.go similarity index 77% rename from lfs/ntlm_test.go rename to httputil/ntlm_test.go index 02ca1a7a..dbb6468f 100644 --- a/lfs/ntlm_test.go +++ b/httputil/ntlm_test.go @@ -1,4 +1,4 @@ -package lfs +package httputil import ( "bytes" @@ -8,49 +8,51 @@ import ( "strings" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/github/git-lfs/auth" + 
"github.com/github/git-lfs/config" + "github.com/stretchr/testify/assert" ) func TestNtlmClientSession(t *testing.T) { //Make sure to clear ntlmSession so test order doesn't matter. - Config.ntlmSession = nil + config.Config.NtlmSession = nil - creds := Creds{"username": "MOOSEDOMAIN\\canadian", "password": "MooseAntlersYeah"} - _, err := Config.ntlmClientSession(creds) - assert.Equal(t, err, nil) + creds := auth.Creds{"username": "MOOSEDOMAIN\\canadian", "password": "MooseAntlersYeah"} + _, err := ntlmClientSession(config.Config, creds) + assert.Nil(t, err) //The second call should ignore creds and give the session we just created. - badCreds := Creds{"username": "badusername", "password": "MooseAntlersYeah"} - _, err = Config.ntlmClientSession(badCreds) - assert.Equal(t, err, nil) + badCreds := auth.Creds{"username": "badusername", "password": "MooseAntlersYeah"} + _, err = ntlmClientSession(config.Config, badCreds) + assert.Nil(t, err) //clean up - Config.ntlmSession = nil + config.Config.NtlmSession = nil } func TestNtlmClientSessionBadCreds(t *testing.T) { //Make sure to clear ntlmSession so test order doesn't matter. - Config.ntlmSession = nil + config.Config.NtlmSession = nil - creds := Creds{"username": "badusername", "password": "MooseAntlersYeah"} - _, err := Config.ntlmClientSession(creds) - assert.NotEqual(t, err, nil) + creds := auth.Creds{"username": "badusername", "password": "MooseAntlersYeah"} + _, err := ntlmClientSession(config.Config, creds) + assert.NotNil(t, err) //clean up - Config.ntlmSession = nil + config.Config.NtlmSession = nil } func TestNtlmCloneRequest(t *testing.T) { req1, _ := http.NewRequest("Method", "url", nil) cloneOfReq1, err := cloneRequest(req1) - assert.Equal(t, err, nil) + assert.Nil(t, err) assertRequestsEqual(t, req1, cloneOfReq1) req2, _ := http.NewRequest("Method", "url", bytes.NewReader([]byte("Moose can be request bodies"))) cloneOfReq2, err := cloneRequest(req2) - assert.Equal(t, err, nil) + assert.Nil(t, err) assertRequestsEqual(t, req2, cloneOfReq2) } @@ -62,11 +64,11 @@ func assertRequestsEqual(t *testing.T, req1 *http.Request, req2 *http.Request) { } if req1.Body == nil { - assert.Equal(t, req2.Body, nil) + assert.Nil(t, req2.Body) } else { bytes1, _ := ioutil.ReadAll(req1.Body) bytes2, _ := ioutil.ReadAll(req2.Body) - assert.Equal(t, bytes.Compare(bytes1, bytes2), 0) + assert.Equal(t, bytes1, bytes2) } } @@ -75,8 +77,8 @@ func TestNtlmHeaderParseValid(t *testing.T) { res.Header = make(map[string][]string) res.Header.Add("Www-Authenticate", "NTLM "+base64.StdEncoding.EncodeToString([]byte("I am a moose"))) bytes, err := parseChallengeResponse(&res) - assert.Equal(t, err, nil) - assert.Equal(t, strings.HasPrefix(string(bytes), "NTLM"), false) + assert.Nil(t, err) + assert.False(t, strings.HasPrefix(string(bytes), "NTLM")) } func TestNtlmHeaderParseInvalidLength(t *testing.T) { @@ -98,7 +100,7 @@ func TestNtlmHeaderParseInvalid(t *testing.T) { res.Header = make(map[string][]string) res.Header.Add("Www-Authenticate", base64.StdEncoding.EncodeToString([]byte("NTLM I am a moose"))) _, err := parseChallengeResponse(&res) - assert.NotEqual(t, err, nil) + assert.NotNil(t, err) } func TestCloneSmallBody(t *testing.T) { diff --git a/httputil/request.go b/httputil/request.go new file mode 100644 index 00000000..25a4c25f --- /dev/null +++ b/httputil/request.go @@ -0,0 +1,185 @@ +package httputil + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/github/git-lfs/auth" + "github.com/github/git-lfs/config" + 
"github.com/github/git-lfs/errutil" + + "github.com/rubyist/tracerx" +) + +type ClientError struct { + Message string `json:"message"` + DocumentationUrl string `json:"documentation_url,omitempty"` + RequestId string `json:"request_id,omitempty"` +} + +func (e *ClientError) Error() string { + msg := e.Message + if len(e.DocumentationUrl) > 0 { + msg += "\nDocs: " + e.DocumentationUrl + } + if len(e.RequestId) > 0 { + msg += "\nRequest ID: " + e.RequestId + } + return msg +} + +// Internal http request management +func doHttpRequest(req *http.Request, creds auth.Creds) (*http.Response, error) { + var ( + res *http.Response + err error + ) + + if config.Config.NtlmAccess(auth.GetOperationForRequest(req)) { + res, err = doNTLMRequest(req, true) + } else { + res, err = NewHttpClient(config.Config, req.Host).Do(req) + } + + if res == nil { + res = &http.Response{ + StatusCode: 0, + Header: make(http.Header), + Request: req, + Body: ioutil.NopCloser(bytes.NewBufferString("")), + } + } + + if err != nil { + if errutil.IsAuthError(err) { + SetAuthType(req, res) + doHttpRequest(req, creds) + } else { + err = errutil.Error(err) + } + } else { + err = handleResponse(res, creds) + } + + if err != nil { + if res != nil { + SetErrorResponseContext(err, res) + } else { + setErrorRequestContext(err, req) + } + } + + return res, err +} + +// DoHttpRequest performs a single HTTP request +func DoHttpRequest(req *http.Request, useCreds bool) (*http.Response, error) { + var creds auth.Creds + if useCreds { + c, err := auth.GetCreds(req) + if err != nil { + return nil, err + } + creds = c + } + + return doHttpRequest(req, creds) +} + +// DoHttpRequestWithRedirects runs a HTTP request and responds to redirects +func DoHttpRequestWithRedirects(req *http.Request, via []*http.Request, useCreds bool) (*http.Response, error) { + var creds auth.Creds + if useCreds { + c, err := auth.GetCreds(req) + if err != nil { + return nil, err + } + creds = c + } + + res, err := doHttpRequest(req, creds) + if err != nil { + return res, err + } + + if res.StatusCode == 307 { + redirectTo := res.Header.Get("Location") + locurl, err := url.Parse(redirectTo) + if err == nil && !locurl.IsAbs() { + locurl = req.URL.ResolveReference(locurl) + redirectTo = locurl.String() + } + + redirectedReq, err := NewHttpRequest(req.Method, redirectTo, nil) + if err != nil { + return res, errutil.Errorf(err, err.Error()) + } + + via = append(via, req) + + // Avoid seeking and re-wrapping the CountingReadCloser, just get the "real" body + realBody := req.Body + if wrappedBody, ok := req.Body.(*CountingReadCloser); ok { + realBody = wrappedBody.ReadCloser + } + + seeker, ok := realBody.(io.Seeker) + if !ok { + return res, errutil.Errorf(nil, "Request body needs to be an io.Seeker to handle redirects.") + } + + if _, err := seeker.Seek(0, 0); err != nil { + return res, errutil.Error(err) + } + redirectedReq.Body = realBody + redirectedReq.ContentLength = req.ContentLength + + if err = CheckRedirect(redirectedReq, via); err != nil { + return res, errutil.Errorf(err, err.Error()) + } + + return DoHttpRequestWithRedirects(redirectedReq, via, useCreds) + } + + return res, nil +} + +// NewHttpRequest creates a template request, with the given headers & UserAgent supplied +func NewHttpRequest(method, rawurl string, header map[string]string) (*http.Request, error) { + req, err := http.NewRequest(method, rawurl, nil) + if err != nil { + return nil, err + } + + for key, value := range header { + req.Header.Set(key, value) + } + + req.Header.Set("User-Agent", 
UserAgent) + + return req, nil +} + +func SetAuthType(req *http.Request, res *http.Response) { + authType := GetAuthType(res) + operation := auth.GetOperationForRequest(req) + config.Config.SetAccess(operation, authType) + tracerx.Printf("api: http response indicates %q authentication. Resubmitting...", authType) +} + +func GetAuthType(res *http.Response) string { + auth := res.Header.Get("Www-Authenticate") + if len(auth) < 1 { + auth = res.Header.Get("Lfs-Authenticate") + } + + if strings.HasPrefix(strings.ToLower(auth), "ntlm") { + return "ntlm" + } + + return "basic" +} diff --git a/lfs/client_error_test.go b/httputil/request_error_test.go similarity index 95% rename from lfs/client_error_test.go rename to httputil/request_error_test.go index cdf47599..aec638fe 100644 --- a/lfs/client_error_test.go +++ b/httputil/request_error_test.go @@ -1,4 +1,4 @@ -package lfs +package httputil import ( "bytes" @@ -8,6 +8,8 @@ import ( "net/http" "net/url" "testing" + + "github.com/github/git-lfs/errutil" ) func TestSuccessStatus(t *testing.T) { @@ -71,7 +73,7 @@ func TestErrorStatusWithCustomMessage(t *testing.T) { continue } - if IsFatalError(err) == (panicMsg != "panic") { + if errutil.IsFatalError(err) == (panicMsg != "panic") { t.Errorf("Error for HTTP %d should %s", status, panicMsg) continue } @@ -134,7 +136,7 @@ func TestErrorStatusWithDefaultMessage(t *testing.T) { continue } - if IsFatalError(err) == (results[1] != "panic") { + if errutil.IsFatalError(err) == (results[1] != "panic") { t.Errorf("Error for HTTP %d should %s", status, results[1]) continue } diff --git a/httputil/response.go b/httputil/response.go new file mode 100644 index 00000000..59b67c2b --- /dev/null +++ b/httputil/response.go @@ -0,0 +1,127 @@ +package httputil + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "regexp" + + "github.com/github/git-lfs/auth" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" +) + +var ( + lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) + jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) + hiddenHeaders = map[string]bool{ + "Authorization": true, + } + + defaultErrors = map[int]string{ + 400: "Client error: %s", + 401: "Authorization error: %s\nCheck that you have proper access to the repository", + 403: "Authorization error: %s\nCheck that you have proper access to the repository", + 404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it", + 500: "Server error: %s", + } +) + +// DecodeResponse attempts to decode the contents of the response as a JSON object +func DecodeResponse(res *http.Response, obj interface{}) error { + ctype := res.Header.Get("Content-Type") + if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { + return nil + } + + err := json.NewDecoder(res.Body).Decode(obj) + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + if err != nil { + return errutil.Errorf(err, "Unable to parse HTTP response for %s", TraceHttpReq(res.Request)) + } + + return nil +} + +// GetDefaultError returns the default text for standard error codes (blank if none) +func GetDefaultError(code int) string { + if s, ok := defaultErrors[code]; ok { + return s + } + return "" +} + +// Check the response from a HTTP request for problems +func handleResponse(res *http.Response, creds auth.Creds) error { + auth.SaveCredentials(creds, res) + + if res.StatusCode < 400 { + return nil + } + + defer func() { + io.Copy(ioutil.Discard, res.Body) + 
res.Body.Close() + }() + + cliErr := &ClientError{} + err := DecodeResponse(res, cliErr) + if err == nil { + if len(cliErr.Message) == 0 { + err = defaultError(res) + } else { + err = errutil.Error(cliErr) + } + } + + if res.StatusCode == 401 { + return errutil.NewAuthError(err) + } + + if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 509 { + return errutil.NewFatalError(err) + } + + return err +} + +func defaultError(res *http.Response) error { + var msgFmt string + + if f, ok := defaultErrors[res.StatusCode]; ok { + msgFmt = f + } else if res.StatusCode < 500 { + msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode) + } else { + msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP %d", res.StatusCode) + } + + return errutil.Error(fmt.Errorf(msgFmt, res.Request.URL)) +} + +func SetErrorResponseContext(err error, res *http.Response) { + errutil.ErrorSetContext(err, "Status", res.Status) + setErrorHeaderContext(err, "Request", res.Header) + setErrorRequestContext(err, res.Request) +} + +func setErrorRequestContext(err error, req *http.Request) { + errutil.ErrorSetContext(err, "Endpoint", config.Config.Endpoint(auth.GetOperationForRequest(req)).Url) + errutil.ErrorSetContext(err, "URL", TraceHttpReq(req)) + setErrorHeaderContext(err, "Response", req.Header) +} + +func setErrorHeaderContext(err error, prefix string, head http.Header) { + for key, _ := range head { + contextKey := fmt.Sprintf("%s:%s", prefix, key) + if _, skip := hiddenHeaders[key]; skip { + errutil.ErrorSetContext(err, contextKey, "--") + } else { + errutil.ErrorSetContext(err, contextKey, head.Get(key)) + } + } +} diff --git a/lfs/batcher.go b/lfs/batcher.go index c0ad8c98..e90263e7 100644 --- a/lfs/batcher.go +++ b/lfs/batcher.go @@ -11,16 +11,16 @@ import "sync/atomic" type Batcher struct { exited uint32 batchSize int - input chan Transferable - batchReady chan []Transferable + input chan interface{} + batchReady chan []interface{} } // NewBatcher creates a Batcher with the batchSize. func NewBatcher(batchSize int) *Batcher { b := &Batcher{ batchSize: batchSize, - input: make(chan Transferable, batchSize), - batchReady: make(chan []Transferable), + input: make(chan interface{}, batchSize), + batchReady: make(chan []interface{}), } go b.acceptInput() @@ -29,9 +29,9 @@ func NewBatcher(batchSize int) *Batcher { // Add adds an item to the batcher. Add is safe to call from multiple // goroutines. -func (b *Batcher) Add(t Transferable) { +func (b *Batcher) Add(t interface{}) { if atomic.CompareAndSwapUint32(&b.exited, 1, 0) { - b.input = make(chan Transferable, b.batchSize) + b.input = make(chan interface{}, b.batchSize) go b.acceptInput() } @@ -40,7 +40,7 @@ func (b *Batcher) Add(t Transferable) { // Next will wait for the one of the above batch triggers to occur and return // the accumulated batch. 
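Before the remaining Batcher hunks, a usage sketch may be useful: with the element type loosened from Transferable to interface{}, any value can now be batched. This assumes only the exported API visible in this diff (NewBatcher, Add, Next):

package main

import (
	"fmt"

	"github.com/github/git-lfs/lfs"
)

func main() {
	b := lfs.NewBatcher(3) // emit batches of 3 items

	// Add is safe to call from multiple goroutines.
	go func() {
		for i := 0; i < 6; i++ {
			b.Add(i) // any value satisfies interface{}
		}
	}()

	// Next blocks until a full batch (or an exit) is ready.
	for i := 0; i < 2; i++ {
		batch := b.Next()
		fmt.Println(len(batch), batch) // e.g. "3 [0 1 2]"
	}
}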
-func (b *Batcher) Next() []Transferable { +func (b *Batcher) Next() []interface{} { return <-b.batchReady } @@ -58,7 +58,7 @@ func (b *Batcher) acceptInput() { exit := false for { - batch := make([]Transferable, 0, b.batchSize) + batch := make([]interface{}, 0, b.batchSize) Loop: for len(batch) < b.batchSize { t, ok := <-b.input diff --git a/lfs/batcher_test.go b/lfs/batcher_test.go index bac8fe3d..918732ea 100644 --- a/lfs/batcher_test.go +++ b/lfs/batcher_test.go @@ -3,7 +3,7 @@ package lfs import ( "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) func TestBatcherSizeMet(t *testing.T) { @@ -65,7 +65,7 @@ func runBatcherTests(cases []batcherTestCase, t *testing.T) { items := c.ItemCount for i := 0; i < c.Batches(); i++ { group := b.Next() - assert.Equal(t, c.BatchSize, len(group)) + assert.Len(t, group, c.BatchSize) items -= c.BatchSize } } diff --git a/lfs/client.go b/lfs/client.go deleted file mode 100644 index 47077e0b..00000000 --- a/lfs/client.go +++ /dev/null @@ -1,813 +0,0 @@ -package lfs - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/github/git-lfs/git" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" -) - -const ( - mediaType = "application/vnd.git-lfs+json; charset=utf-8" -) - -var ( - lfsMediaTypeRE = regexp.MustCompile(`\Aapplication/vnd\.git\-lfs\+json(;|\z)`) - jsonMediaTypeRE = regexp.MustCompile(`\Aapplication/json(;|\z)`) - hiddenHeaders = map[string]bool{ - "Authorization": true, - } - - defaultErrors = map[int]string{ - 400: "Client error: %s", - 401: "Authorization error: %s\nCheck that you have proper access to the repository", - 403: "Authorization error: %s\nCheck that you have proper access to the repository", - 404: "Repository or object not found: %s\nCheck that it exists and that you have proper access to it", - 500: "Server error: %s", - } -) - -type ObjectError struct { - Code int `json:"code"` - Message string `json:"message"` -} - -func (e *ObjectError) Error() string { - return fmt.Sprintf("[%d] %s", e.Code, e.Message) -} - -type ObjectResource struct { - Oid string `json:"oid,omitempty"` - Size int64 `json:"size"` - Actions map[string]*linkRelation `json:"actions,omitempty"` - Links map[string]*linkRelation `json:"_links,omitempty"` - Error *ObjectError `json:"error,omitempty"` -} - -func (o *ObjectResource) NewRequest(relation, method string) (*http.Request, error) { - rel, ok := o.Rel(relation) - if !ok { - if relation == "download" { - return nil, errors.New("Object not found on the server.") - } - return nil, fmt.Errorf("No %q action for this object.", relation) - - } - - req, err := newClientRequest(method, rel.Href, rel.Header) - if err != nil { - return nil, err - } - - return req, nil -} - -func (o *ObjectResource) Rel(name string) (*linkRelation, bool) { - var rel *linkRelation - var ok bool - - if o.Actions != nil { - rel, ok = o.Actions[name] - } else { - rel, ok = o.Links[name] - } - - return rel, ok -} - -type linkRelation struct { - Href string `json:"href"` - Header map[string]string `json:"header,omitempty"` -} - -type ClientError struct { - Message string `json:"message"` - DocumentationUrl string `json:"documentation_url,omitempty"` - RequestId string `json:"request_id,omitempty"` -} - -func (e *ClientError) Error() string { - msg := e.Message - if len(e.DocumentationUrl) > 0 { - msg += 
"\nDocs: " + e.DocumentationUrl - } - if len(e.RequestId) > 0 { - msg += "\nRequest ID: " + e.RequestId - } - return msg -} - -// Download will attempt to download the object with the given oid. The batched -// API will be used, but if the server does not implement the batch operations -// it will fall back to the legacy API. -func Download(oid string, size int64) (io.ReadCloser, int64, error) { - if !Config.BatchTransfer() { - return DownloadLegacy(oid) - } - - objects := []*ObjectResource{ - &ObjectResource{Oid: oid, Size: size}, - } - - objs, err := Batch(objects, "download") - if err != nil { - if IsNotImplementedError(err) { - git.Config.SetLocal("", "lfs.batch", "false") - return DownloadLegacy(oid) - } - return nil, 0, err - } - - if len(objs) != 1 { // Expecting to find one object - return nil, 0, Error(fmt.Errorf("Object not found: %s", oid)) - } - - return DownloadObject(objs[0]) -} - -// DownloadLegacy attempts to download the object for the given oid using the -// legacy API. -func DownloadLegacy(oid string) (io.ReadCloser, int64, error) { - req, err := newApiRequest("GET", oid) - if err != nil { - return nil, 0, Error(err) - } - - res, obj, err := doLegacyApiRequest(req) - if err != nil { - return nil, 0, err - } - LogTransfer("lfs.api.download", res) - req, err = obj.NewRequest("download", "GET") - if err != nil { - return nil, 0, Error(err) - } - - res, err = doStorageRequest(req) - if err != nil { - return nil, 0, err - } - LogTransfer("lfs.data.download", res) - - return res.Body, res.ContentLength, nil -} - -type byteCloser struct { - *bytes.Reader -} - -func DownloadCheck(oid string) (*ObjectResource, error) { - req, err := newApiRequest("GET", oid) - if err != nil { - return nil, Error(err) - } - - res, obj, err := doLegacyApiRequest(req) - if err != nil { - return nil, err - } - LogTransfer("lfs.api.download", res) - - _, err = obj.NewRequest("download", "GET") - if err != nil { - return nil, Error(err) - } - - return obj, nil -} - -func DownloadObject(obj *ObjectResource) (io.ReadCloser, int64, error) { - req, err := obj.NewRequest("download", "GET") - if err != nil { - return nil, 0, Error(err) - } - - res, err := doStorageRequest(req) - if err != nil { - return nil, 0, newRetriableError(err) - } - LogTransfer("lfs.data.download", res) - - return res.Body, res.ContentLength, nil -} - -func (b *byteCloser) Close() error { - return nil -} - -func Batch(objects []*ObjectResource, operation string) ([]*ObjectResource, error) { - if len(objects) == 0 { - return nil, nil - } - - o := map[string]interface{}{"objects": objects, "operation": operation} - - by, err := json.Marshal(o) - if err != nil { - return nil, Error(err) - } - - req, err := newBatchApiRequest(operation) - if err != nil { - return nil, Error(err) - } - - req.Header.Set("Content-Type", mediaType) - req.Header.Set("Content-Length", strconv.Itoa(len(by))) - req.ContentLength = int64(len(by)) - req.Body = &byteCloser{bytes.NewReader(by)} - - tracerx.Printf("api: batch %d files", len(objects)) - - res, objs, err := doApiBatchRequest(req) - - if err != nil { - - if res == nil { - return nil, newRetriableError(err) - } - - if res.StatusCode == 0 { - return nil, newRetriableError(err) - } - - if IsAuthError(err) { - setAuthType(req, res) - return Batch(objects, operation) - } - - switch res.StatusCode { - case 404, 410: - tracerx.Printf("api: batch not implemented: %d", res.StatusCode) - return nil, newNotImplementedError(nil) - } - - tracerx.Printf("api error: %s", err) - return nil, Error(err) - } - 
LogTransfer("lfs.api.batch", res) - - if res.StatusCode != 200 { - return nil, Error(fmt.Errorf("Invalid status for %s: %d", traceHttpReq(req), res.StatusCode)) - } - - return objs, nil -} - -func UploadCheck(oidPath string) (*ObjectResource, error) { - oid := filepath.Base(oidPath) - - stat, err := os.Stat(oidPath) - if err != nil { - return nil, Error(err) - } - - reqObj := &ObjectResource{ - Oid: oid, - Size: stat.Size(), - } - - by, err := json.Marshal(reqObj) - if err != nil { - return nil, Error(err) - } - - req, err := newApiRequest("POST", oid) - if err != nil { - return nil, Error(err) - } - - req.Header.Set("Content-Type", mediaType) - req.Header.Set("Content-Length", strconv.Itoa(len(by))) - req.ContentLength = int64(len(by)) - req.Body = &byteCloser{bytes.NewReader(by)} - - tracerx.Printf("api: uploading (%s)", oid) - res, obj, err := doLegacyApiRequest(req) - - if err != nil { - if IsAuthError(err) { - setAuthType(req, res) - return UploadCheck(oidPath) - } - - return nil, newRetriableError(err) - } - LogTransfer("lfs.api.upload", res) - - if res.StatusCode == 200 { - return nil, nil - } - - if obj.Oid == "" { - obj.Oid = oid - } - if obj.Size == 0 { - obj.Size = reqObj.Size - } - - return obj, nil -} - -func UploadObject(o *ObjectResource, cb CopyCallback) error { - path, err := LocalMediaPath(o.Oid) - if err != nil { - return Error(err) - } - - file, err := os.Open(path) - if err != nil { - return Error(err) - } - defer file.Close() - - reader := &CallbackReader{ - C: cb, - TotalSize: o.Size, - Reader: file, - } - - req, err := o.NewRequest("upload", "PUT") - if err != nil { - return Error(err) - } - - if len(req.Header.Get("Content-Type")) == 0 { - req.Header.Set("Content-Type", "application/octet-stream") - } - - if req.Header.Get("Transfer-Encoding") == "chunked" { - req.TransferEncoding = []string{"chunked"} - } else { - req.Header.Set("Content-Length", strconv.FormatInt(o.Size, 10)) - } - - req.ContentLength = o.Size - req.Body = ioutil.NopCloser(reader) - - res, err := doStorageRequest(req) - if err != nil { - return newRetriableError(err) - } - LogTransfer("lfs.data.upload", res) - - // A status code of 403 likely means that an authentication token for the - // upload has expired. This can be safely retried. - if res.StatusCode == 403 { - return newRetriableError(err) - } - - if res.StatusCode > 299 { - return Errorf(nil, "Invalid status for %s: %d", traceHttpReq(req), res.StatusCode) - } - - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - if _, ok := o.Rel("verify"); !ok { - return nil - } - - req, err = o.NewRequest("verify", "POST") - if err != nil { - return Error(err) - } - - by, err := json.Marshal(o) - if err != nil { - return Error(err) - } - - req.Header.Set("Content-Type", mediaType) - req.Header.Set("Content-Length", strconv.Itoa(len(by))) - req.ContentLength = int64(len(by)) - req.Body = ioutil.NopCloser(bytes.NewReader(by)) - res, err = doAPIRequest(req, true) - if err != nil { - return err - } - - LogTransfer("lfs.data.verify", res) - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - return err -} - -// doLegacyApiRequest runs the request to the LFS legacy API. 
-func doLegacyApiRequest(req *http.Request) (*http.Response, *ObjectResource, error) { - via := make([]*http.Request, 0, 4) - res, err := doApiRequestWithRedirects(req, via, true) - if err != nil { - return res, nil, err - } - - obj := &ObjectResource{} - err = decodeApiResponse(res, obj) - - if err != nil { - setErrorResponseContext(err, res) - return nil, nil, err - } - - return res, obj, nil -} - -// getOperationForHttpRequest determines the operation type for a http.Request -func getOperationForHttpRequest(req *http.Request) string { - operation := "download" - if req.Method == "POST" || req.Method == "PUT" { - operation = "upload" - } - return operation -} - -// doApiBatchRequest runs the request to the LFS batch API. If the API returns a -// 401, the repo will be marked as having private access and the request will be -// re-run. When the repo is marked as having private access, credentials will -// be retrieved. -func doApiBatchRequest(req *http.Request) (*http.Response, []*ObjectResource, error) { - res, err := doAPIRequest(req, Config.PrivateAccess(getOperationForHttpRequest(req))) - - if err != nil { - if res != nil && res.StatusCode == 401 { - return res, nil, newAuthError(err) - } - return res, nil, err - } - - var objs map[string][]*ObjectResource - err = decodeApiResponse(res, &objs) - - if err != nil { - setErrorResponseContext(err, res) - } - - return res, objs["objects"], err -} - -// doStorageREquest runs the request to the storage API from a link provided by -// the "actions" or "_links" properties an LFS API response. -func doStorageRequest(req *http.Request) (*http.Response, error) { - creds, err := getCreds(req) - if err != nil { - return nil, err - } - - return doHttpRequest(req, creds) -} - -// doAPIRequest runs the request to the LFS API, without parsing the response -// body. If the API returns a 401, the repo will be marked as having private -// access and the request will be re-run. When the repo is marked as having -// private access, credentials will be retrieved. -func doAPIRequest(req *http.Request, useCreds bool) (*http.Response, error) { - via := make([]*http.Request, 0, 4) - return doApiRequestWithRedirects(req, via, useCreds) -} - -// doHttpRequest runs the given HTTP request. LFS or Storage API requests should -// use doApiBatchRequest() or doStorageRequest() instead. 
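// For illustration only (not part of this patch): a hedged sketch of the split
// the comment above describes. API calls go through doAPIRequest() or
// doApiBatchRequest(), while the href/header pairs a response carries under
// "actions" are replayed against the storage API via doStorageRequest().
// fetchObjectContent is a hypothetical helper name.
func fetchObjectContent(o *ObjectResource) (*http.Response, error) {
    // Resolves o.Actions["download"] (falling back to o.Links) and copies the
    // server-supplied headers onto the request.
    req, err := o.NewRequest("download", "GET")
    if err != nil {
        return nil, err
    }
    return doStorageRequest(req) // attaches credentials, then runs the request
}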
-func doHttpRequest(req *http.Request, creds Creds) (*http.Response, error) { - var ( - res *http.Response - err error - ) - - if Config.NtlmAccess(getOperationForHttpRequest(req)) { - res, err = DoNTLMRequest(req, true) - } else { - res, err = Config.HttpClient(req.Host).Do(req) - } - - if res == nil { - res = &http.Response{ - StatusCode: 0, - Header: make(http.Header), - Request: req, - Body: ioutil.NopCloser(bytes.NewBufferString("")), - } - } - - if err != nil { - if IsAuthError(err) { - setAuthType(req, res) - doHttpRequest(req, creds) - } else { - err = Error(err) - } - } else { - err = handleResponse(res, creds) - } - - if err != nil { - if res != nil { - setErrorResponseContext(err, res) - } else { - setErrorRequestContext(err, req) - } - } - - return res, err -} - -func doApiRequestWithRedirects(req *http.Request, via []*http.Request, useCreds bool) (*http.Response, error) { - var creds Creds - if useCreds { - c, err := getCreds(req) - if err != nil { - return nil, err - } - creds = c - } - - res, err := doHttpRequest(req, creds) - if err != nil { - return res, err - } - - if res.StatusCode == 307 { - redirectTo := res.Header.Get("Location") - locurl, err := url.Parse(redirectTo) - if err == nil && !locurl.IsAbs() { - locurl = req.URL.ResolveReference(locurl) - redirectTo = locurl.String() - } - - redirectedReq, err := newClientRequest(req.Method, redirectTo, nil) - if err != nil { - return res, Errorf(err, err.Error()) - } - - via = append(via, req) - - // Avoid seeking and re-wrapping the countingReadCloser, just get the "real" body - realBody := req.Body - if wrappedBody, ok := req.Body.(*countingReadCloser); ok { - realBody = wrappedBody.ReadCloser - } - - seeker, ok := realBody.(io.Seeker) - if !ok { - return res, Errorf(nil, "Request body needs to be an io.Seeker to handle redirects.") - } - - if _, err := seeker.Seek(0, 0); err != nil { - return res, Error(err) - } - redirectedReq.Body = realBody - redirectedReq.ContentLength = req.ContentLength - - if err = checkRedirect(redirectedReq, via); err != nil { - return res, Errorf(err, err.Error()) - } - - return doApiRequestWithRedirects(redirectedReq, via, useCreds) - } - - return res, nil -} - -func handleResponse(res *http.Response, creds Creds) error { - saveCredentials(creds, res) - - if res.StatusCode < 400 { - return nil - } - - defer func() { - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - }() - - cliErr := &ClientError{} - err := decodeApiResponse(res, cliErr) - if err == nil { - if len(cliErr.Message) == 0 { - err = defaultError(res) - } else { - err = Error(cliErr) - } - } - - if res.StatusCode == 401 { - return newAuthError(err) - } - - if res.StatusCode > 499 && res.StatusCode != 501 && res.StatusCode != 509 { - return newFatalError(err) - } - - return err -} - -func decodeApiResponse(res *http.Response, obj interface{}) error { - ctype := res.Header.Get("Content-Type") - if !(lfsMediaTypeRE.MatchString(ctype) || jsonMediaTypeRE.MatchString(ctype)) { - return nil - } - - err := json.NewDecoder(res.Body).Decode(obj) - io.Copy(ioutil.Discard, res.Body) - res.Body.Close() - - if err != nil { - return Errorf(err, "Unable to parse HTTP response for %s", traceHttpReq(res.Request)) - } - - return nil -} - -func defaultError(res *http.Response) error { - var msgFmt string - - if f, ok := defaultErrors[res.StatusCode]; ok { - msgFmt = f - } else if res.StatusCode < 500 { - msgFmt = defaultErrors[400] + fmt.Sprintf(" from HTTP %d", res.StatusCode) - } else { - msgFmt = defaultErrors[500] + fmt.Sprintf(" from HTTP 
%d", res.StatusCode) - } - - return Error(fmt.Errorf(msgFmt, res.Request.URL)) -} - -func newApiRequest(method, oid string) (*http.Request, error) { - objectOid := oid - operation := "download" - if method == "POST" { - if oid != "batch" { - objectOid = "" - operation = "upload" - } - } - endpoint := Config.Endpoint(operation) - - res, err := sshAuthenticate(endpoint, operation, oid) - if err != nil { - tracerx.Printf("ssh: attempted with %s. Error: %s", - endpoint.SshUserAndHost, err.Error(), - ) - return nil, err - } - - if len(res.Href) > 0 { - endpoint.Url = res.Href - } - - u, err := ObjectUrl(endpoint, objectOid) - if err != nil { - return nil, err - } - - req, err := newClientRequest(method, u.String(), res.Header) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", mediaType) - return req, nil -} - -func newClientRequest(method, rawurl string, header map[string]string) (*http.Request, error) { - req, err := http.NewRequest(method, rawurl, nil) - if err != nil { - return nil, err - } - - for key, value := range header { - req.Header.Set(key, value) - } - - req.Header.Set("User-Agent", UserAgent) - - return req, nil -} - -func newBatchApiRequest(operation string) (*http.Request, error) { - endpoint := Config.Endpoint(operation) - - res, err := sshAuthenticate(endpoint, operation, "") - if err != nil { - tracerx.Printf("ssh: %s attempted with %s. Error: %s", - operation, endpoint.SshUserAndHost, err.Error(), - ) - return nil, err - } - - if len(res.Href) > 0 { - endpoint.Url = res.Href - } - - u, err := ObjectUrl(endpoint, "batch") - if err != nil { - return nil, err - } - - req, err := newBatchClientRequest("POST", u.String()) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", mediaType) - if res.Header != nil { - for key, value := range res.Header { - req.Header.Set(key, value) - } - } - - return req, nil -} - -func newBatchClientRequest(method, rawurl string) (*http.Request, error) { - req, err := http.NewRequest(method, rawurl, nil) - if err != nil { - return nil, err - } - - req.Header.Set("User-Agent", UserAgent) - - return req, nil -} - -func setRequestAuthFromUrl(req *http.Request, u *url.URL) bool { - if !Config.NtlmAccess(getOperationForHttpRequest(req)) && u.User != nil { - if pass, ok := u.User.Password(); ok { - fmt.Fprintln(os.Stderr, "warning: current Git remote contains credentials") - setRequestAuth(req, u.User.Username(), pass) - return true - } - } - - return false -} - -func setAuthType(req *http.Request, res *http.Response) { - authType := getAuthType(res) - operation := getOperationForHttpRequest(req) - Config.SetAccess(operation, authType) - tracerx.Printf("api: http response indicates %q authentication. 
Resubmitting...", authType) -} - -func getAuthType(res *http.Response) string { - auth := res.Header.Get("Www-Authenticate") - if len(auth) < 1 { - auth = res.Header.Get("Lfs-Authenticate") - } - - if strings.HasPrefix(strings.ToLower(auth), "ntlm") { - return "ntlm" - } - - return "basic" -} - -func setRequestAuth(req *http.Request, user, pass string) { - if Config.NtlmAccess(getOperationForHttpRequest(req)) { - return - } - - if len(user) == 0 && len(pass) == 0 { - return - } - - token := fmt.Sprintf("%s:%s", user, pass) - auth := "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) - req.Header.Set("Authorization", auth) -} - -func setErrorResponseContext(err error, res *http.Response) { - ErrorSetContext(err, "Status", res.Status) - setErrorHeaderContext(err, "Request", res.Header) - setErrorRequestContext(err, res.Request) -} - -func setErrorRequestContext(err error, req *http.Request) { - ErrorSetContext(err, "Endpoint", Config.Endpoint(getOperationForHttpRequest(req)).Url) - ErrorSetContext(err, "URL", traceHttpReq(req)) - setErrorHeaderContext(err, "Response", req.Header) -} - -func setErrorHeaderContext(err error, prefix string, head http.Header) { - for key, _ := range head { - contextKey := fmt.Sprintf("%s:%s", prefix, key) - if _, skip := hiddenHeaders[key]; skip { - ErrorSetContext(err, contextKey, "--") - } else { - ErrorSetContext(err, contextKey, head.Get(key)) - } - } -} diff --git a/lfs/client_test.go b/lfs/client_test.go deleted file mode 100644 index e2e375a5..00000000 --- a/lfs/client_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package lfs - -import ( - "encoding/base64" - "fmt" - "io/ioutil" - "net/http/httptest" - "net/url" - "strings" - "testing" -) - -func tempdir(t *testing.T) string { - dir, err := ioutil.TempDir("", "git-lfs-test") - if err != nil { - t.Fatalf("Error getting temp dir: %s", err) - } - return dir -} - -func expectedAuth(t *testing.T, server *httptest.Server) string { - u, err := url.Parse(server.URL) - if err != nil { - t.Fatal(err) - } - - token := fmt.Sprintf("%s:%s", u.Host, "monkey") - return "Basic " + strings.TrimSpace(base64.StdEncoding.EncodeToString([]byte(token))) -} diff --git a/lfs/download_queue.go b/lfs/download_queue.go index a3396e58..2e94c28c 100644 --- a/lfs/download_queue.go +++ b/lfs/download_queue.go @@ -1,73 +1,56 @@ package lfs -// The ability to check that a file can be downloaded -type DownloadCheckable struct { - Pointer *WrappedPointer - object *ObjectResource +import ( + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/transfer" +) + +type Downloadable struct { + pointer *WrappedPointer + object *api.ObjectResource } -func NewDownloadCheckable(p *WrappedPointer) *DownloadCheckable { - return &DownloadCheckable{Pointer: p} -} - -func (d *DownloadCheckable) Check() (*ObjectResource, error) { - return DownloadCheck(d.Pointer.Oid) -} - -func (d *DownloadCheckable) Transfer(cb CopyCallback) error { - // just report completion of check but don't do anything - cb(d.Size(), d.Size(), int(d.Size())) - return nil -} - -func (d *DownloadCheckable) Object() *ObjectResource { +func (d *Downloadable) Object() *api.ObjectResource { return d.object } -func (d *DownloadCheckable) Oid() string { - return d.Pointer.Oid +func (d *Downloadable) Oid() string { + return d.pointer.Oid } -func (d *DownloadCheckable) Size() int64 { - return d.Pointer.Size +func (d *Downloadable) Size() int64 { + return d.pointer.Size } -func (d *DownloadCheckable) Name() string { - return d.Pointer.Name +func (d *Downloadable) 
Name() string { + return d.pointer.Name } -func (d *DownloadCheckable) SetObject(o *ObjectResource) { +func (d *Downloadable) Path() string { + p, _ := LocalMediaPath(d.pointer.Oid) + return p +} + +func (d *Downloadable) SetObject(o *api.ObjectResource) { d.object = o } -// NewDownloadCheckQueue builds a checking queue, allowing `workers` concurrent check operations. -func NewDownloadCheckQueue(files int, size int64, dryRun bool) *TransferQueue { - q := newTransferQueue(files, size, dryRun) - // API operation is still download, but it will only perform the API call (check) - q.transferKind = "download" - return q -} - -// The ability to actually download -type Downloadable struct { - *DownloadCheckable +// TODO remove this legacy method & only support batch +func (d *Downloadable) LegacyCheck() (*api.ObjectResource, error) { + return api.DownloadCheck(d.pointer.Oid) } func NewDownloadable(p *WrappedPointer) *Downloadable { - return &Downloadable{DownloadCheckable: NewDownloadCheckable(p)} + return &Downloadable{pointer: p} } -func (d *Downloadable) Transfer(cb CopyCallback) error { - err := PointerSmudgeObject(d.Pointer.Pointer, d.object, cb) - if err != nil { - return Error(err) - } - return nil +// NewDownloadCheckQueue builds a checking queue, checks that objects are there but doesn't download +func NewDownloadCheckQueue(files int, size int64) *TransferQueue { + // Always dry run + return newTransferQueue(files, size, true, transfer.Download) } -// NewDownloadQueue builds a DownloadQueue, allowing `workers` concurrent downloads. +// NewDownloadQueue builds a DownloadQueue, allowing concurrent downloads. func NewDownloadQueue(files int, size int64, dryRun bool) *TransferQueue { - q := newTransferQueue(files, size, dryRun) - q.transferKind = "download" - return q + return newTransferQueue(files, size, dryRun, transfer.Download) } diff --git a/lfs/download_test.go b/lfs/download_test.go deleted file mode 100644 index ab52635b..00000000 --- a/lfs/download_test.go +++ /dev/null @@ -1,719 +0,0 @@ -package lfs - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "strconv" - "strings" - "testing" -) - -func TestSuccessfulDownload(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != expectedAuth(t, server) { - t.Error("Invalid Authorization") - } - - obj := &ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*linkRelation{ - "download": &linkRelation{ - Href: server.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != "" { - t.Error("Invalid Accept") - } - - if r.Header.Get("A") != "1" { - t.Error("invalid A") - 
} - - head := w.Header() - head.Set("Content-Type", "application/octet-stream") - head.Set("Content-Length", "4") - w.WriteHeader(200) - w.Write([]byte("test")) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/media") - - reader, size, err := Download("oid", 0) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error: %s", err) - } - defer reader.Close() - - if size != 4 { - t.Errorf("unexpected size: %d", size) - } - - by, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if body := string(by); body != "test" { - t.Errorf("unexpected body: %s", body) - } -} - -// nearly identical to TestSuccessfulDownload -// called multiple times to return different 3xx status codes -func TestSuccessfulDownloadWithRedirects(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - // all of these should work for GET requests - redirectCodes := []int{301, 302, 303, 307} - redirectIndex := 0 - - mux.HandleFunc("/redirect/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/redirect2/objects/oid") - w.WriteHeader(redirectCodes[redirectIndex]) - t.Logf("redirect with %d", redirectCodes[redirectIndex]) - }) - - mux.HandleFunc("/redirect2/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/media/objects/oid") - w.WriteHeader(redirectCodes[redirectIndex]) - t.Logf("redirect again with %d", redirectCodes[redirectIndex]) - redirectIndex += 1 - }) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != expectedAuth(t, server) { - t.Error("Invalid Authorization") - } - - obj := &ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*linkRelation{ - "download": &linkRelation{ - Href: server.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != "" { - t.Error("Invalid Accept") - } - - if r.Header.Get("A") != "1" { - t.Error("invalid A") - } - - head := w.Header() - head.Set("Content-Type", "application/octet-stream") - head.Set("Content-Length", "4") - w.WriteHeader(200) - w.Write([]byte("test")) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/redirect") - - for _, redirect := range redirectCodes { - reader, size, err := 
Download("oid", 0) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error for %d status: %s", redirect, err) - } - - if size != 4 { - t.Errorf("unexpected size for %d status: %d", redirect, size) - } - - by, err := ioutil.ReadAll(reader) - reader.Close() - if err != nil { - t.Fatalf("unexpected error for %d status: %s", redirect, err) - } - - if body := string(by); body != "test" { - t.Errorf("unexpected body for %d status: %s", redirect, body) - } - } -} - -// nearly identical to TestSuccessfulDownload -// the api request returns a custom Authorization header -func TestSuccessfulDownloadWithAuthorization(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != expectedAuth(t, server) { - t.Error("Invalid Authorization") - } - - obj := &ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*linkRelation{ - "download": &linkRelation{ - Href: server.URL + "/download", - Header: map[string]string{ - "A": "1", - "Authorization": "custom", - }, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", "application/json; charset=utf-8") - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != "" { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != "custom" { - t.Error("Invalid Authorization") - } - - if r.Header.Get("A") != "1" { - t.Error("invalid A") - } - - head := w.Header() - head.Set("Content-Type", "application/octet-stream") - head.Set("Content-Length", "4") - w.WriteHeader(200) - w.Write([]byte("test")) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/media") - reader, size, err := Download("oid", 0) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error: %s", err) - } - defer reader.Close() - - if size != 4 { - t.Errorf("unexpected size: %d", size) - } - - by, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if body := string(by); body != "test" { - t.Errorf("unexpected body: %s", body) - } -} - -// nearly identical to TestSuccessfulDownload -// download is served from a second server -func TestSuccessfulDownloadFromSeparateHost(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - mux2 := http.NewServeMux() - server2 := httptest.NewServer(mux2) - defer server2.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid 
Accept") - } - - if r.Header.Get("Authorization") != expectedAuth(t, server) { - t.Error("Invalid Authorization") - } - - obj := &ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*linkRelation{ - "download": &linkRelation{ - Href: server2.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux2.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != "" { - t.Error("Invalid Accept") - } - - if r.Header.Get("A") != "1" { - t.Error("invalid A") - } - - head := w.Header() - head.Set("Content-Type", "application/octet-stream") - head.Set("Content-Length", "4") - w.WriteHeader(200) - w.Write([]byte("test")) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/media") - reader, size, err := Download("oid", 0) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error: %s", err) - } - defer reader.Close() - - if size != 4 { - t.Errorf("unexpected size: %d", size) - } - - by, err := ioutil.ReadAll(reader) - if err != nil { - t.Fatalf("unexpected error: %s", err) - } - - if body := string(by); body != "test" { - t.Errorf("unexpected body: %s", body) - } -} - -// nearly identical to TestSuccessfulDownload -// download is served from a second server -func TestSuccessfulDownloadFromSeparateRedirectedHost(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - mux2 := http.NewServeMux() - server2 := httptest.NewServer(mux2) - defer server2.Close() - - mux3 := http.NewServeMux() - server3 := httptest.NewServer(mux3) - defer server3.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - // all of these should work for GET requests - redirectCodes := []int{301, 302, 303, 307} - redirectIndex := 0 - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server 1: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != expectedAuth(t, server) { - t.Error("Invalid Authorization") - } - - w.Header().Set("Location", server2.URL+"/media/objects/oid") - w.WriteHeader(redirectCodes[redirectIndex]) - t.Logf("redirect with %d", redirectCodes[redirectIndex]) - redirectIndex += 1 - }) - - mux2.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server 2: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != "" { - t.Error("Invalid Authorization") - } - - obj := &ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*linkRelation{ - "download": &linkRelation{ - Href: server3.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - 
head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux3.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server 3: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != "" { - t.Error("Invalid Accept") - } - - if r.Header.Get("A") != "1" { - t.Error("invalid A") - } - - head := w.Header() - head.Set("Content-Type", "application/octet-stream") - head.Set("Content-Length", "4") - w.WriteHeader(200) - w.Write([]byte("test")) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/media") - - for _, redirect := range redirectCodes { - reader, size, err := Download("oid", 0) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatalf("unexpected error for %d status: %s", redirect, err) - } - - if size != 4 { - t.Errorf("unexpected size for %d status: %d", redirect, size) - } - - by, err := ioutil.ReadAll(reader) - reader.Close() - if err != nil { - t.Fatalf("unexpected error for %d status: %s", redirect, err) - } - - if body := string(by); body != "test" { - t.Errorf("unexpected body for %d status: %s", redirect, body) - } - } -} - -func TestDownloadAPIError(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/media") - _, _, err := Download("oid", 0) - if err == nil { - t.Fatal("no error?") - } - - if IsFatalError(err) { - t.Fatal("should not panic") - } - - if isDockerConnectionError(err) { - return - } - - if err.Error() != fmt.Sprintf(defaultErrors[404], server.URL+"/media/objects/oid") { - t.Fatalf("Unexpected error: %s", err.Error()) - } -} - -func TestDownloadStorageError(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - defer server.Close() - - tmp := tempdir(t) - defer os.RemoveAll(tmp) - - mux.HandleFunc("/media/objects/oid", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - t.Logf("request header: %v", r.Header) - - if r.Method != "GET" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Error("Invalid Accept") - } - - if r.Header.Get("Authorization") != expectedAuth(t, server) { - t.Error("Invalid Authorization") - } - - obj := &ObjectResource{ - Oid: "oid", - Size: 4, - Actions: map[string]*linkRelation{ - "download": &linkRelation{ - Href: server.URL + "/download", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/download", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(500) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.batch", "false") - Config.SetConfig("lfs.url", server.URL+"/media") - _, _, err := Download("oid", 0) - if err == nil { - t.Fatal("no error?") - } - - if isDockerConnectionError(err) { - return - } - - if !IsFatalError(err) { - t.Fatal("should panic") - } - - if 
err.Error() != fmt.Sprintf(defaultErrors[500], server.URL+"/download") { - t.Fatalf("Unexpected error: %s", err.Error()) - } -} - -// guards against connection errors that only seem to happen on debian docker -// images. -func isDockerConnectionError(err error) bool { - if err == nil { - return false - } - - if os.Getenv("TRAVIS") == "true" { - return false - } - - e := err.Error() - return strings.Contains(e, "connection reset by peer") || - strings.Contains(e, "connection refused") -} diff --git a/lfs/extension.go b/lfs/extension.go index 1e61a1f7..04b97489 100644 --- a/lfs/extension.go +++ b/lfs/extension.go @@ -9,24 +9,16 @@ import ( "io" "os" "os/exec" - "sort" "strings" -) -// An Extension describes how to manipulate files during smudge and clean. -// Extensions are parsed from the Git config. -type Extension struct { - Name string - Clean string - Smudge string - Priority int -} + "github.com/github/git-lfs/config" +) type pipeRequest struct { action string reader io.Reader fileName string - extensions []Extension + extensions []config.Extension } type pipeResponse struct { @@ -48,30 +40,6 @@ type extCommand struct { result *pipeExtResult } -// SortExtensions sorts a map of extensions in ascending order by Priority -func SortExtensions(m map[string]Extension) ([]Extension, error) { - pMap := make(map[int]Extension) - priorities := make([]int, 0, len(m)) - for n, ext := range m { - p := ext.Priority - if _, exist := pMap[p]; exist { - err := fmt.Errorf("duplicate priority %d on %s", p, n) - return nil, err - } - pMap[p] = ext - priorities = append(priorities, p) - } - - sort.Ints(priorities) - - result := make([]Extension, len(priorities)) - for i, p := range priorities { - result[i] = pMap[p] - } - - return result, nil -} - func pipeExtensions(request *pipeRequest) (response pipeResponse, err error) { var extcmds []*extCommand for _, e := range request.extensions { diff --git a/lfs/hook.go b/lfs/hook.go index eb4f3d2d..49b94843 100644 --- a/lfs/hook.go +++ b/lfs/hook.go @@ -7,6 +7,9 @@ import ( "os" "path/filepath" "strings" + + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" ) // A Hook represents a githook as described in http://git-scm.com/docs/githooks. @@ -26,7 +29,7 @@ func (h *Hook) Exists() bool { // Path returns the desired (or actual, if installed) location where this hook // should be installed, relative to the local Git directory. func (h *Hook) Path() string { - return filepath.Join(LocalGitDir, "hooks", string(h.Type)) + return filepath.Join(config.LocalGitDir, "hooks", string(h.Type)) } // Install installs this Git hook on disk, or upgrades it if it does exist, and @@ -34,7 +37,7 @@ func (h *Hook) Path() string { // directory. It returns and halts at any errors, and returns nil if the // operation was a success. func (h *Hook) Install(force bool) error { - if err := os.MkdirAll(filepath.Join(LocalGitDir, "hooks"), 0755); err != nil { + if err := os.MkdirAll(filepath.Join(config.LocalGitDir, "hooks"), 0755); err != nil { return err } @@ -73,7 +76,7 @@ func (h *Hook) Upgrade() error { // or any of the past versions of this hook. 
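// For illustration only (not part of this patch): a minimal sketch of the Hook
// lifecycle defined in this file, with ensureHook as a hypothetical caller.
// After this change every hook path is derived from config.LocalGitDir rather
// than the old package-level lfs.LocalGitDir.
func ensureHook(h *Hook) error {
    if h.Exists() {
        // Upgrade() rewrites hooks matching a current or past LFS version.
        return h.Upgrade()
    }
    // force=false: assumed to leave an unrecognized pre-existing hook
    // untouched rather than overwrite it.
    return h.Install(false)
}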
func (h *Hook) Uninstall() error { if !InRepo() { - return newInvalidRepoError(nil) + return errutil.NewInvalidRepoError(nil) } match, err := h.matchesCurrent() diff --git a/lfs/lfs.go b/lfs/lfs.go index bfc36967..dd88aaac 100644 --- a/lfs/lfs.go +++ b/lfs/lfs.go @@ -1,104 +1,101 @@ +// Package lfs brings together the core LFS functionality +// NOTE: Subject to change, do not rely on this package from outside git-lfs source package lfs import ( "fmt" - "io/ioutil" - "net/http" "os" "path/filepath" - "runtime" "strings" - "github.com/github/git-lfs/git" + "github.com/github/git-lfs/config" "github.com/github/git-lfs/localstorage" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/github/git-lfs/tools" + "github.com/rubyist/tracerx" ) const ( - Version = "1.2.0" - tempDirPerms = 0755 - localMediaDirPerms = 0755 - localLogDirPerms = 0755 + Version = "1.2.1" ) var ( LargeSizeThreshold = 5 * 1024 * 1024 - TempDir = filepath.Join(os.TempDir(), "git-lfs") - GitCommit string - UserAgent string - LocalWorkingDir string - LocalGitDir string // parent of index / config / hooks etc - LocalGitStorageDir string // parent of objects/lfs (may be same as LocalGitDir but may not) - LocalMediaDir string // root of lfs objects - LocalObjectTempDir string // where temporarily downloading objects are stored - LocalReferenceDir string // alternative local media dir (relative to clone reference repo) - objects *localstorage.LocalStorage - LocalLogDir string - checkedTempDir string ) -func TempFile(prefix string) (*os.File, error) { - if checkedTempDir != TempDir { - if err := os.MkdirAll(TempDir, tempDirPerms); err != nil { - return nil, err - } - checkedTempDir = TempDir +// LocalMediaDir returns the root of lfs objects +func LocalMediaDir() string { + if localstorage.Objects() != nil { + return localstorage.Objects().RootDir } - - return ioutil.TempFile(TempDir, prefix) + return "" } -func ResetTempDir() error { - checkedTempDir = "" - return os.RemoveAll(TempDir) +func LocalObjectTempDir() string { + if localstorage.Objects() != nil { + return localstorage.Objects().TempDir + } + return "" +} + +func TempDir() string { + return localstorage.TempDir +} + +func TempFile(prefix string) (*os.File, error) { + return localstorage.TempFile(prefix) } func LocalMediaPath(oid string) (string, error) { - return objects.BuildObjectPath(oid) + return localstorage.Objects().BuildObjectPath(oid) +} + +func LocalMediaPathReadOnly(oid string) string { + return localstorage.Objects().ObjectPath(oid) } func LocalReferencePath(sha string) string { - if LocalReferenceDir == "" { + if config.LocalReferenceDir == "" { return "" } - return filepath.Join(LocalReferenceDir, sha[0:2], sha[2:4], sha) + return filepath.Join(config.LocalReferenceDir, sha[0:2], sha[2:4], sha) } func ObjectExistsOfSize(oid string, size int64) bool { - path := objects.ObjectPath(oid) - return FileExistsOfSize(path, size) + path := localstorage.Objects().ObjectPath(oid) + return tools.FileExistsOfSize(path, size) } func Environ() []string { osEnviron := os.Environ() env := make([]string, 0, len(osEnviron)+7) env = append(env, - fmt.Sprintf("LocalWorkingDir=%s", LocalWorkingDir), - fmt.Sprintf("LocalGitDir=%s", LocalGitDir), - fmt.Sprintf("LocalGitStorageDir=%s", LocalGitStorageDir), - fmt.Sprintf("LocalMediaDir=%s", LocalMediaDir), - fmt.Sprintf("LocalReferenceDir=%s", LocalReferenceDir), - fmt.Sprintf("TempDir=%s", TempDir), - fmt.Sprintf("ConcurrentTransfers=%d", Config.ConcurrentTransfers()), - 
fmt.Sprintf("BatchTransfer=%v", Config.BatchTransfer()), - fmt.Sprintf("SkipDownloadErrors=%v", Config.SkipDownloadErrors()), - fmt.Sprintf("FetchRecentAlways=%v", Config.FetchPruneConfig().FetchRecentAlways), - fmt.Sprintf("FetchRecentRefsDays=%d", Config.FetchPruneConfig().FetchRecentRefsDays), - fmt.Sprintf("FetchRecentCommitsDays=%d", Config.FetchPruneConfig().FetchRecentCommitsDays), - fmt.Sprintf("FetchRecentRefsIncludeRemotes=%v", Config.FetchPruneConfig().FetchRecentRefsIncludeRemotes), - fmt.Sprintf("PruneOffsetDays=%d", Config.FetchPruneConfig().PruneOffsetDays), - fmt.Sprintf("PruneVerifyRemoteAlways=%v", Config.FetchPruneConfig().PruneVerifyRemoteAlways), - fmt.Sprintf("PruneRemoteName=%s", Config.FetchPruneConfig().PruneRemoteName), - fmt.Sprintf("AccessDownload=%s", Config.Access("download")), - fmt.Sprintf("AccessUpload=%s", Config.Access("upload")), + fmt.Sprintf("LocalWorkingDir=%s", config.LocalWorkingDir), + fmt.Sprintf("LocalGitDir=%s", config.LocalGitDir), + fmt.Sprintf("LocalGitStorageDir=%s", config.LocalGitStorageDir), + fmt.Sprintf("LocalMediaDir=%s", LocalMediaDir()), + fmt.Sprintf("LocalReferenceDir=%s", config.LocalReferenceDir), + fmt.Sprintf("TempDir=%s", TempDir()), + fmt.Sprintf("ConcurrentTransfers=%d", config.Config.ConcurrentTransfers()), + fmt.Sprintf("BasicTransfersOnly=%v", config.Config.BasicTransfersOnly()), + fmt.Sprintf("BatchTransfer=%v", config.Config.BatchTransfer()), + fmt.Sprintf("SkipDownloadErrors=%v", config.Config.SkipDownloadErrors()), + fmt.Sprintf("FetchRecentAlways=%v", config.Config.FetchPruneConfig().FetchRecentAlways), + fmt.Sprintf("FetchRecentRefsDays=%d", config.Config.FetchPruneConfig().FetchRecentRefsDays), + fmt.Sprintf("FetchRecentCommitsDays=%d", config.Config.FetchPruneConfig().FetchRecentCommitsDays), + fmt.Sprintf("FetchRecentRefsIncludeRemotes=%v", config.Config.FetchPruneConfig().FetchRecentRefsIncludeRemotes), + fmt.Sprintf("PruneOffsetDays=%d", config.Config.FetchPruneConfig().PruneOffsetDays), + fmt.Sprintf("PruneVerifyRemoteAlways=%v", config.Config.FetchPruneConfig().PruneVerifyRemoteAlways), + fmt.Sprintf("PruneRemoteName=%s", config.Config.FetchPruneConfig().PruneRemoteName), + fmt.Sprintf("AccessDownload=%s", config.Config.Access("download")), + fmt.Sprintf("AccessUpload=%s", config.Config.Access("upload")), ) - if len(Config.FetchExcludePaths()) > 0 { - env = append(env, fmt.Sprintf("FetchExclude=%s", strings.Join(Config.FetchExcludePaths(), ", "))) + if len(config.Config.FetchExcludePaths()) > 0 { + env = append(env, fmt.Sprintf("FetchExclude=%s", strings.Join(config.Config.FetchExcludePaths(), ", "))) } - if len(Config.FetchIncludePaths()) > 0 { - env = append(env, fmt.Sprintf("FetchInclude=%s", strings.Join(Config.FetchIncludePaths(), ", "))) + if len(config.Config.FetchIncludePaths()) > 0 { + env = append(env, fmt.Sprintf("FetchInclude=%s", strings.Join(config.Config.FetchIncludePaths(), ", "))) } - for _, ext := range Config.Extensions() { + for _, ext := range config.Config.Extensions() { env = append(env, fmt.Sprintf("Extension[%d]=%s", ext.Priority, ext.Name)) } @@ -113,134 +110,25 @@ func Environ() []string { } func InRepo() bool { - return LocalGitDir != "" -} - -func ResolveDirs() { - var err error - LocalGitDir, LocalWorkingDir, err = git.GitAndRootDirs() - if err == nil { - // Make sure we've fully evaluated symlinks, failure to do consistently - // can cause discrepancies - LocalGitDir = ResolveSymlinks(LocalGitDir) - LocalWorkingDir = ResolveSymlinks(LocalWorkingDir) - - LocalGitStorageDir = 
resolveGitStorageDir(LocalGitDir) - LocalReferenceDir = resolveReferenceDir(LocalGitStorageDir) - TempDir = filepath.Join(LocalGitDir, "lfs", "tmp") // temp files per worktree - - objs, err := localstorage.New( - filepath.Join(LocalGitStorageDir, "lfs", "objects"), - filepath.Join(TempDir, "objects"), - ) - - if err != nil { - panic(fmt.Sprintf("Error trying to init LocalStorage: %s", err)) - } - - objects = objs - LocalMediaDir = objs.RootDir - LocalObjectTempDir = objs.TempDir - LocalLogDir = filepath.Join(objs.RootDir, "logs") - if err := os.MkdirAll(LocalLogDir, localLogDirPerms); err != nil { - panic(fmt.Errorf("Error trying to create log directory in '%s': %s", LocalLogDir, err)) - } - } else { - errMsg := err.Error() - tracerx.Printf("Error running 'git rev-parse': %s", errMsg) - if !strings.Contains(errMsg, "Not a git repository") { - fmt.Fprintf(os.Stderr, "Error: %s\n", errMsg) - } - } + return config.LocalGitDir != "" } func ClearTempObjects() error { - if objects == nil { + if localstorage.Objects() == nil { return nil } - return objects.ClearTempObjects() + return localstorage.Objects().ClearTempObjects() } func ScanObjectsChan() <-chan localstorage.Object { - return objects.ScanObjectsChan() + return localstorage.Objects().ScanObjectsChan() } func init() { tracerx.DefaultKey = "GIT" tracerx.Prefix = "trace git-lfs: " - ResolveDirs() - - gitCommit := "" - if len(GitCommit) > 0 { - gitCommit = "; git " + GitCommit - } - UserAgent = fmt.Sprintf("git-lfs/%s (GitHub; %s %s; go %s%s)", - Version, - runtime.GOOS, - runtime.GOARCH, - strings.Replace(runtime.Version(), "go", "", 1), - gitCommit, - ) -} - -func processGitRedirectFile(file, prefix string) (string, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return "", err - } - - contents := string(data) - var dir string - if len(prefix) > 0 { - if !strings.HasPrefix(contents, prefix) { - // Prefix required & not found - return "", nil - } - dir = strings.TrimSpace(contents[len(prefix):]) - } else { - dir = strings.TrimSpace(contents) - } - - if !filepath.IsAbs(dir) { - // The .git file contains a relative path. - // Create an absolute path based on the directory the .git file is located in. - dir = filepath.Join(filepath.Dir(file), dir) - } - - return dir, nil -} - -func resolveReferenceDir(gitStorageDir string) string { - cloneReferencePath := filepath.Join(gitStorageDir, "objects", "info", "alternates") - if FileExists(cloneReferencePath) { - buffer, err := ioutil.ReadFile(cloneReferencePath) - if err == nil { - path := strings.TrimSpace(string(buffer[:])) - referenceLfsStoragePath := filepath.Join(filepath.Dir(path), "lfs", "objects") - if DirExists(referenceLfsStoragePath) { - return referenceLfsStoragePath - } - } - } - return "" -} - -// From a git dir, get the location that objects are to be stored (we will store lfs alongside) -// Sometimes there is an additional level of redirect on the .git folder by way of a commondir file -// before you find object storage, e.g. 'git worktree' uses this. It redirects to gitdir either by GIT_DIR -// (during setup) or .git/git-dir: (during use), but this only contains the index etc, the objects -// are found in another git dir via 'commondir'. 
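// For illustration only (not part of this patch): a concrete example of the
// 'commondir' redirection described above, based on the usual git worktree
// layout (the file contents shown are typical, not taken from this
// repository). For a repository at /repo with a linked worktree at /wt:
//
//   /wt/.git                            contains "gitdir: /repo/.git/worktrees/wt"
//   /repo/.git/worktrees/wt/commondir   contains "../.."
//
// processGitRedirectFile resolves the relative "../.." against the directory
// holding the commondir file, so the lookup sketched below yields /repo/.git,
// the git dir that actually holds objects/ (and therefore lfs/objects).
func worktreeStorageDirExample() (string, error) {
    return processGitRedirectFile("/repo/.git/worktrees/wt/commondir", "")
}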
-func resolveGitStorageDir(gitDir string) string { - commondirpath := filepath.Join(gitDir, "commondir") - if FileExists(commondirpath) && !DirExists(filepath.Join(gitDir, "objects")) { - // no git-dir: prefix in commondir - storage, err := processGitRedirectFile(commondirpath, "") - if err == nil { - return storage - } - } - return gitDir + localstorage.ResolveDirs() } const ( @@ -248,13 +136,9 @@ const ( gitPtrPrefix = "gitdir: " ) -func traceHttpReq(req *http.Request) string { - return fmt.Sprintf("%s %s", req.Method, strings.SplitN(req.URL.String(), "?", 2)[0]) -} - // only used in tests func AllObjects() []localstorage.Object { - return objects.AllObjects() + return localstorage.Objects().AllObjects() } func LinkOrCopyFromReference(oid string, size int64) error { @@ -266,7 +150,7 @@ func LinkOrCopyFromReference(oid string, size int64) error { if err != nil { return err } - if altMediafile != "" && FileExistsOfSize(altMediafile, size) { + if altMediafile != "" && tools.FileExistsOfSize(altMediafile, size) { return LinkOrCopy(altMediafile, mediafile) } return nil diff --git a/lfs/lfs_test.go b/lfs/lfs_test.go index 50463fb1..6f9f177e 100644 --- a/lfs/lfs_test.go +++ b/lfs/lfs_test.go @@ -7,7 +7,7 @@ import ( "github.com/github/git-lfs/lfs" "github.com/github/git-lfs/test" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) func TestAllCurrentObjectsNone(t *testing.T) { diff --git a/lfs/pointer.go b/lfs/pointer.go index 1be6bb6d..d8a1a4d2 100644 --- a/lfs/pointer.go +++ b/lfs/pointer.go @@ -11,6 +11,9 @@ import ( "sort" "strconv" "strings" + + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/progress" ) var ( @@ -57,7 +60,7 @@ func NewPointerExtension(name string, priority int, oid string) *PointerExtensio return &PointerExtension{name, priority, oid, oidType} } -func (p *Pointer) Smudge(writer io.Writer, workingfile string, download bool, cb CopyCallback) error { +func (p *Pointer) Smudge(writer io.Writer, workingfile string, download bool, cb progress.CopyCallback) error { return PointerSmudge(writer, p, workingfile, download, cb) } @@ -91,7 +94,7 @@ func DecodePointerFromFile(file string) (*Pointer, error) { return nil, err } if stat.Size() > blobSizeCutoff { - return nil, newNotAPointerError(nil) + return nil, errutil.NewNotAPointerError(nil) } f, err := os.OpenFile(file, os.O_RDONLY, 0644) if err != nil { @@ -120,7 +123,7 @@ func DecodeFrom(reader io.Reader) ([]byte, *Pointer, error) { func verifyVersion(version string) error { if len(version) == 0 { - return newNotAPointerError(errors.New("Missing version")) + return errutil.NewNotAPointerError(errors.New("Missing version")) } for _, v := range v1Aliases { @@ -135,11 +138,8 @@ func verifyVersion(version string) error { func decodeKV(data []byte) (*Pointer, error) { kvps, exts, err := decodeKVData(data) if err != nil { - if IsBadPointerKeyError(err) { - badErr := err.(badPointerKeyError) - if badErr.Expected == "version" { - return nil, newNotAPointerError(err) - } + if errutil.IsBadPointerKeyError(err) { + return nil, errutil.StandardizeBadPointerError(err) } return nil, err } @@ -233,7 +233,7 @@ func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string, kvps = make(map[string]string) if !matcherRE.Match(data) { - err = newNotAPointerError(err) + err = errutil.NewNotAPointerError(err) return } @@ -262,7 +262,7 @@ func decodeKVData(data []byte) (kvps map[string]string, exts map[string]string, if expected := pointerKeys[line]; key != 
expected { if !extRE.Match([]byte(key)) { - err = newBadPointerKeyError(expected, key) + err = errutil.NewBadPointerKeyError(expected, key) return } if exts == nil { diff --git a/lfs/pointer_clean.go b/lfs/pointer_clean.go index 0dbf7c3a..ba368a03 100644 --- a/lfs/pointer_clean.go +++ b/lfs/pointer_clean.go @@ -6,6 +6,11 @@ import ( "encoding/hex" "io" "os" + + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/progress" + "github.com/github/git-lfs/tools" ) type cleanedAsset struct { @@ -13,8 +18,8 @@ type cleanedAsset struct { *Pointer } -func PointerClean(reader io.Reader, fileName string, fileSize int64, cb CopyCallback) (*cleanedAsset, error) { - extensions, err := SortExtensions(Config.Extensions()) +func PointerClean(reader io.Reader, fileName string, fileSize int64, cb progress.CopyCallback) (*cleanedAsset, error) { + extensions, err := config.Config.SortedExtensions() if err != nil { return nil, err } @@ -56,7 +61,7 @@ func PointerClean(reader io.Reader, fileName string, fileSize int64, cb CopyCall return &cleanedAsset{tmp.Name(), pointer}, err } -func copyToTemp(reader io.Reader, fileSize int64, cb CopyCallback) (oid string, size int64, tmp *os.File, err error) { +func copyToTemp(reader io.Reader, fileSize int64, cb progress.CopyCallback) (oid string, size int64, tmp *os.File, err error) { tmp, err = TempFile("") if err != nil { return @@ -73,12 +78,12 @@ func copyToTemp(reader io.Reader, fileSize int64, cb CopyCallback) (oid string, by, ptr, err := DecodeFrom(reader) if err == nil && len(by) < 512 { - err = newCleanPointerError(err, ptr, by) + err = errutil.NewCleanPointerError(err, ptr, by) return } multi := io.MultiReader(bytes.NewReader(by), reader) - size, err = CopyWithCallback(writer, multi, fileSize, cb) + size, err = tools.CopyWithCallback(writer, multi, fileSize, cb) if err != nil { return diff --git a/lfs/pointer_smudge.go b/lfs/pointer_smudge.go index b9af0df2..a9043316 100644 --- a/lfs/pointer_smudge.go +++ b/lfs/pointer_smudge.go @@ -1,20 +1,23 @@ package lfs import ( - "crypto/sha256" - "encoding/hex" "fmt" - "hash" "io" - "io/ioutil" "os" "path/filepath" - "github.com/github/git-lfs/vendor/_nuts/github.com/cheggaaa/pb" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/cheggaaa/pb" + "github.com/github/git-lfs/tools" + "github.com/github/git-lfs/transfer" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/progress" + "github.com/rubyist/tracerx" ) -func PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb CopyCallback) error { +func PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb progress.CopyCallback) error { os.MkdirAll(filepath.Dir(filename), 0755) file, err := os.Create(filename) if err != nil { @@ -22,7 +25,7 @@ func PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb CopyCa } defer file.Close() if err := PointerSmudge(file, ptr, filename, download, cb); err != nil { - if IsDownloadDeclinedError(err) { + if errutil.IsDownloadDeclinedError(err) { // write placeholder data instead file.Seek(0, os.SEEK_SET) ptr.Encode(file) @@ -34,7 +37,7 @@ func PointerSmudgeToFile(filename string, ptr *Pointer, download bool, cb CopyCa return nil } -func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, cb CopyCallback) error { +func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download bool, cb 
progress.CopyCallback) error { mediafile, err := LocalMediaPath(ptr.Oid) if err != nil { return err @@ -57,159 +60,57 @@ func PointerSmudge(writer io.Writer, ptr *Pointer, workingfile string, download if download { err = downloadFile(writer, ptr, workingfile, mediafile, cb) } else { - return newDownloadDeclinedError(nil) + return errutil.NewDownloadDeclinedError(nil) } } else { err = readLocalFile(writer, ptr, mediafile, workingfile, cb) } if err != nil { - return newSmudgeError(err, ptr.Oid, mediafile) + return errutil.NewSmudgeError(err, ptr.Oid, mediafile) } return nil } -// PointerSmudgeObject uses a Pointer and ObjectResource to download the object to the -// media directory. It does not write the file to the working directory. -func PointerSmudgeObject(ptr *Pointer, obj *ObjectResource, cb CopyCallback) error { - mediafile, err := LocalMediaPath(obj.Oid) - if err != nil { - return err - } - - stat, statErr := os.Stat(mediafile) - if statErr == nil && stat != nil { - fileSize := stat.Size() - if fileSize == 0 || fileSize != obj.Size { - tracerx.Printf("Removing %s, size %d is invalid", mediafile, fileSize) - os.RemoveAll(mediafile) - stat = nil - } - } - - if statErr != nil || stat == nil { - err := downloadObject(ptr, obj, mediafile, cb) - - if err != nil { - return newSmudgeError(err, obj.Oid, mediafile) - } - } - - return nil -} - -func downloadObject(ptr *Pointer, obj *ObjectResource, mediafile string, cb CopyCallback) error { - reader, size, err := DownloadObject(obj) - if reader != nil { - defer reader.Close() - } - - if err != nil { - return Errorf(err, "Error downloading %s", mediafile) - } - - if ptr.Size == 0 { - ptr.Size = size - } - - if err := bufferDownloadedFile(mediafile, reader, ptr.Size, cb); err != nil { - return Errorf(err, "Error buffering media file: %s", err) - } - - return nil -} - -func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, cb CopyCallback) error { +func downloadFile(writer io.Writer, ptr *Pointer, workingfile, mediafile string, cb progress.CopyCallback) error { fmt.Fprintf(os.Stderr, "Downloading %s (%s)\n", workingfile, pb.FormatBytes(ptr.Size)) - reader, size, err := Download(filepath.Base(mediafile), ptr.Size) - if reader != nil { - defer reader.Close() - } + xfers := transfer.GetDownloadAdapterNames() + obj, adapterName, err := api.BatchOrLegacySingle(&api.ObjectResource{Oid: ptr.Oid, Size: ptr.Size}, "download", xfers) if err != nil { - return Errorf(err, "Error downloading %s: %s", filepath.Base(mediafile), err) + return errutil.Errorf(err, "Error downloading %s: %s", filepath.Base(mediafile), err) } if ptr.Size == 0 { - ptr.Size = size + ptr.Size = obj.Size } - if err := bufferDownloadedFile(mediafile, reader, ptr.Size, cb); err != nil { - return Errorf(err, "Error buffering media file: %s", err) + adapter := transfer.NewDownloadAdapter(adapterName) + var tcb transfer.TransferProgressCallback + if cb != nil { + tcb = func(name string, totalSize, readSoFar int64, readSinceLast int) error { + return cb(totalSize, readSoFar, readSinceLast) + } + } + // Single download + adapterResultChan := make(chan transfer.TransferResult, 1) + adapter.Begin(1, tcb, adapterResultChan) + adapter.Add(transfer.NewTransfer(filepath.Base(workingfile), obj, mediafile)) + adapter.End() + res := <-adapterResultChan + + if res.Error != nil { + return errutil.Errorf(err, "Error buffering media file: %s", res.Error) } return readLocalFile(writer, ptr, mediafile, workingfile, nil) } -// Writes the content of reader to filename atomically by 
writing to a temp file -// first, and confirming the content SHA-256 is valid. This is basically a copy -// of atomic.WriteFile() at: -// -// https://github.com/natefinch/atomic/blob/a62ce929ffcc871a51e98c6eba7b20321e3ed62d/atomic.go#L12-L17 -// -// filename - Absolute path to a file to write, with the filename a 64 character -// SHA-256 hex signature. -// reader - Any io.Reader -// size - Expected byte size of the content. Used for the progress bar in -// the optional CopyCallback. -// cb - Optional CopyCallback object for providing download progress to -// external Git LFS tools. -func bufferDownloadedFile(filename string, reader io.Reader, size int64, cb CopyCallback) error { - oid := filepath.Base(filename) - f, err := ioutil.TempFile(LocalObjectTempDir, oid+"-") - if err != nil { - return fmt.Errorf("cannot create temp file: %v", err) - } - - defer func() { - if err != nil { - // Don't leave the temp file lying around on error. - _ = os.Remove(f.Name()) // yes, ignore the error, not much we can do about it. - } - }() - - hasher := newHashingReader(reader) - - // ensure we always close f. Note that this does not conflict with the - // close below, as close is idempotent. - defer f.Close() - name := f.Name() - written, err := CopyWithCallback(f, hasher, size, cb) - if err != nil { - return fmt.Errorf("cannot write data to tempfile %q: %v", name, err) - } - if err := f.Close(); err != nil { - return fmt.Errorf("can't close tempfile %q: %v", name, err) - } - - if actual := hasher.Hash(); actual != oid { - return fmt.Errorf("Expected OID %s, got %s after %d bytes written", oid, actual, written) - } - - // get the file mode from the original file and use that for the replacement - // file, too. - info, err := os.Stat(filename) - if os.IsNotExist(err) { - // no original file - } else if err != nil { - return err - } else { - if err := os.Chmod(name, info.Mode()); err != nil { - return fmt.Errorf("can't set filemode on tempfile %q: %v", name, err) - } - } - - if err := os.Rename(name, filename); err != nil { - return fmt.Errorf("cannot replace %q with tempfile %q: %v", filename, name, err) - } - return nil -} - -func readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb CopyCallback) error { +func readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile string, cb progress.CopyCallback) error { reader, err := os.Open(mediafile) if err != nil { - return Errorf(err, "Error opening media file.") + return errutil.Errorf(err, "Error opening media file.") } defer reader.Close() @@ -220,24 +121,24 @@ func readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile } if len(ptr.Extensions) > 0 { - registeredExts := Config.Extensions() - extensions := make(map[string]Extension) + registeredExts := config.Config.Extensions() + extensions := make(map[string]config.Extension) for _, ptrExt := range ptr.Extensions { ext, ok := registeredExts[ptrExt.Name] if !ok { err := fmt.Errorf("Extension '%s' is not configured.", ptrExt.Name) - return Error(err) + return errutil.Error(err) } ext.Priority = ptrExt.Priority extensions[ext.Name] = ext } - exts, err := SortExtensions(extensions) + exts, err := config.SortExtensions(extensions) if err != nil { - return Error(err) + return errutil.Error(err) } // pipe extensions in reverse order - var extsR []Extension + var extsR []config.Extension for i := range exts { ext := exts[len(exts)-1-i] extsR = append(extsR, ext) @@ -247,7 +148,7 @@ func readLocalFile(writer io.Writer, ptr *Pointer, mediafile 
string, workingfile response, err := pipeExtensions(request) if err != nil { - return Error(err) + return errutil.Error(err) } actualExts := make(map[string]*pipeExtResult) @@ -259,58 +160,33 @@ func readLocalFile(writer io.Writer, ptr *Pointer, mediafile string, workingfile oid := response.results[0].oidIn if ptr.Oid != oid { err = fmt.Errorf("Actual oid %s during smudge does not match expected %s", oid, ptr.Oid) - return Error(err) + return errutil.Error(err) } for _, expected := range ptr.Extensions { actual := actualExts[expected.Name] if actual.name != expected.Name { err = fmt.Errorf("Actual extension name '%s' does not match expected '%s'", actual.name, expected.Name) - return Error(err) + return errutil.Error(err) } if actual.oidOut != expected.Oid { err = fmt.Errorf("Actual oid %s for extension '%s' does not match expected %s", actual.oidOut, expected.Name, expected.Oid) - return Error(err) + return errutil.Error(err) } } // setup reader reader, err = os.Open(response.file.Name()) if err != nil { - return Errorf(err, "Error opening smudged file: %s", err) + return errutil.Errorf(err, "Error opening smudged file: %s", err) } defer reader.Close() } - _, err = CopyWithCallback(writer, reader, ptr.Size, cb) + _, err = tools.CopyWithCallback(writer, reader, ptr.Size, cb) if err != nil { - return Errorf(err, "Error reading from media file: %s", err) + return errutil.Errorf(err, "Error reading from media file: %s", err) } return nil } - -type hashingReader struct { - reader io.Reader - hasher hash.Hash -} - -func newHashingReader(r io.Reader) *hashingReader { - return &hashingReader{r, sha256.New()} -} - -func (r *hashingReader) Hash() string { - return hex.EncodeToString(r.hasher.Sum(nil)) -} - -func (r *hashingReader) Read(b []byte) (int, error) { - w, err := r.reader.Read(b) - if err == nil || err == io.EOF { - _, e := r.hasher.Write(b[0:w]) - if e != nil && err == nil { - return w, e - } - } - - return w, err -} diff --git a/lfs/pointer_test.go b/lfs/pointer_test.go index 30e8fe6c..78585ff7 100644 --- a/lfs/pointer_test.go +++ b/lfs/pointer_test.go @@ -8,14 +8,15 @@ import ( "strings" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/github/git-lfs/errutil" + "github.com/stretchr/testify/assert" ) func TestEncode(t *testing.T) { var buf bytes.Buffer pointer := NewPointer("booya", 12345, nil) _, err := EncodePointer(&buf, pointer) - assert.Equal(t, nil, err) + assert.Nil(t, err) bufReader := bufio.NewReader(&buf) assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n") @@ -50,7 +51,7 @@ func TestEncodeExtensions(t *testing.T) { } pointer := NewPointer("main_oid", 12345, exts) _, err := EncodePointer(&buf, pointer) - assert.Equal(t, nil, err) + assert.Nil(t, err) bufReader := bufio.NewReader(&buf) assertLine(t, bufReader, "version https://git-lfs.github.com/spec/v1\n") @@ -69,7 +70,7 @@ func TestEncodeExtensions(t *testing.T) { func assertLine(t *testing.T, r *bufio.Reader, expected string) { actual, err := r.ReadString('\n') - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, expected, actual) } @@ -80,7 +81,7 @@ func TestDecodeTinyFile(t *testing.T) { t.Errorf("pointer was decoded: %v", p) } - if !IsNotAPointerError(err) { + if !errutil.IsNotAPointerError(err) { t.Errorf("error is not a NotAPointerError: %s: '%v'", reflect.TypeOf(err), err) } } @@ -289,5 +290,5 @@ size 12345`, } func assertEqualWithExample(t *testing.T, example string, expected, actual interface{}) { - assert.Equalf(t, expected, actual, 
"Example:\n%s", strings.TrimSpace(example)) + assert.Equal(t, expected, actual, "Example:\n%s", strings.TrimSpace(example)) } diff --git a/lfs/scanner.go b/lfs/scanner.go index 17122e7f..ecc6967c 100644 --- a/lfs/scanner.go +++ b/lfs/scanner.go @@ -15,7 +15,7 @@ import ( "time" "github.com/github/git-lfs/git" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/rubyist/tracerx" ) const ( diff --git a/lfs/scanner_git_test.go b/lfs/scanner_git_test.go index 9832b371..de403aad 100644 --- a/lfs/scanner_git_test.go +++ b/lfs/scanner_git_test.go @@ -11,7 +11,7 @@ import ( . "github.com/github/git-lfs/lfs" "github.com/github/git-lfs/test" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) func TestScanUnpushed(t *testing.T) { @@ -54,36 +54,36 @@ func TestScanUnpushed(t *testing.T) { repo.AddRemote("upstream") pointers, err := ScanUnpushed("") - assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed") - assert.Equal(t, 4, len(pointers), "Should be 4 pointers because none pushed") + assert.Nil(t, err, "Should be no error calling ScanUnpushed") + assert.Len(t, pointers, 4, "Should be 4 pointers because none pushed") test.RunGitCommand(t, true, "push", "origin", "branch2") // Branch2 will have pushed 2 commits pointers, err = ScanUnpushed("") - assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed") - assert.Equal(t, 2, len(pointers), "Should be 2 pointers") + assert.Nil(t, err, "Should be no error calling ScanUnpushed") + assert.Len(t, pointers, 2, "Should be 2 pointers") test.RunGitCommand(t, true, "push", "upstream", "master") // Master pushes 1 more commit pointers, err = ScanUnpushed("") - assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed") - assert.Equal(t, 1, len(pointers), "Should be 1 pointer") + assert.Nil(t, err, "Should be no error calling ScanUnpushed") + assert.Len(t, pointers, 1, "Should be 1 pointer") test.RunGitCommand(t, true, "push", "origin", "branch3") // All pushed (somewhere) pointers, err = ScanUnpushed("") - assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed") - assert.Equal(t, 0, len(pointers), "Should be 0 pointers unpushed") + assert.Nil(t, err, "Should be no error calling ScanUnpushed") + assert.Empty(t, pointers, "Should be 0 pointers unpushed") // Check origin pointers, err = ScanUnpushed("origin") - assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed") - assert.Equal(t, 0, len(pointers), "Should be 0 pointers unpushed to origin") + assert.Nil(t, err, "Should be no error calling ScanUnpushed") + assert.Empty(t, pointers, "Should be 0 pointers unpushed to origin") // Check upstream pointers, err = ScanUnpushed("upstream") - assert.Equal(t, nil, err, "Should be no error calling ScanUnpushed") - assert.Equal(t, 2, len(pointers), "Should be 2 pointers unpushed to upstream") + assert.Nil(t, err, "Should be no error calling ScanUnpushed") + assert.Len(t, pointers, 2, "Should be 2 pointers unpushed to upstream") } func TestScanPreviousVersions(t *testing.T) { diff --git a/lfs/scanner_test.go b/lfs/scanner_test.go index 32ec4f7e..46db8834 100644 --- a/lfs/scanner_test.go +++ b/lfs/scanner_test.go @@ -4,7 +4,7 @@ import ( "strings" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/stretchr/testify/assert" ) var ( @@ -108,7 +108,7 @@ func TestParseLogOutputToPointersAdditions(t *testing.T) { for p := range pchan { pointers = append(pointers, p) } - assert.Equal(t, 5, 
len(pointers)) + assert.Len(t, pointers, 5) // modification, + side assert.Equal(t, "radial_1.png", pointers[0].Name) @@ -142,7 +142,7 @@ func TestParseLogOutputToPointersAdditions(t *testing.T) { for p := range pchan { pointers = append(pointers, p) } - assert.Equal(t, 1, len(pointers)) + assert.Len(t, pointers, 1) assert.Equal(t, "waveNM.png", pointers[0].Name) assert.Equal(t, "fe2c2f236b97bba4585d9909a227a8fa64897d9bbe297fa272f714302d86c908", pointers[0].Oid) assert.Equal(t, int64(125873), pointers[0].Size) @@ -158,7 +158,7 @@ func TestParseLogOutputToPointersAdditions(t *testing.T) { for p := range pchan { pointers = append(pointers, p) } - assert.Equal(t, 4, len(pointers)) + assert.Len(t, pointers, 4) assert.Equal(t, "radial_1.png", pointers[0].Name) assert.Equal(t, "3301b3da173d231f0f6b1f9bf075e573758cd79b3cfeff7623a953d708d6688b", pointers[0].Oid) assert.Equal(t, int64(3152388), pointers[0].Size) @@ -188,7 +188,7 @@ func TestParseLogOutputToPointersDeletion(t *testing.T) { pointers = append(pointers, p) } - assert.Equal(t, 4, len(pointers)) + assert.Len(t, pointers, 4) // deletion, - side assert.Equal(t, "smoke_1.png", pointers[0].Name) @@ -218,7 +218,7 @@ func TestParseLogOutputToPointersDeletion(t *testing.T) { for p := range pchan { pointers = append(pointers, p) } - assert.Equal(t, 1, len(pointers)) + assert.Len(t, pointers, 1) assert.Equal(t, "flare_1.png", pointers[0].Name) assert.Equal(t, "ea61c67cc5e8b3504d46de77212364045f31d9a023ad4448a1ace2a2fb4eed28", pointers[0].Oid) assert.Equal(t, int64(72982), pointers[0].Size) @@ -234,7 +234,7 @@ func TestParseLogOutputToPointersDeletion(t *testing.T) { for p := range pchan { pointers = append(pointers, p) } - assert.Equal(t, 3, len(pointers)) + assert.Len(t, pointers, 3) assert.Equal(t, "smoke_1.png", pointers[0].Name) assert.Equal(t, "8eb65d66303acc60062f44b44ef1f7360d7189db8acf3d066e59e2528f39514e", pointers[0].Oid) assert.Equal(t, int64(35022), pointers[0].Size) diff --git a/lfs/ssh_test.go b/lfs/ssh_test.go deleted file mode 100644 index bec03f53..00000000 --- a/lfs/ssh_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package lfs - -import ( - "path/filepath" - "testing" - - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" -) - -func TestSSHGetExeAndArgsSsh(t *testing.T) { - endpoint := Config.Endpoint("download") - endpoint.SshUserAndHost = "user@foo.com" - oldGITSSH := Config.Getenv("GIT_SSH") - Config.Setenv("GIT_SSH", "") - exe, args := sshGetExeAndArgs(endpoint) - assert.Equal(t, "ssh", exe) - assert.Equal(t, []string{"user@foo.com"}, args) - - Config.Setenv("GIT_SSH", oldGITSSH) -} - -func TestSSHGetExeAndArgsSshCustomPort(t *testing.T) { - endpoint := Config.Endpoint("download") - endpoint.SshUserAndHost = "user@foo.com" - endpoint.SshPort = "8888" - oldGITSSH := Config.Getenv("GIT_SSH") - Config.Setenv("GIT_SSH", "") - exe, args := sshGetExeAndArgs(endpoint) - assert.Equal(t, "ssh", exe) - assert.Equal(t, []string{"-p", "8888", "user@foo.com"}, args) - - Config.Setenv("GIT_SSH", oldGITSSH) -} - -func TestSSHGetExeAndArgsPlink(t *testing.T) { - endpoint := Config.Endpoint("download") - endpoint.SshUserAndHost = "user@foo.com" - oldGITSSH := Config.Getenv("GIT_SSH") - // this will run on non-Windows platforms too but no biggie - plink := filepath.Join("Users", "joebloggs", "bin", "plink.exe") - Config.Setenv("GIT_SSH", plink) - exe, args := sshGetExeAndArgs(endpoint) - assert.Equal(t, plink, exe) - assert.Equal(t, []string{"user@foo.com"}, args) - - Config.Setenv("GIT_SSH", oldGITSSH) -} - -func 
TestSSHGetExeAndArgsPlinkCustomPort(t *testing.T) { - endpoint := Config.Endpoint("download") - endpoint.SshUserAndHost = "user@foo.com" - endpoint.SshPort = "8888" - oldGITSSH := Config.Getenv("GIT_SSH") - // this will run on non-Windows platforms too but no biggie - plink := filepath.Join("Users", "joebloggs", "bin", "plink") - Config.Setenv("GIT_SSH", plink) - exe, args := sshGetExeAndArgs(endpoint) - assert.Equal(t, plink, exe) - assert.Equal(t, []string{"-P", "8888", "user@foo.com"}, args) - - Config.Setenv("GIT_SSH", oldGITSSH) -} - -func TestSSHGetExeAndArgsTortoisePlink(t *testing.T) { - endpoint := Config.Endpoint("download") - endpoint.SshUserAndHost = "user@foo.com" - oldGITSSH := Config.Getenv("GIT_SSH") - // this will run on non-Windows platforms too but no biggie - plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink.exe") - Config.Setenv("GIT_SSH", plink) - exe, args := sshGetExeAndArgs(endpoint) - assert.Equal(t, plink, exe) - assert.Equal(t, []string{"-batch", "user@foo.com"}, args) - - Config.Setenv("GIT_SSH", oldGITSSH) -} - -func TestSSHGetExeAndArgsTortoisePlinkCustomPort(t *testing.T) { - endpoint := Config.Endpoint("download") - endpoint.SshUserAndHost = "user@foo.com" - endpoint.SshPort = "8888" - oldGITSSH := Config.Getenv("GIT_SSH") - // this will run on non-Windows platforms too but no biggie - plink := filepath.Join("Users", "joebloggs", "bin", "tortoiseplink") - Config.Setenv("GIT_SSH", plink) - exe, args := sshGetExeAndArgs(endpoint) - assert.Equal(t, plink, exe) - assert.Equal(t, []string{"-batch", "-P", "8888", "user@foo.com"}, args) - - Config.Setenv("GIT_SSH", oldGITSSH) -} diff --git a/lfs/transfer_queue.go b/lfs/transfer_queue.go index 010233e8..b238f572 100644 --- a/lfs/transfer_queue.go +++ b/lfs/transfer_queue.go @@ -4,8 +4,13 @@ import ( "sync" "sync/atomic" + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/git" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/github/git-lfs/progress" + "github.com/github/git-lfs/transfer" + "github.com/rubyist/tracerx" ) const ( @@ -13,45 +18,53 @@ const ( ) type Transferable interface { - Check() (*ObjectResource, error) - Transfer(CopyCallback) error - Object() *ObjectResource Oid() string Size() int64 Name() string - SetObject(*ObjectResource) + Path() string + Object() *api.ObjectResource + SetObject(*api.ObjectResource) + // Legacy API check - TODO remove this and only support batch + LegacyCheck() (*api.ObjectResource, error) } -// TransferQueue provides a queue that will allow concurrent transfers. 
+// TransferQueue organises the wider process of uploading and downloading, +// including calling the API, passing the actual transfer request to transfer +// adapters, and dealing with progress, errors and retries type TransferQueue struct { - retrying uint32 - meter *ProgressMeter - workers int // Number of transfer workers to spawn - transferKind string - errors []error - transferables map[string]Transferable - retries []Transferable - batcher *Batcher - apic chan Transferable // Channel for processing individual API requests - transferc chan Transferable // Channel for processing transfers - retriesc chan Transferable // Channel for processing retries - errorc chan error // Channel for processing errors - watchers []chan string - trMutex *sync.Mutex - errorwait sync.WaitGroup - retrywait sync.WaitGroup - wait sync.WaitGroup + direction transfer.Direction + adapter transfer.TransferAdapter + adapterInProgress bool + adapterResultChan chan transfer.TransferResult + adapterInitMutex sync.Mutex + dryRun bool + retrying uint32 + meter *progress.ProgressMeter + errors []error + transferables map[string]Transferable + retries []Transferable + batcher *Batcher + apic chan Transferable // Channel for processing individual API requests + retriesc chan Transferable // Channel for processing retries + errorc chan error // Channel for processing errors + watchers []chan string + trMutex *sync.Mutex + errorwait sync.WaitGroup + retrywait sync.WaitGroup + wait sync.WaitGroup // Incremented on Add(), decremented on transfer complete or skip + oldApiWorkers int // Number of non-batch API workers to spawn (deprecated) } -// newTransferQueue builds a TransferQueue, allowing `workers` concurrent transfers. -func newTransferQueue(files int, size int64, dryRun bool) *TransferQueue { +// newTransferQueue builds a TransferQueue, direction and underlying mechanism determined by adapter +func newTransferQueue(files int, size int64, dryRun bool, dir transfer.Direction) *TransferQueue { q := &TransferQueue{ - meter: NewProgressMeter(files, size, dryRun), + direction: dir, + dryRun: dryRun, + meter: progress.NewProgressMeter(files, size, dryRun, config.Config.Getenv("GIT_LFS_PROGRESS")), apic: make(chan Transferable, batchSize), - transferc: make(chan Transferable, batchSize), retriesc: make(chan Transferable, batchSize), errorc: make(chan error), - workers: Config.ConcurrentTransfers(), + oldApiWorkers: config.Config.ConcurrentTransfers(), transferables: make(map[string]Transferable), trMutex: &sync.Mutex{}, } @@ -79,10 +92,116 @@ func (q *TransferQueue) Add(t Transferable) { q.apic <- t } +func (q *TransferQueue) useAdapter(name string) { + q.adapterInitMutex.Lock() + defer q.adapterInitMutex.Unlock() + + if q.adapter != nil { + if q.adapter.Name() == name { + // re-use, this is the normal path + return + } + // If the adapter we're using isn't the same as the one we've been + // told to use now, must wait for the current one to finish then switch + // This will probably never happen but is just in case server starts + // changing adapter support in between batches + q.finishAdapter() + } + q.adapter = transfer.NewAdapterOrDefault(name, q.direction) +} + +func (q *TransferQueue) finishAdapter() { + if q.adapterInProgress { + q.adapter.End() + q.adapterInProgress = false + q.adapter = nil + } +} + +func (q *TransferQueue) addToAdapter(t Transferable) { + + tr := transfer.NewTransfer(t.Name(), t.Object(), t.Path()) + + if q.dryRun { + // Don't actually transfer + res := transfer.TransferResult{tr, nil} + 
q.handleTransferResult(res) + return + } + q.ensureAdapterBegun() + q.adapter.Add(tr) +} + func (q *TransferQueue) Skip(size int64) { q.meter.Skip(size) } +func (q *TransferQueue) transferKind() string { + if q.direction == transfer.Download { + return "download" + } else { + return "upload" + } +} + +func (q *TransferQueue) ensureAdapterBegun() { + q.adapterInitMutex.Lock() + defer q.adapterInitMutex.Unlock() + + if q.adapterInProgress { + return + } + + adapterResultChan := make(chan transfer.TransferResult, 20) + + // Progress callback - receives byte updates + cb := func(name string, total, read int64, current int) error { + q.meter.TransferBytes(q.transferKind(), name, read, total, current) + return nil + } + + tracerx.Printf("tq: starting transfer adapter %q", q.adapter.Name()) + q.adapter.Begin(config.Config.ConcurrentTransfers(), cb, adapterResultChan) + q.adapterInProgress = true + + // Collector for completed transfers + // q.wait.Done() in handleTransferResult is enough to know when this is complete for all transfers + go func() { + for res := range adapterResultChan { + q.handleTransferResult(res) + } + }() + +} + +func (q *TransferQueue) handleTransferResult(res transfer.TransferResult) { + if res.Error != nil { + if q.canRetry(res.Error) { + tracerx.Printf("tq: retrying object %s", res.Transfer.Object.Oid) + q.trMutex.Lock() + t, ok := q.transferables[res.Transfer.Object.Oid] + q.trMutex.Unlock() + if ok { + q.retry(t) + } else { + q.errorc <- res.Error + } + } else { + q.errorc <- res.Error + } + } else { + oid := res.Transfer.Object.Oid + for _, c := range q.watchers { + c <- oid + } + + q.meter.FinishTransfer(res.Transfer.Name) + } + + q.wait.Done() + +} + // Wait waits for the queue to finish processing all transfers. Once Wait is // called, Add will no longer add transferables to the queue. Any failed // transfers will be automatically retried once. @@ -112,7 +231,7 @@ func (q *TransferQueue) Wait() { atomic.StoreUint32(&q.retrying, 0) close(q.apic) - close(q.transferc) + q.finishAdapter() close(q.errorc) for _, watcher := range q.watchers { @@ -135,9 +254,10 @@ func (q *TransferQueue) Watch() chan string { // a POST call for each object, feeding the results to the transfer workers. // If configured, the object transfers can still happen concurrently, the // sequential nature here is only for the meta POST calls. +// TODO LEGACY API: remove when legacy API removed func (q *TransferQueue) individualApiRoutine(apiWaiter chan interface{}) { for t := range q.apic { - obj, err := t.Check() + obj, err := t.LegacyCheck() if err != nil { if q.canRetry(err) { q.retry(t) @@ -156,10 +276,12 @@ func (q *TransferQueue) individualApiRoutine(apiWaiter chan interface{}) { } } + // Legacy API has no support for anything but basic transfer adapter + q.useAdapter(transfer.BasicAdapterName) if obj != nil { t.SetObject(obj) q.meter.Add(t.Name()) - q.transferc <- t + q.addToAdapter(t) } else { q.Skip(t.Size()) q.wait.Done() @@ -170,13 +292,14 @@ func (q *TransferQueue) individualApiRoutine(apiWaiter chan interface{}) { // legacyFallback is used when a batch request is made to a server that does // not support the batch endpoint. When this happens, the Transferables are // fed from the batcher into apic to be processed individually. 
-func (q *TransferQueue) legacyFallback(failedBatch []Transferable) { +// TODO LEGACY API: remove when legacy API removed +func (q *TransferQueue) legacyFallback(failedBatch []interface{}) { tracerx.Printf("tq: batch api not implemented, falling back to individual") q.launchIndividualApiRoutines() for _, t := range failedBatch { - q.apic <- t + q.apic <- t.(Transferable) } for { @@ -186,7 +309,7 @@ func (q *TransferQueue) legacyFallback(failedBatch []Transferable) { } for _, t := range batch { - q.apic <- t + q.apic <- t.(Transferable) } } } @@ -197,6 +320,8 @@ func (q *TransferQueue) legacyFallback(failedBatch []Transferable) { func (q *TransferQueue) batchApiRoutine() { var startProgress sync.Once + transferAdapterNames := transfer.GetAdapterNames(q.direction) + for { batch := q.batcher.Next() if batch == nil { @@ -205,14 +330,19 @@ func (q *TransferQueue) batchApiRoutine() { tracerx.Printf("tq: sending batch of size %d", len(batch)) - transfers := make([]*ObjectResource, 0, len(batch)) - for _, t := range batch { - transfers = append(transfers, &ObjectResource{Oid: t.Oid(), Size: t.Size()}) + transfers := make([]*api.ObjectResource, 0, len(batch)) + for _, i := range batch { + t := i.(Transferable) + transfers = append(transfers, &api.ObjectResource{Oid: t.Oid(), Size: t.Size()}) } - objects, err := Batch(transfers, q.transferKind) + if len(transfers) == 0 { + continue + } + + objs, adapterName, err := api.Batch(transfers, q.transferKind(), transferAdapterNames) if err != nil { - if IsNotImplementedError(err) { + if errutil.IsNotImplementedError(err) { git.Config.SetLocal("", "lfs.batch", "false") go q.legacyFallback(batch) @@ -221,7 +351,7 @@ func (q *TransferQueue) batchApiRoutine() { if q.canRetry(err) { for _, t := range batch { - q.retry(t) + q.retry(t.(Transferable)) } } else { q.errorc <- err @@ -231,17 +361,18 @@ func (q *TransferQueue) batchApiRoutine() { continue } + q.useAdapter(adapterName) startProgress.Do(q.meter.Start) - for _, o := range objects { + for _, o := range objs { if o.Error != nil { - q.errorc <- Errorf(o.Error, "[%v] %v", o.Oid, o.Error.Message) + q.errorc <- errutil.Errorf(o.Error, "[%v] %v", o.Oid, o.Error.Message) q.Skip(o.Size) q.wait.Done() continue } - if _, ok := o.Rel(q.transferKind); ok { + if _, ok := o.Rel(q.transferKind()); ok { // This object needs to be transferred q.trMutex.Lock() transfer, ok := q.transferables[o.Oid] @@ -250,7 +381,7 @@ func (q *TransferQueue) batchApiRoutine() { if ok { transfer.SetObject(o) q.meter.Add(transfer.Name()) - q.transferc <- transfer + q.addToAdapter(transfer) } else { q.Skip(transfer.Size()) q.wait.Done() @@ -278,33 +409,6 @@ func (q *TransferQueue) retryCollector() { q.retrywait.Done() } -func (q *TransferQueue) transferWorker() { - for transfer := range q.transferc { - cb := func(total, read int64, current int) error { - q.meter.TransferBytes(q.transferKind, transfer.Name(), read, total, current) - return nil - } - - if err := transfer.Transfer(cb); err != nil { - if q.canRetry(err) { - tracerx.Printf("tq: retrying object %s", transfer.Oid()) - q.retry(transfer) - } else { - q.errorc <- err - } - } else { - oid := transfer.Oid() - for _, c := range q.watchers { - c <- oid - } - } - - q.meter.FinishTransfer(transfer.Name()) - - q.wait.Done() - } -} - // launchIndividualApiRoutines first launches a single api worker. When it // receives the first successful api request it launches workers - 1 more // workers. 
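// ---------------------------------------------------------------------------
// Editor's note, not part of the patch: the hunks above remove the old
// transferWorker goroutines and route completed transfers through
// handleTransferResult and the pluggable transfer adapters instead, while the
// queue's exported surface (NewUploadQueue, Add, Watch, Wait) keeps its old
// shape. Below is a minimal sketch of driving the refactored queue, using only
// names visible in this diff; the wrapper package and the caller-supplied
// oid-to-filename map and size total are illustrative assumptions.
// ---------------------------------------------------------------------------
package lfsclient // hypothetical wrapper package, for illustration only

import (
	"fmt"

	"github.com/github/git-lfs/lfs"
)

func uploadAll(files map[string]string, totalSize int64) {
	// The queue is sized up front so its progress meter knows the totals.
	q := lfs.NewUploadQueue(len(files), totalSize, false)

	// Watch emits the OID of each object as its transfer completes.
	go func() {
		for oid := range q.Watch() {
			fmt.Println("uploaded", oid)
		}
	}()

	for oid, name := range files {
		// NewUploadable checks the local media file and wraps it as a Transferable.
		u, err := lfs.NewUploadable(oid, name)
		if err != nil {
			fmt.Println("skipping", oid, ":", err)
			continue
		}
		q.Add(u)
	}

	// Wait blocks until everything is transferred; per the doc comment above,
	// failed transfers are retried once automatically.
	q.Wait()
}
// ---------------------------------------------------------------------------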
This prevents being prompted for credentials multiple times at once @@ -316,7 +420,7 @@ func (q *TransferQueue) launchIndividualApiRoutines() { <-apiWaiter - for i := 0; i < q.workers-1; i++ { + for i := 0; i < q.oldApiWorkers-1; i++ { go q.individualApiRoutine(nil) } }() @@ -329,12 +433,7 @@ func (q *TransferQueue) run() { go q.errorCollector() go q.retryCollector() - tracerx.Printf("tq: starting %d transfer workers", q.workers) - for i := 0; i < q.workers; i++ { - go q.transferWorker() - } - - if Config.BatchTransfer() { + if config.Config.BatchTransfer() { tracerx.Printf("tq: running as batched queue, batch size of %d", batchSize) q.batcher = NewBatcher(batchSize) go q.batchApiRoutine() @@ -349,7 +448,7 @@ func (q *TransferQueue) retry(t Transferable) { } func (q *TransferQueue) canRetry(err error) bool { - if !IsRetriableError(err) || atomic.LoadUint32(&q.retrying) == 1 { + if !errutil.IsRetriableError(err) || atomic.LoadUint32(&q.retrying) == 1 { return false } diff --git a/lfs/upload_queue.go b/lfs/upload_queue.go index ce05df66..df3a77b6 100644 --- a/lfs/upload_queue.go +++ b/lfs/upload_queue.go @@ -4,6 +4,11 @@ import ( "fmt" "os" "path/filepath" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/transfer" ) // Uploadable describes a file that can be uploaded. @@ -12,45 +17,10 @@ type Uploadable struct { OidPath string Filename string size int64 - object *ObjectResource + object *api.ObjectResource } -// NewUploadable builds the Uploadable from the given information. -// "filename" can be empty if a raw object is pushed (see "object-id" flag in push command)/ -func NewUploadable(oid, filename string) (*Uploadable, error) { - localMediaPath, err := LocalMediaPath(oid) - if err != nil { - return nil, Errorf(err, "Error uploading file %s (%s)", filename, oid) - } - - if len(filename) > 0 { - if err := ensureFile(filename, localMediaPath); err != nil { - return nil, err - } - } - - fi, err := os.Stat(localMediaPath) - if err != nil { - return nil, Errorf(err, "Error uploading file %s (%s)", filename, oid) - } - - return &Uploadable{oid: oid, OidPath: localMediaPath, Filename: filename, size: fi.Size()}, nil -} - -func (u *Uploadable) Check() (*ObjectResource, error) { - return UploadCheck(u.OidPath) -} - -func (u *Uploadable) Transfer(cb CopyCallback) error { - wcb := func(total, read int64, current int) error { - cb(total, read, current) - return nil - } - - return UploadObject(u.object, wcb) -} - -func (u *Uploadable) Object() *ObjectResource { +func (u *Uploadable) Object() *api.ObjectResource { return u.object } @@ -66,15 +36,44 @@ func (u *Uploadable) Name() string { return u.Filename } -func (u *Uploadable) SetObject(o *ObjectResource) { +func (u *Uploadable) SetObject(o *api.ObjectResource) { u.object = o } +func (u *Uploadable) Path() string { + return u.OidPath +} + +// TODO LEGACY API: remove when legacy API removed +func (u *Uploadable) LegacyCheck() (*api.ObjectResource, error) { + return api.UploadCheck(u.Oid(), u.Size()) +} + +// NewUploadable builds the Uploadable from the given information. 
+// "filename" can be empty if a raw object is pushed (see "object-id" flag in push command)/ +func NewUploadable(oid, filename string) (*Uploadable, error) { + localMediaPath, err := LocalMediaPath(oid) + if err != nil { + return nil, errutil.Errorf(err, "Error uploading file %s (%s)", filename, oid) + } + + if len(filename) > 0 { + if err := ensureFile(filename, localMediaPath); err != nil { + return nil, err + } + } + + fi, err := os.Stat(localMediaPath) + if err != nil { + return nil, errutil.Errorf(err, "Error uploading file %s (%s)", filename, oid) + } + + return &Uploadable{oid: oid, OidPath: localMediaPath, Filename: filename, size: fi.Size()}, nil +} + // NewUploadQueue builds an UploadQueue, allowing `workers` concurrent uploads. func NewUploadQueue(files int, size int64, dryRun bool) *TransferQueue { - q := newTransferQueue(files, size, dryRun) - q.transferKind = "upload" - return q + return newTransferQueue(files, size, dryRun, transfer.Upload) } // ensureFile makes sure that the cleanPath exists before pushing it. If it @@ -85,7 +84,7 @@ func ensureFile(smudgePath, cleanPath string) error { } expectedOid := filepath.Base(cleanPath) - localPath := filepath.Join(LocalWorkingDir, smudgePath) + localPath := filepath.Join(config.LocalWorkingDir, smudgePath) file, err := os.Open(localPath) if err != nil { return err diff --git a/lfs/upload_test.go b/lfs/upload_test.go deleted file mode 100644 index b7eee86c..00000000 --- a/lfs/upload_test.go +++ /dev/null @@ -1,925 +0,0 @@ -package lfs - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "strconv" - "testing" -) - -func TestExistingUpload(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = tmp - defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - putCalled := false - verifyCalled := false - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - obj := &ObjectResource{ - Oid: reqObj.Oid, - Size: reqObj.Size, - Actions: map[string]*linkRelation{ - "upload": &linkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &linkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - mux.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - putCalled = true - w.WriteHeader(200) - }) - 
- mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - verifyCalled = true - w.WriteHeader(200) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/media") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - o, err := UploadCheck(oidPath) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - if o != nil { - t.Errorf("Got an object back") - } - - if !postCalled { - t.Errorf("POST not called") - } - - if putCalled { - t.Errorf("PUT not skipped") - } - - if verifyCalled { - t.Errorf("verify not skipped") - } -} - -func TestUploadWithRedirect(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = tmp - defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - mux.HandleFunc("/redirect/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/redirect2/objects") - w.WriteHeader(307) - }) - - mux.HandleFunc("/redirect2/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - w.Header().Set("Location", server.URL+"/media/objects") - w.WriteHeader(307) - }) - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - obj := &ObjectResource{ - Actions: map[string]*linkRelation{ - "upload": &linkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &linkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(200) - w.Write(by) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/redirect") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - obj, err := UploadCheck(oidPath) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - - if obj != nil { - t.Fatal("Received an object") - } -} - -func TestSuccessfulUploadWithVerify(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = tmp - 
defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - putCalled := false - verifyCalled := false - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - obj := &ObjectResource{ - Oid: reqObj.Oid, - Size: reqObj.Size, - Actions: map[string]*linkRelation{ - "upload": &linkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &linkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(202) - w.Write(by) - }) - - mux.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "PUT" { - w.WriteHeader(405) - return - } - - if r.Header.Get("A") != "1" { - t.Error("Invalid A") - } - - if r.Header.Get("Content-Type") != "application/octet-stream" { - t.Error("Invalid Content-Type") - } - - if r.Header.Get("Content-Length") != "4" { - t.Error("Invalid Content-Length") - } - - if r.Header.Get("Transfer-Encoding") != "" { - t.Fatal("Transfer-Encoding is set") - } - - by, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Error(err) - } - - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", string(by)) - - if str := string(by); str != "test" { - t.Errorf("unexpected body: %s", str) - } - - putCalled = true - w.WriteHeader(200) - }) - - mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("B") != "2" { - t.Error("Invalid B") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Error("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - verifyCalled = true - w.WriteHeader(200) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/media") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - // stores callbacks - calls := make([][]int64, 0, 5) - cb := func(total 
int64, written int64, current int) error { - calls = append(calls, []int64{total, written}) - return nil - } - - obj, err := UploadCheck(oidPath) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - err = UploadObject(obj, cb) - if err != nil { - t.Fatal(err) - } - - if !postCalled { - t.Errorf("POST not called") - } - - if !putCalled { - t.Errorf("PUT not called") - } - - if !verifyCalled { - t.Errorf("verify not called") - } - - t.Logf("CopyCallback: %v", calls) - - if len(calls) < 1 { - t.Errorf("CopyCallback was not used") - } - - lastCall := calls[len(calls)-1] - if lastCall[0] != 4 || lastCall[1] != 4 { - t.Errorf("Last CopyCallback call should be the total") - } -} - -func TestSuccessfulUploadWithoutVerify(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = tmp - defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - putCalled := false - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - obj := &ObjectResource{ - Oid: reqObj.Oid, - Size: reqObj.Size, - Actions: map[string]*linkRelation{ - "upload": &linkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(202) - w.Write(by) - }) - - mux.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "PUT" { - w.WriteHeader(405) - return - } - - if a := r.Header.Get("A"); a != "1" { - t.Errorf("Invalid A: %s", a) - } - - if r.Header.Get("Content-Type") != "application/octet-stream" { - t.Error("Invalid Content-Type") - } - - if r.Header.Get("Content-Length") != "4" { - t.Error("Invalid Content-Length") - } - - if r.Header.Get("Transfer-Encoding") != "" { - t.Fatal("Transfer-Encoding is set") - } - - by, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Error(err) - } - - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", string(by)) - - if str := string(by); str != "test" { - t.Errorf("unexpected body: %s", str) - } - - putCalled = true - w.WriteHeader(200) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/media") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - obj, err := UploadCheck(oidPath) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - err = 
UploadObject(obj, nil) - if err != nil { - t.Fatal(err) - } - - if !postCalled { - t.Errorf("POST not called") - } - - if !putCalled { - t.Errorf("PUT not called") - } -} - -func TestUploadApiError(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = olddir - defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - postCalled = true - w.WriteHeader(404) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/media") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - _, err := UploadCheck(oidPath) - if err == nil { - t.Fatal(err) - } - - if IsFatalError(err) { - t.Fatal("should not panic") - } - - if isDockerConnectionError(err) { - return - } - - if err.Error() != fmt.Sprintf(defaultErrors[404], server.URL+"/media/objects") { - t.Fatalf("Unexpected error: %s", err.Error()) - } - - if !postCalled { - t.Errorf("POST not called") - } -} - -func TestUploadStorageError(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = tmp - defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - putCalled := false - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - obj := &ObjectResource{ - Oid: reqObj.Oid, - Size: reqObj.Size, - Actions: map[string]*linkRelation{ - "upload": &linkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &linkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(202) - w.Write(by) - }) - - mux.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) { - putCalled = true - w.WriteHeader(404) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/media") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - obj, err := UploadCheck(oidPath) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - err = UploadObject(obj, nil) - if err == nil { - t.Fatal("Expected an error") - } - - if IsFatalError(err) { - 
t.Fatal("should not panic") - } - - if err.Error() != fmt.Sprintf(defaultErrors[404], server.URL+"/upload") { - t.Fatalf("Unexpected error: %s", err.Error()) - } - - if !postCalled { - t.Errorf("POST not called") - } - - if !putCalled { - t.Errorf("PUT not called") - } -} - -func TestUploadVerifyError(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - tmp := tempdir(t) - olddir := LocalMediaDir - LocalMediaDir = tmp - defer func() { - LocalMediaDir = olddir - }() - defer server.Close() - defer os.RemoveAll(tmp) - - postCalled := false - putCalled := false - verifyCalled := false - - mux.HandleFunc("/media/objects", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "POST" { - w.WriteHeader(405) - return - } - - if r.Header.Get("Accept") != mediaType { - t.Errorf("Invalid Accept") - } - - if r.Header.Get("Content-Type") != mediaType { - t.Errorf("Invalid Content-Type") - } - - buf := &bytes.Buffer{} - tee := io.TeeReader(r.Body, buf) - reqObj := &ObjectResource{} - err := json.NewDecoder(tee).Decode(reqObj) - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", buf.String()) - if err != nil { - t.Fatal(err) - } - - if reqObj.Oid != "988881adc9fc3655077dc2d4d757d480b5ea0e11" { - t.Errorf("invalid oid from request: %s", reqObj.Oid) - } - - if reqObj.Size != 4 { - t.Errorf("invalid size from request: %d", reqObj.Size) - } - - obj := &ObjectResource{ - Oid: reqObj.Oid, - Size: reqObj.Size, - Actions: map[string]*linkRelation{ - "upload": &linkRelation{ - Href: server.URL + "/upload", - Header: map[string]string{"A": "1"}, - }, - "verify": &linkRelation{ - Href: server.URL + "/verify", - Header: map[string]string{"B": "2"}, - }, - }, - } - - by, err := json.Marshal(obj) - if err != nil { - t.Fatal(err) - } - - postCalled = true - head := w.Header() - head.Set("Content-Type", mediaType) - head.Set("Content-Length", strconv.Itoa(len(by))) - w.WriteHeader(202) - w.Write(by) - }) - - mux.HandleFunc("/upload", func(w http.ResponseWriter, r *http.Request) { - t.Logf("Server: %s %s", r.Method, r.URL) - - if r.Method != "PUT" { - w.WriteHeader(405) - return - } - - if r.Header.Get("A") != "1" { - t.Error("Invalid A") - } - - if r.Header.Get("Content-Type") != "application/octet-stream" { - t.Error("Invalid Content-Type") - } - - by, err := ioutil.ReadAll(r.Body) - if err != nil { - t.Error(err) - } - - t.Logf("request header: %v", r.Header) - t.Logf("request body: %s", string(by)) - - if str := string(by); str != "test" { - t.Errorf("unexpected body: %s", str) - } - - putCalled = true - w.WriteHeader(200) - }) - - mux.HandleFunc("/verify", func(w http.ResponseWriter, r *http.Request) { - verifyCalled = true - w.WriteHeader(404) - }) - - defer Config.ResetConfig() - Config.SetConfig("lfs.url", server.URL+"/media") - - oidPath, _ := LocalMediaPath("988881adc9fc3655077dc2d4d757d480b5ea0e11") - if err := ioutil.WriteFile(oidPath, []byte("test"), 0744); err != nil { - t.Fatal(err) - } - - obj, err := UploadCheck(oidPath) - if err != nil { - if isDockerConnectionError(err) { - return - } - t.Fatal(err) - } - err = UploadObject(obj, nil) - if err == nil { - t.Fatal("Expected an error") - } - - if IsFatalError(err) { - t.Fatal("should not panic") - } - - if err.Error() != fmt.Sprintf(defaultErrors[404], server.URL+"/verify") { - t.Fatalf("Unexpected error: %s", err.Error()) - } - - if !postCalled { - t.Errorf("POST not called") - } - - if !putCalled { - t.Errorf("PUT not called") - } - - if !verifyCalled { - 
t.Errorf("verify not called") - } -} diff --git a/lfs/util.go b/lfs/util.go index 0946c97d..0f460052 100644 --- a/lfs/util.go +++ b/lfs/util.go @@ -8,14 +8,11 @@ import ( "path/filepath" "runtime" "strings" -) -type CallbackReader struct { - C CopyCallback - TotalSize int64 - ReadSize int64 - io.Reader -} + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/progress" + "github.com/github/git-lfs/tools" +) type Platform int @@ -29,43 +26,8 @@ const ( var currentPlatform = PlatformUndetermined -type CopyCallback func(totalSize int64, readSoFar int64, readSinceLast int) error - -func (w *CallbackReader) Read(p []byte) (int, error) { - n, err := w.Reader.Read(p) - - if n > 0 { - w.ReadSize += int64(n) - } - - if err == nil && w.C != nil { - err = w.C(w.TotalSize, w.ReadSize, n) - } - - return n, err -} - -func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb CopyCallback) (int64, error) { - if success, _ := CloneFile(writer, reader); success { - if cb != nil { - cb(totalSize, totalSize, 0) - } - return totalSize, nil - } - if cb == nil { - return io.Copy(writer, reader) - } - - cbReader := &CallbackReader{ - C: cb, - TotalSize: totalSize, - Reader: reader, - } - return io.Copy(writer, cbReader) -} - -func CopyCallbackFile(event, filename string, index, totalFiles int) (CopyCallback, *os.File, error) { - logPath := Config.Getenv("GIT_LFS_PROGRESS") +func CopyCallbackFile(event, filename string, index, totalFiles int) (progress.CopyCallback, *os.File, error) { + logPath := config.Config.Getenv("GIT_LFS_PROGRESS") if len(logPath) == 0 || len(filename) == 0 || len(event) == 0 { return nil, nil, nil } @@ -86,7 +48,7 @@ func CopyCallbackFile(event, filename string, index, totalFiles int) (CopyCallba var prevWritten int64 - cb := CopyCallback(func(total int64, written int64, current int) error { + cb := progress.CopyCallback(func(total int64, written int64, current int) error { if written != prevWritten { _, err := file.Write([]byte(fmt.Sprintf("%s %d/%d %d/%d %s\n", event, index, totalFiles, written, total, filename))) file.Sync() @@ -200,11 +162,11 @@ func ConvertRepoFilesRelativeToCwd(repochan <-chan string) (<-chan string, error if err != nil { return nil, fmt.Errorf("Unable to get working dir: %v", err) } - wd = ResolveSymlinks(wd) + wd = tools.ResolveSymlinks(wd) // Early-out if working dir is root dir, same result passthrough := false - if LocalWorkingDir == wd { + if config.LocalWorkingDir == wd { passthrough = true } @@ -216,7 +178,7 @@ func ConvertRepoFilesRelativeToCwd(repochan <-chan string) (<-chan string, error outchan <- f continue } - abs := filepath.Join(LocalWorkingDir, f) + abs := filepath.Join(config.LocalWorkingDir, f) rel, err := filepath.Rel(wd, abs) if err != nil { // Use absolute file instead @@ -241,11 +203,11 @@ func ConvertCwdFilesRelativeToRepo(cwdchan <-chan string) (<-chan string, error) return nil, fmt.Errorf("Could not retrieve current directory: %v", err) } // Make sure to resolve symlinks - curdir = ResolveSymlinks(curdir) + curdir = tools.ResolveSymlinks(curdir) // Early-out if working dir is root dir, same result passthrough := false - if LocalWorkingDir == curdir { + if config.LocalWorkingDir == curdir { passthrough = true } @@ -258,11 +220,11 @@ func ConvertCwdFilesRelativeToRepo(cwdchan <-chan string) (<-chan string, error) } var abs string if filepath.IsAbs(p) { - abs = ResolveSymlinks(p) + abs = tools.ResolveSymlinks(p) } else { abs = filepath.Join(curdir, p) } - reltoroot, err := filepath.Rel(LocalWorkingDir, abs) + 
reltoroot, err := filepath.Rel(config.LocalWorkingDir, abs) if err != nil { // Can't do this, use absolute as best fallback outchan <- abs @@ -277,59 +239,13 @@ func ConvertCwdFilesRelativeToRepo(cwdchan <-chan string) (<-chan string, error) } -// ResolveSymlinks ensures that if the path supplied is a symlink, it is -// resolved to the actual concrete path -func ResolveSymlinks(path string) string { - if len(path) == 0 { - return path - } - - if resolved, err := filepath.EvalSymlinks(path); err == nil { - return resolved - } - return path -} - // Are we running on Windows? Need to handle some extra path shenanigans func IsWindows() bool { return GetPlatform() == PlatformWindows } -// FileOrDirExists determines if a file/dir exists, returns IsDir() results too. -func FileOrDirExists(path string) (exists bool, isDir bool) { - fi, err := os.Stat(path) - if err != nil { - return false, false - } else { - return true, fi.IsDir() - } -} - -// FileExists determines if a file (NOT dir) exists. -func FileExists(path string) bool { - ret, isDir := FileOrDirExists(path) - return ret && !isDir -} - -// DirExists determines if a dir (NOT file) exists. -func DirExists(path string) bool { - ret, isDir := FileOrDirExists(path) - return ret && isDir -} - -// FileExistsOfSize determines if a file exists and is of a specific size. -func FileExistsOfSize(path string, sz int64) bool { - fi, err := os.Stat(path) - - if err != nil { - return false - } - - return !fi.IsDir() && fi.Size() == sz -} - func CopyFileContents(src string, dst string) error { - tmp, err := ioutil.TempFile(TempDir, filepath.Base(dst)) + tmp, err := ioutil.TempFile(TempDir(), filepath.Base(dst)) if err != nil { return err } diff --git a/lfs/util_test.go b/lfs/util_test.go index 910e994e..b2e776e4 100644 --- a/lfs/util_test.go +++ b/lfs/util_test.go @@ -2,18 +2,18 @@ package lfs import ( "bytes" - "io/ioutil" "strings" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" + "github.com/github/git-lfs/progress" + "github.com/stretchr/testify/assert" ) func TestWriterWithCallback(t *testing.T) { called := 0 calledRead := make([]int64, 0, 2) - reader := &CallbackReader{ + reader := &progress.CallbackReader{ TotalSize: 5, Reader: bytes.NewBufferString("BOOYA"), C: func(total int64, read int64, current int) error { @@ -26,39 +26,19 @@ func TestWriterWithCallback(t *testing.T) { readBuf := make([]byte, 3) n, err := reader.Read(readBuf) - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, "BOO", string(readBuf[0:n])) n, err = reader.Read(readBuf) - assert.Equal(t, nil, err) + assert.Nil(t, err) assert.Equal(t, "YA", string(readBuf[0:n])) assert.Equal(t, 2, called) - assert.Equal(t, 2, len(calledRead)) + assert.Len(t, calledRead, 2) assert.Equal(t, 3, int(calledRead[0])) assert.Equal(t, 5, int(calledRead[1])) } -func TestCopyWithCallback(t *testing.T) { - buf := bytes.NewBufferString("BOOYA") - - called := 0 - calledWritten := make([]int64, 0, 2) - - n, err := CopyWithCallback(ioutil.Discard, buf, 5, func(total int64, written int64, current int) error { - called += 1 - calledWritten = append(calledWritten, written) - assert.Equal(t, 5, int(total)) - return nil - }) - assert.Equal(t, nil, err) - assert.Equal(t, 5, int(n)) - - assert.Equal(t, 1, called) - assert.Equal(t, 1, len(calledWritten)) - assert.Equal(t, 5, int(calledWritten[0])) -} - type TestIncludeExcludeCase struct { expectedResult bool includes []string diff --git a/localstorage/currentstore.go b/localstorage/currentstore.go new file mode 100644 
index 00000000..f31809f3 --- /dev/null +++ b/localstorage/currentstore.go @@ -0,0 +1,63 @@ +package localstorage + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/github/git-lfs/config" +) + +const ( + tempDirPerms = 0755 + localMediaDirPerms = 0755 + localLogDirPerms = 0755 +) + +var ( + objects *LocalStorage + TempDir = filepath.Join(os.TempDir(), "git-lfs") + checkedTempDir string +) + +func Objects() *LocalStorage { + return objects +} + +func ResolveDirs() { + + config.ResolveGitBasicDirs() + TempDir = filepath.Join(config.LocalGitDir, "lfs", "tmp") // temp files per worktree + + objs, err := NewStorage( + filepath.Join(config.LocalGitStorageDir, "lfs", "objects"), + filepath.Join(TempDir, "objects"), + ) + + if err != nil { + panic(fmt.Sprintf("Error trying to init LocalStorage: %s", err)) + } + + objects = objs + config.LocalLogDir = filepath.Join(objs.RootDir, "logs") + if err := os.MkdirAll(config.LocalLogDir, localLogDirPerms); err != nil { + panic(fmt.Errorf("Error trying to create log directory in '%s': %s", config.LocalLogDir, err)) + } +} + +func TempFile(prefix string) (*os.File, error) { + if checkedTempDir != TempDir { + if err := os.MkdirAll(TempDir, tempDirPerms); err != nil { + return nil, err + } + checkedTempDir = TempDir + } + + return ioutil.TempFile(TempDir, prefix) +} + +func ResetTempDir() error { + checkedTempDir = "" + return os.RemoveAll(TempDir) +} diff --git a/localstorage/localstorage.go b/localstorage/localstorage.go index eae34575..aec93eb0 100644 --- a/localstorage/localstorage.go +++ b/localstorage/localstorage.go @@ -1,3 +1,5 @@ +// Package localstorage handles LFS content stored locally +// NOTE: Subject to change, do not rely on this package from outside git-lfs source package localstorage import ( @@ -28,7 +30,7 @@ type Object struct { Size int64 } -func New(storageDir, tempDir string) (*LocalStorage, error) { +func NewStorage(storageDir, tempDir string) (*LocalStorage, error) { if err := os.MkdirAll(storageDir, dirPerms); err != nil { return nil, err } diff --git a/localstorage/scan.go b/localstorage/scan.go index 2a97da97..8596fc0b 100644 --- a/localstorage/scan.go +++ b/localstorage/scan.go @@ -4,7 +4,7 @@ import ( "os" "path/filepath" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/rubyist/tracerx" ) // AllObjects returns a slice of the the objects stored in this LocalStorage diff --git a/localstorage/temp.go b/localstorage/temp.go index 2a0e2ea7..7b49613e 100644 --- a/localstorage/temp.go +++ b/localstorage/temp.go @@ -6,7 +6,7 @@ import ( "strings" "time" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/rubyist/tracerx" ) func (s *LocalStorage) ClearTempObjects() error { diff --git a/progress/copycallback.go b/progress/copycallback.go new file mode 100644 index 00000000..b9075fd2 --- /dev/null +++ b/progress/copycallback.go @@ -0,0 +1,26 @@ +package progress + +import "io" + +type CopyCallback func(totalSize int64, readSoFar int64, readSinceLast int) error + +type CallbackReader struct { + C CopyCallback + TotalSize int64 + ReadSize int64 + io.Reader +} + +func (w *CallbackReader) Read(p []byte) (int, error) { + n, err := w.Reader.Read(p) + + if n > 0 { + w.ReadSize += int64(n) + } + + if err == nil && w.C != nil { + err = w.C(w.TotalSize, w.ReadSize, n) + } + + return n, err +} diff --git a/progress/logger.go b/progress/logger.go new file mode 100644 index 00000000..e2d98ac1 --- /dev/null +++ b/progress/logger.go @@ -0,0 +1,64 @@ +package progress 
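The `progress/copycallback.go` file added above moves `CopyCallback` and `CallbackReader` out of the `lfs` package unchanged. A minimal usage sketch, assuming only the exported API shown in this diff (the package doc added later in this diff explicitly marks the `progress` package as subject to change):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/github/git-lfs/progress"
)

func main() {
	payload := []byte("some file contents")

	// CallbackReader embeds the underlying io.Reader; C fires after every
	// successful Read with the running total of bytes consumed so far.
	r := &progress.CallbackReader{
		TotalSize: int64(len(payload)),
		Reader:    bytes.NewReader(payload),
		C: func(total, read int64, current int) error {
			fmt.Printf("read %d/%d bytes (+%d)\n", read, total, current)
			return nil // a non-nil error here aborts the surrounding copy
		},
	}

	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		panic(err)
	}
}
```

Since `CallbackReader` embeds the underlying `io.Reader`, it can be dropped into any `io.Copy`-style pipeline.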
+ +import ( + "fmt" + "os" + "path/filepath" +) + +// progressLogger provides a wrapper around an os.File that can either +// write to the file or ignore all writes completely. +type progressLogger struct { + writeData bool + log *os.File +} + +// Write will write to the file and perform a Sync() if writing succeeds. +func (l *progressLogger) Write(b []byte) error { + if l.writeData { + if _, err := l.log.Write(b); err != nil { + return err + } + return l.log.Sync() + } + return nil +} + +// Close will call Close() on the underlying file +func (l *progressLogger) Close() error { + if l.log != nil { + return l.log.Close() + } + return nil +} + +// Shutdown will cause the logger to ignore any further writes. It should +// be used when writing causes an error. +func (l *progressLogger) Shutdown() { + l.writeData = false +} + +// newProgressLogger creates a progressLogger with a log file path. +// If a log file is able to be created, the logger will write to the file. If +// there is an err creating the file, the logger will ignore all writes. +func newProgressLogger(logPath string) (*progressLogger, error) { + + if len(logPath) == 0 { + return &progressLogger{}, nil + } + if !filepath.IsAbs(logPath) { + return &progressLogger{}, fmt.Errorf("GIT_LFS_PROGRESS must be an absolute path") + } + + cbDir := filepath.Dir(logPath) + if err := os.MkdirAll(cbDir, 0755); err != nil { + return &progressLogger{}, err + } + + file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + return &progressLogger{}, err + } + + return &progressLogger{true, file}, nil +} diff --git a/lfs/progress_meter.go b/progress/meter.go similarity index 56% rename from lfs/progress_meter.go rename to progress/meter.go index 6b610e81..eae2037f 100644 --- a/lfs/progress_meter.go +++ b/progress/meter.go @@ -1,17 +1,14 @@ -package lfs +package progress import ( "fmt" - "io" "os" - "path/filepath" - "runtime" "strings" "sync" "sync/atomic" "time" - "github.com/github/git-lfs/vendor/_nuts/github.com/olekukonko/ts" + "github.com/olekukonko/ts" ) // ProgressMeter provides a progress bar type output for the TransferQueue. It @@ -26,7 +23,7 @@ type ProgressMeter struct { currentBytes int64 skippedBytes int64 started int32 - estimatedFiles int + estimatedFiles int32 startTime time.Time finished chan interface{} logger *progressLogger @@ -37,8 +34,8 @@ type ProgressMeter struct { // NewProgressMeter creates a new ProgressMeter for the number and size of // files given. 
-func NewProgressMeter(estFiles int, estBytes int64, dryRun bool) *ProgressMeter { - logger, err := newProgressLogger() +func NewProgressMeter(estFiles int, estBytes int64, dryRun bool, logPath string) *ProgressMeter { + logger, err := newProgressLogger(logPath) if err != nil { fmt.Fprintf(os.Stderr, "Error creating progress logger: %s\n", err) } @@ -49,7 +46,7 @@ func NewProgressMeter(estFiles int, estBytes int64, dryRun bool) *ProgressMeter fileIndex: make(map[string]int64), fileIndexMutex: &sync.Mutex{}, finished: make(chan interface{}), - estimatedFiles: estFiles, + estimatedFiles: int32(estFiles), estimatedBytes: estBytes, dryRun: dryRun, } @@ -75,6 +72,10 @@ func (p *ProgressMeter) Add(name string) { func (p *ProgressMeter) Skip(size int64) { atomic.AddInt64(&p.skippedFiles, 1) atomic.AddInt64(&p.skippedBytes, size) + // Reduce bytes and files so progress is easier to parse + atomic.AddInt32(&p.estimatedFiles, -1) + atomic.AddInt64(&p.estimatedBytes, -size) + } // TransferBytes increments the number of bytes transferred @@ -124,7 +125,7 @@ func (p *ProgressMeter) writer() { } func (p *ProgressMeter) update() { - if p.dryRun || p.estimatedFiles == 0 { + if p.dryRun || (p.estimatedFiles == 0 && p.skippedFiles == 0) { return } @@ -154,65 +155,6 @@ func (p *ProgressMeter) update() { fmt.Fprintf(os.Stdout, out) } -// progressLogger provides a wrapper around an os.File that can either -// write to the file or ignore all writes completely. -type progressLogger struct { - writeData bool - log *os.File -} - -// Write will write to the file and perform a Sync() if writing succeeds. -func (l *progressLogger) Write(b []byte) error { - if l.writeData { - if _, err := l.log.Write(b); err != nil { - return err - } - return l.log.Sync() - } - return nil -} - -// Close will call Close() on the underlying file -func (l *progressLogger) Close() error { - if l.log != nil { - return l.log.Close() - } - return nil -} - -// Shutdown will cause the logger to ignore any further writes. It should -// be used when writing causes an error. -func (l *progressLogger) Shutdown() { - l.writeData = false -} - -// newProgressLogger creates a progressLogger based on the presence of -// the GIT_LFS_PROGRESS environment variable. If it is present and a log file -// is able to be created, the logger will write to the file. If it is absent, -// or there is an err creating the file, the logger will ignore all writes.
-func newProgressLogger() (*progressLogger, error) { - logPath := Config.Getenv("GIT_LFS_PROGRESS") - - if len(logPath) == 0 { - return &progressLogger{}, nil - } - if !filepath.IsAbs(logPath) { - return &progressLogger{}, fmt.Errorf("GIT_LFS_PROGRESS must be an absolute path") - } - - cbDir := filepath.Dir(logPath) - if err := os.MkdirAll(cbDir, 0755); err != nil { - return &progressLogger{}, err - } - - file, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666) - if err != nil { - return &progressLogger{}, err - } - - return &progressLogger{true, file}, nil -} - func formatBytes(i int64) string { switch { case i > 1099511627776: @@ -227,58 +169,3 @@ func formatBytes(i int64) string { return fmt.Sprintf("%d B", i) } - -// Indeterminate progress indicator 'spinner' -type Spinner struct { - stage int - msg string -} - -var spinnerChars = []byte{'|', '/', '-', '\\'} - -// Print a spinner (stage) to out followed by msg (no linefeed) -func (s *Spinner) Print(out io.Writer, msg string) { - s.msg = msg - s.Spin(out) -} - -// Just spin the spinner one more notch & use the last message -func (s *Spinner) Spin(out io.Writer) { - s.stage = (s.stage + 1) % len(spinnerChars) - s.update(out, string(spinnerChars[s.stage]), s.msg) -} - -// Finish the spinner with a completion message & newline -func (s *Spinner) Finish(out io.Writer, finishMsg string) { - s.msg = finishMsg - s.stage = 0 - var sym string - if runtime.GOOS == "windows" { - // Windows console sucks, can't do nice check mark except in ConEmu (not cmd or git bash) - // So play it safe & boring - sym = "*" - } else { - sym = fmt.Sprintf("%c", '\u2714') - } - s.update(out, sym, finishMsg) - out.Write([]byte{'\n'}) -} - -func (s *Spinner) update(out io.Writer, prefix, msg string) { - - str := fmt.Sprintf("%v %v", prefix, msg) - - width := 80 // default to 80 chars wide if ts.GetSize() fails - size, err := ts.GetSize() - if err == nil { - width = size.Col() - } - padding := strings.Repeat(" ", width-len(str)) - - fmt.Fprintf(out, "\r%v%v", str, padding) - -} - -func NewSpinner() *Spinner { - return &Spinner{} -} diff --git a/progress/progress.go b/progress/progress.go new file mode 100644 index 00000000..0d0d7f89 --- /dev/null +++ b/progress/progress.go @@ -0,0 +1,3 @@ +// Package progress provides common progress monitoring / display features +// NOTE: Subject to change, do not rely on this package from outside git-lfs source +package progress diff --git a/progress/spinner.go b/progress/spinner.go new file mode 100644 index 00000000..02955eea --- /dev/null +++ b/progress/spinner.go @@ -0,0 +1,65 @@ +package progress + +import ( + "fmt" + "io" + "runtime" + "strings" + + "github.com/olekukonko/ts" +) + +// Indeterminate progress indicator 'spinner' +type Spinner struct { + stage int + msg string +} + +var spinnerChars = []byte{'|', '/', '-', '\\'} + +// Print a spinner (stage) to out followed by msg (no linefeed) +func (s *Spinner) Print(out io.Writer, msg string) { + s.msg = msg + s.Spin(out) +} + +// Just spin the spinner one more notch & use the last message +func (s *Spinner) Spin(out io.Writer) { + s.stage = (s.stage + 1) % len(spinnerChars) + s.update(out, string(spinnerChars[s.stage]), s.msg) +} + +// Finish the spinner with a completion message & newline +func (s *Spinner) Finish(out io.Writer, finishMsg string) { + s.msg = finishMsg + s.stage = 0 + var sym string + if runtime.GOOS == "windows" { + // Windows console sucks, can't do nice check mark except in ConEmu (not cmd or git bash) + // So play it safe & boring + 
sym = "*" + } else { + sym = fmt.Sprintf("%c", '\u2714') + } + s.update(out, sym, finishMsg) + out.Write([]byte{'\n'}) +} + +func (s *Spinner) update(out io.Writer, prefix, msg string) { + + str := fmt.Sprintf("%v %v", prefix, msg) + + width := 80 // default to 80 chars wide if ts.GetSize() fails + size, err := ts.GetSize() + if err == nil { + width = size.Col() + } + padding := strings.Repeat(" ", width-len(str)) + + fmt.Fprintf(out, "\r%v%v", str, padding) + +} + +func NewSpinner() *Spinner { + return &Spinner{} +} diff --git a/rpm/SPECS/git-lfs.spec b/rpm/SPECS/git-lfs.spec index 9c3c4be0..c7998f1e 100644 --- a/rpm/SPECS/git-lfs.spec +++ b/rpm/SPECS/git-lfs.spec @@ -1,5 +1,5 @@ Name: git-lfs -Version: 1.2.0 +Version: 1.2.1 Release: 1%{?dist} Summary: Git extension for versioning large files diff --git a/script/bootstrap b/script/bootstrap index 3cf96be8..14ae3373 100755 --- a/script/bootstrap +++ b/script/bootstrap @@ -12,4 +12,4 @@ fi script/fmt rm -rf bin/* -GO15VENDOREXPERIMENT=0 go run script/*.go -cmd build "$@" +GO15VENDOREXPERIMENT=1 go run script/*.go -cmd build "$@" diff --git a/script/build.go b/script/build.go index cd75a249..877c3667 100644 --- a/script/build.go +++ b/script/build.go @@ -12,7 +12,7 @@ import ( "runtime" "strings" - "github.com/github/git-lfs/lfs" + "github.com/github/git-lfs/config" ) var ( @@ -47,7 +47,7 @@ func mainBuild() { cmd, _ := exec.Command("git", "rev-parse", "--short", "HEAD").Output() if len(cmd) > 0 { - LdFlag = strings.TrimSpace("-X github.com/github/git-lfs/lfs.GitCommit=" + string(cmd)) + LdFlag = strings.TrimSpace("-X github.com/github/git-lfs/config.GitCommit=" + string(cmd)) } buildMatrix := make(map[string]Release) @@ -96,7 +96,7 @@ func mainBuild() { func build(buildos, buildarch string, buildMatrix map[string]Release) error { addenv := len(buildos) > 0 && len(buildarch) > 0 - name := "git-lfs-" + lfs.Version + name := "git-lfs-" + config.Version dir := "bin" if addenv { @@ -262,7 +262,7 @@ func logAndRun(cmd *exec.Cmd) error { } func zipName(os, arch string) string { - return fmt.Sprintf("git-lfs-%s-%s-%s", os, arch, lfs.Version) + return fmt.Sprintf("git-lfs-%s-%s-%s", os, arch, config.Version) } func releaseLabel(buildos, buildarch string) string { diff --git a/script/changelog b/script/changelog new file mode 100755 index 00000000..5d5ef709 --- /dev/null +++ b/script/changelog @@ -0,0 +1,88 @@ +#!/usr/bin/env bash +# +# Interactively generates a changelog over a range of commits: + +commit_summary() { + local hash=$1 + + pr=$(git show $hash | grep -o "#\([0-9]*\)" | cut -c 2-) + prjson="$(curl -n https://api.github.com/repos/github/git-lfs/pulls/$pr 2>/dev/null)" + title="$(echo $prjson | jq -r -e ".title")" + id="$(echo $prjson | jq -r -e ".number")" + author="$(echo $prjson | jq -r -e ".user.login")" + + # If the title begins with "Backport", then strip everything until the actual + # pull-request title. + if grep -q "Backport" <(echo $title); then + title="$(echo $title | sed 's/^[^:]*: //g')" + fi + + echo "* $title #$id (@$author)" +} + +range=$1 + +if [ "$range" = "" ]; then + echo "Usage: $0 [options] base..next" + exit 1 +fi + +features="" +bugs="" +misc="" + +for rev in $(git rev-list --merges $range); do + git show $rev + + processed=0 + while [ $processed -eq 0 ]; do + echo "Categorize this change: [f,b,m,s,?] ?" + read -n 1 opt + echo "" + + case $opt in + [fbms]) + processed=1 + ;; + ?) + echo "f - mark this merge as a feature" + echo "b - mark this merge as a bugfix" + echo "m - make this merge as a misc. 
change" + echo "s - skip this merge, excluding it from the changelog" + echo "? - display this help message" + ;; + *) + echo "Unknown option: $opt, try again." + ;; + esac + done + + if [ $opt != "s" ]; then + summary="$(commit_summary $rev)" + fi + + case $opt in + f) + features="$(printf "%s\n%s\n" "$features" "$summary")" + ;; + b) + bugs="$(printf "%s\n%s\n" "$bugs" "$summary")" + ;; + m) + misc="$(printf "%s\n%s\n" "$misc" "$summary")" + ;; + esac +done + +echo "" >&2 + +cat <<- EOF +### Features +$features + +### Bugs +$bugs + +### Misc +$misc +EOF diff --git a/script/genmakefile/genmakefile.go b/script/genmakefile/genmakefile.go new file mode 100644 index 00000000..516eadc2 --- /dev/null +++ b/script/genmakefile/genmakefile.go @@ -0,0 +1,49 @@ +package main + +import ( + "fmt" + "go/build" + "os" + "strings" +) + +var packages map[string]string = make(map[string]string) + +func generate_target(srcdir string, pkgdir string, prefix string, ctx build.Context) string { + pkg, _ := ctx.ImportDir(srcdir+pkgdir, 0) + name := pkg.Name + var deps []string + for _, imp := range pkg.Imports { + if strings.HasPrefix(imp, prefix) { + imp = strings.TrimPrefix(imp, prefix) + if packages[imp] == "" { + packages[imp] = generate_target(srcdir, imp, prefix, ctx) + } + deps = append(deps, "$(LIBS_"+packages[imp]+")") + } + } + if pkgdir != "" { + fmt.Printf("SRCDIR_%s := $(SRCDIR)%s/\n", name, pkgdir) + } else { + fmt.Printf("SRCDIR_%s := $(SRCDIR)\n", name) + } + fmt.Printf("SRC_%s := $(addprefix $(SRCDIR_%s), %s)\n", name, name, strings.Join(pkg.GoFiles, " ")) + fmt.Printf("DEPS_%s := %s\n", name, strings.Join(deps, " ")) + if pkgdir != "" { + fmt.Printf("OBJ_%s := $(LIBDIR)/%s.o\n", name, pkgdir) + fmt.Printf("LIB_%s := $(LIBDIR)/%s.a\n", name, pkgdir) + fmt.Printf("LIBS_%s := $(LIB_%s) $(DEPS_%s)\n", name, name, name) + fmt.Printf("$(OBJ_%s) : $(SRC_%s) $(DEPS_%s)\n", name, name, name) + fmt.Printf("\t@mkdir -p $(dir $@)\n") + fmt.Printf("\t$(GOC) $(GOFLAGS) -c -o $@ $(SRC_%s)\n", name) + } + return name +} + +func main() { + srcdir := os.Args[1] + prefix := os.Args[2] + ctx := build.Default + ctx.CgoEnabled = false + generate_target(srcdir, "", prefix, ctx) +} diff --git a/script/integration b/script/integration index 884dbd15..9d78d62b 100755 --- a/script/integration +++ b/script/integration @@ -46,4 +46,4 @@ parallel=${GIT_LFS_TEST_MAXPROCS:-4} echo "Running this maxprocs=$parallel" echo -GO15VENDOREXPERIMENT=0 GIT_LFS_TEST_MAXPROCS=$parallel GIT_LFS_TEST_DIR="$GIT_LFS_TEST_DIR" SHUTDOWN_LFS="no" go run script/*.go -cmd integration "$@" +GO15VENDOREXPERIMENT=1 GIT_LFS_TEST_MAXPROCS=$parallel GIT_LFS_TEST_DIR="$GIT_LFS_TEST_DIR" SHUTDOWN_LFS="no" go run script/*.go -cmd integration "$@" diff --git a/script/lint b/script/lint index 119f39f5..3ae8cd88 100755 --- a/script/lint +++ b/script/lint @@ -1,6 +1,6 @@ #!/usr/bin/env bash -deps=$(go list -f '{{join .Deps "\n"}}' . | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -v "github.com/github/git-lfs") +deps=$(GO15VENDOREXPERIMENT=1 go list -f '{{join .Deps "\n"}}' . | xargs go list -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -v "github.com/github/git-lfs") # exit 0 means non-vendored deps were found if [ $? -eq 0 ]; @@ -8,8 +8,8 @@ then echo "Non vendored dependencies found:" for d in $deps; do echo "\t$d"; done echo - echo "These dependencies should be tracked in 'Nut.toml', with an import prefix of:" - echo "\tgithub.com/github/git-lfs/vendor/_nuts" + echo "These dependencies should be tracked in 'glide.yaml'." 
+ echo "Consider running "glide update" or "glide get" to vendor a new dependency." exit 1 else echo "Looks good!" diff --git a/script/packagecloud.rb b/script/packagecloud.rb index db585691..8ca5dd82 100644 --- a/script/packagecloud.rb +++ b/script/packagecloud.rb @@ -8,9 +8,13 @@ end require "json" +packagecloud_ruby_minimum_version = "1.0.4" begin + gem "packagecloud-ruby", ">=#{packagecloud_ruby_minimum_version}" require "packagecloud" + puts "Using packagecloud-ruby:#{Gem.loaded_specs["packagecloud-ruby"].version}" rescue LoadError + puts "Requires packagecloud-ruby >=#{packagecloud_ruby_minimum_version}" puts %(gem install packagecloud-ruby) exit 1 end diff --git a/script/release b/script/release index 5a7e6cd7..0efa6d21 100755 --- a/script/release +++ b/script/release @@ -1,3 +1,3 @@ #!/usr/bin/env bash script/fmt -GO15VENDOREXPERIMENT=0 go run script/*.go -cmd release "$@" +GO15VENDOREXPERIMENT=1 go run script/*.go -cmd release "$@" diff --git a/script/run b/script/run index 7f4cfe92..5348a17f 100755 --- a/script/run +++ b/script/run @@ -1,4 +1,4 @@ #!/usr/bin/env bash script/fmt commit=`git rev-parse --short HEAD` -GO15VENDOREXPERIMENT=0 go run -ldflags="-X github.com/github/git-lfs/lfs.GitCommit=$commit" ./git-lfs.go "$@" +GO15VENDOREXPERIMENT=1 go run -ldflags="-X github.com/github/git-lfs/config.GitCommit=$commit" ./git-lfs.go "$@" diff --git a/script/test b/script/test index 7f9fe3b0..1f6be456 100755 --- a/script/test +++ b/script/test @@ -1,10 +1,17 @@ #!/usr/bin/env bash -#/ Usage: script/test # run all tests +#/ Usage: script/test # run all non-vendored tests #/ script/test # run just a package's tests script/fmt -suite="./${1:-"lfs"} ./${1:-"git"}" if [ $# -gt 0 ]; then - shift + GO15VENDOREXPERIMENT=1 go test "./$@" +else + # The following vendor test-exclusion grep-s typically need to match the same set in + # debian/rules variable DH_GOLANG_EXCLUDES, so update those when adding here. + GO15VENDOREXPERIMENT=1 go test \ + $(GO15VENDOREXPERIMENT=1 go list ./... \ + | grep -v "github.com/olekukonko/ts" \ + | grep -v "github.com/xeipuuv/gojsonschema" \ + | grep -v "github.com/technoweenie/go-contentaddressable" \ + ) fi -GO15VENDOREXPERIMENT=0 go test $suite "$@" diff --git a/script/vendor b/script/vendor index 40ca9689..6c946802 100755 --- a/script/vendor +++ b/script/vendor @@ -1,2 +1,5 @@ #!/usr/bin/env bash -nut install +glide update -s -u +glide install -s -u + +rm -rf vendor/github.com/ThomsonReutersEikon/go-ntlm/utils diff --git a/script/windows-installer/inno-setup-git-lfs-installer.iss b/script/windows-installer/inno-setup-git-lfs-installer.iss index e4ea7b43..89604f2d 100644 --- a/script/windows-installer/inno-setup-git-lfs-installer.iss +++ b/script/windows-installer/inno-setup-git-lfs-installer.iss @@ -2,7 +2,7 @@ ; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES! #define MyAppName "Git LFS" -#define MyAppVersion "1.2.0" +#define MyAppVersion "1.2.1" #define MyAppPublisher "GitHub, Inc" #define MyAppURL "https://git-lfs.github.com/" #define MyAppFilePrefix "git-lfs-windows" @@ -56,12 +56,12 @@ var ExecStdOut: AnsiString; ResultCode: integer; -begin +begin TmpFileName := ExpandConstant('{tmp}') + '\git_location.txt'; - + Exec( ExpandConstant('{cmd}'), - '/C "for %i in (git.exe) do @echo. %~$PATH:i > "' + TmpFileName + '"', + '/C "for %i in (git.exe) do @echo. 
%~$PATH:i > "' + TmpFileName + '"', '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); @@ -94,9 +94,9 @@ begin end; // look for the path with leading and trailing semicolon and with or without \ ending // Pos() returns 0 if not found - Result := Pos(';' + UpperCase(ParamExpanded) + ';', ';' + UpperCase(OrigPath) + ';') = 0; + Result := Pos(';' + UpperCase(ParamExpanded) + ';', ';' + UpperCase(OrigPath) + ';') = 0; if Result = True then - Result := Pos(';' + UpperCase(ParamExpanded) + '\;', ';' + UpperCase(OrigPath) + ';') = 0; + Result := Pos(';' + UpperCase(ParamExpanded) + '\;', ';' + UpperCase(OrigPath) + ';') = 0; end; // Runs the lfs initialization. @@ -106,7 +106,7 @@ var begin Exec( ExpandConstant('{cmd}'), - ExpandConstant('/C ""{app}\git-lfs.exe" install"'), + ExpandConstant('/C ""{app}\git-lfs.exe" install"'), '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); if not ResultCode = 1 then @@ -122,9 +122,8 @@ var begin Exec( ExpandConstant('{cmd}'), - ExpandConstant('/C ""{app}\git-lfs.exe" uninstall"'), + ExpandConstant('/C ""{app}\git-lfs.exe" uninstall"'), '', SW_HIDE, ewWaitUntilTerminated, ResultCode ); Result := True; end; - diff --git a/subprocess/pty_nix.go b/subprocess/pty_nix.go index 217b23ff..9cd71c07 100644 --- a/subprocess/pty_nix.go +++ b/subprocess/pty_nix.go @@ -6,7 +6,7 @@ import ( "os/exec" "syscall" - "github.com/github/git-lfs/vendor/_nuts/github.com/kr/pty" + "github.com/kr/pty" ) // NewTty creates a pseudo-TTY for a command and modifies it appropriately so diff --git a/subprocess/subprocess.go b/subprocess/subprocess.go index 495e28a9..826a3471 100644 --- a/subprocess/subprocess.go +++ b/subprocess/subprocess.go @@ -1,3 +1,5 @@ +// Package subprocess provides helper functions for forking new processes +// NOTE: Subject to change, do not rely on this package from outside git-lfs source package subprocess import ( @@ -6,7 +8,7 @@ import ( "os/exec" "strings" - "github.com/github/git-lfs/vendor/_nuts/github.com/rubyist/tracerx" + "github.com/rubyist/tracerx" ) // SimpleExec is a small wrapper around os/exec.Command. diff --git a/test/README.md b/test/README.md index fb48f28b..2cc14812 100644 --- a/test/README.md +++ b/test/README.md @@ -67,7 +67,7 @@ There are a few environment variables that you can set to change the test suite behavior: * `GIT_LFS_TEST_DIR=path` - This sets the directory that is used as the current -working directory of the tests. By default, this will be in your temp dir. It's +working directory of the tests. By default, this will be in your temp dir. It's recommended that this is set to a directory outside of any Git repository. * `GIT_LFS_TEST_MAXPROCS=N` - This tells `script/integration` how many tests to run in parallel. Default: 4. @@ -77,6 +77,9 @@ and the remote repository data in `test/remote`. tests when you're running the same test script multiple times without changing any Go code. +Also ensure that your `noproxy` environment variable contains `127.0.0.1` host, +to allow git commands to reach the local Git server `lfstest-gitserver`. + ### Test Suite The `testenv.sh` script includes some global variables used in tests. 
This diff --git a/test/cmd/git-credential-lfsnoop.go b/test/cmd/git-credential-lfsnoop.go new file mode 100644 index 00000000..da29a2ca --- /dev/null +++ b/test/cmd/git-credential-lfsnoop.go @@ -0,0 +1,4 @@ +package main + +func main() { +} diff --git a/test/cmd/git-credential-lfstest.go b/test/cmd/git-credential-lfstest.go index 2301123a..42a57305 100644 --- a/test/cmd/git-credential-lfstest.go +++ b/test/cmd/git-credential-lfstest.go @@ -1,3 +1,5 @@ +// +build testtools + package main import ( diff --git a/test/cmd/lfstest-gitserver.go b/test/cmd/lfstest-gitserver.go index 9326940d..ff107429 100644 --- a/test/cmd/lfstest-gitserver.go +++ b/test/cmd/lfstest-gitserver.go @@ -1,8 +1,11 @@ +// +build testtools + package main import ( "bufio" "bytes" + "crypto/rand" "crypto/sha256" "encoding/base64" "encoding/hex" @@ -12,14 +15,18 @@ import ( "io" "io/ioutil" "log" + "math" "net/http" "net/http/httptest" "net/textproto" "os" "os/exec" "regexp" + "sort" + "strconv" "strings" "sync" + "time" ) var ( @@ -42,6 +49,7 @@ var ( "status-batch-403", "status-batch-404", "status-batch-410", "status-batch-422", "status-batch-500", "status-storage-403", "status-storage-404", "status-storage-410", "status-storage-422", "status-storage-500", "status-legacy-404", "status-legacy-410", "status-legacy-422", "status-legacy-403", "status-legacy-500", + "status-batch-resume-206", "batch-resume-fail-fallback", "return-expired-action", } ) @@ -60,6 +68,8 @@ func main() { mux.HandleFunc("/storage/", storageHandler) mux.HandleFunc("/redirect307/", redirect307Handler) + mux.HandleFunc("/locks", locksHandler) + mux.HandleFunc("/locks/", locksHandler) mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { if strings.Contains(r.URL.Path, "/info/lfs") { if !skipIfBadAuth(w, r) { @@ -118,8 +128,9 @@ type lfsObject struct { } type lfsLink struct { - Href string `json:"href"` - Header map[string]string `json:"header,omitempty"` + Href string `json:"href"` + Header map[string]string `json:"header,omitempty"` + ExpiresAt time.Time `json:"expires_at,omitempty"` } type lfsError struct { @@ -131,8 +142,8 @@ type lfsError struct { func lfsHandler(w http.ResponseWriter, r *http.Request) { repo, err := repoFromLfsUrl(r.URL.Path) if err != nil { - w.Write([]byte(err.Error())) w.WriteHeader(500) + w.Write([]byte(err.Error())) return } @@ -282,9 +293,14 @@ func lfsBatchHandler(w http.ResponseWriter, r *http.Request, repo string) { } type batchReq struct { + Transfers []string `json:"transfers"` Operation string `json:"operation"` Objects []lfsObject `json:"objects"` } + type batchResp struct { + Transfer string `json:"transfer,omitempty"` + Objects []lfsObject `json:"objects"` + } buf := &bytes.Buffer{} tee := io.TeeReader(r.Body, buf) @@ -302,6 +318,18 @@ func lfsBatchHandler(w http.ResponseWriter, r *http.Request, repo string) { res := []lfsObject{} testingChunked := testingChunkedTransferEncoding(r) + testingTus := testingTusUploadInBatchReq(r) + testingTusInterrupt := testingTusUploadInterruptedInBatchReq(r) + var transferChoice string + if testingTus { + for _, t := range objs.Transfers { + if t == "tus" { + transferChoice = "tus" + break + } + + } + } for _, obj := range objs.Objects { action := objs.Operation @@ -324,7 +352,9 @@ func lfsBatchHandler(w http.ResponseWriter, r *http.Request, repo string) { } } - switch oidHandlers[obj.Oid] { + handler := oidHandlers[obj.Oid] + + switch handler { case "status-batch-403": o.Err = &lfsError{Code: 403, Message: "welp"} case "status-batch-404": @@ -337,23 +367,31 @@ func 
lfsBatchHandler(w http.ResponseWriter, r *http.Request, repo string) { o.Err = &lfsError{Code: 500, Message: "welp"} default: // regular 200 response if addAction { - o.Actions = map[string]lfsLink{ - action: lfsLink{ - Href: lfsUrl(repo, obj.Oid), - Header: map[string]string{}, - }, + a := lfsLink{ + Href: lfsUrl(repo, obj.Oid), + Header: map[string]string{}, } + + if handler == "return-expired-action" && canServeExpired(repo) { + a.ExpiresAt = time.Now().Add(-5 * time.Minute) + serveExpired(repo) + } + + o.Actions = map[string]lfsLink{action: a} } } if testingChunked { o.Actions[action].Header["Transfer-Encoding"] = "chunked" } + if testingTusInterrupt { + o.Actions[action].Header["Lfs-Tus-Interrupt"] = "true" + } res = append(res, o) } - ores := map[string][]lfsObject{"objects": res} + ores := batchResp{Transfer: transferChoice, Objects: res} by, err := json.Marshal(ores) if err != nil { @@ -367,12 +405,41 @@ func lfsBatchHandler(w http.ResponseWriter, r *http.Request, repo string) { w.Write(by) } +// emu guards expiredRepos +var emu sync.Mutex + +// expiredRepos is a map keyed by repository name, mapping to whether or not it +// has yet served an expired object. +var expiredRepos = map[string]bool{} + +// canServeExpired returns whether or not a repository is capable of serving an +// expired object. In other words, canServeExpired returns whether or not the +// given repo has yet to serve an expired object. +func canServeExpired(repo string) bool { + emu.Lock() + defer emu.Unlock() + + return !expiredRepos[repo] +} + +// serveExpired marks the given repo as having served an expired object, making +// it impossible for that same repository to return an expired object in the future +func serveExpired(repo string) { + emu.Lock() + defer emu.Unlock() + + expiredRepos[repo] = true +} + +// Persistent state across requests +var batchResumeFailFallbackStorageAttempts = 0 +var tusStorageAttempts = 0 + // handles any /storage/{oid} requests func storageHandler(w http.ResponseWriter, r *http.Request) { repo := r.URL.Query().Get("r") parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] - if missingRequiredCreds(w, r, repo) { return } @@ -413,6 +480,7 @@ func storageHandler(w http.ResponseWriter, r *http.Request) { hash := sha256.New() buf := &bytes.Buffer{} + io.Copy(io.MultiWriter(hash, buf), r.Body) oid := hex.EncodeToString(hash.Sum(nil)) if !strings.HasSuffix(r.URL.Path, "/"+oid) { @@ -425,18 +493,154 @@ func storageHandler(w http.ResponseWriter, r *http.Request) { case "GET": parts := strings.Split(r.URL.Path, "/") oid := parts[len(parts)-1] + statusCode := 200 + byteLimit := 0 + resumeAt := int64(0) if by, ok := largeObjects.Get(repo, oid); ok { - w.Write(by) + if len(by) == len("status-batch-resume-206") && string(by) == "status-batch-resume-206" { + // Resume if header includes range, otherwise deliberately interrupt + if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { + regex := regexp.MustCompile(`bytes=(\d+)\-.*`) + match := regex.FindStringSubmatch(rangeHdr) + if match != nil && len(match) > 1 { + statusCode = 206 + resumeAt, _ = strconv.ParseInt(match[1], 10, 32) + w.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", resumeAt, len(by), resumeAt-int64(len(by)))) + } + } else { + byteLimit = 10 + } + } else if len(by) == len("batch-resume-fail-fallback") && string(by) == "batch-resume-fail-fallback" { + // Fail any Range: request even though we said we supported it + // To make sure client can fall back + if rangeHdr := r.Header.Get("Range"); rangeHdr != "" { + 
w.WriteHeader(416) + return + } + if batchResumeFailFallbackStorageAttempts == 0 { + // Truncate output on FIRST attempt to cause resume + // Second attempt (without range header) is fallback, complete successfully + byteLimit = 8 + batchResumeFailFallbackStorageAttempts++ + } + } + w.WriteHeader(statusCode) + if byteLimit > 0 { + w.Write(by[0:byteLimit]) + } else if resumeAt > 0 { + w.Write(by[resumeAt:]) + } else { + w.Write(by) + } return } w.WriteHeader(404) + case "HEAD": + // tus.io + if !validateTusHeaders(r) { + w.WriteHeader(400) + return + } + parts := strings.Split(r.URL.Path, "/") + oid := parts[len(parts)-1] + var offset int64 + if by, ok := largeObjects.GetIncomplete(repo, oid); ok { + offset = int64(len(by)) + } + w.Header().Set("Upload-Offset", strconv.FormatInt(offset, 10)) + w.WriteHeader(200) + case "PATCH": + // tus.io + if !validateTusHeaders(r) { + w.WriteHeader(400) + return + } + parts := strings.Split(r.URL.Path, "/") + oid := parts[len(parts)-1] + + offsetHdr := r.Header.Get("Upload-Offset") + offset, err := strconv.ParseInt(offsetHdr, 10, 64) + if err != nil { + log.Fatal("Unable to parse Upload-Offset header in request: ", err) + w.WriteHeader(400) + return + } + hash := sha256.New() + buf := &bytes.Buffer{} + out := io.MultiWriter(hash, buf) + + if by, ok := largeObjects.GetIncomplete(repo, oid); ok { + if offset != int64(len(by)) { + log.Fatal(fmt.Sprintf("Incorrect offset in request, got %d expected %d", offset, len(by))) + w.WriteHeader(400) + return + } + _, err := out.Write(by) + if err != nil { + log.Fatal("Error reading incomplete bytes from store: ", err) + w.WriteHeader(500) + return + } + largeObjects.DeleteIncomplete(repo, oid) + log.Printf("Resuming upload of %v at byte %d", oid, offset) + } + + // As a test, we intentionally break the upload from byte 0 by only + // reading some bytes, then quitting & erroring; this forces a resume + // (any offset > 0 will work ok) + var copyErr error + if r.Header.Get("Lfs-Tus-Interrupt") == "true" && offset == 0 { + chdr := r.Header.Get("Content-Length") + contentLen, err := strconv.ParseInt(chdr, 10, 64) + if err != nil { + log.Fatal(fmt.Sprintf("Invalid Content-Length %q", chdr)) + w.WriteHeader(400) + return + } + truncated := contentLen / 3 + _, _ = io.CopyN(out, r.Body, truncated) + r.Body.Close() + copyErr = fmt.Errorf("Simulated copy error") + } else { + _, copyErr = io.Copy(out, r.Body) + } + if copyErr != nil { + b := buf.Bytes() + if len(b) > 0 { + log.Printf("Incomplete upload of %v, %d bytes", oid, len(b)) + largeObjects.SetIncomplete(repo, oid, b) + } + w.WriteHeader(500) + } else { + checkoid := hex.EncodeToString(hash.Sum(nil)) + if checkoid != oid { + log.Fatal(fmt.Sprintf("Incorrect oid after calculation, got %q expected %q", checkoid, oid)) + w.WriteHeader(403) + return + } + + b := buf.Bytes() + largeObjects.Set(repo, oid, b) + w.Header().Set("Upload-Offset", strconv.FormatInt(int64(len(b)), 10)) + w.WriteHeader(204) + } + default: w.WriteHeader(405) } } +func validateTusHeaders(r *http.Request) bool { + if len(r.Header.Get("Tus-Resumable")) == 0 { + log.Fatal("Missing Tus-Resumable header in request") + return false + } + return true + +} + func gitHandler(w http.ResponseWriter, r *http.Request) { defer func() { io.Copy(ioutil.Discard, r.Body) @@ -500,6 +704,217 @@ func redirect307Handler(w http.ResponseWriter, r *http.Request) { w.WriteHeader(307) } +type Committer struct { + Name string `json:"name"` + Email string `json:"email"` +} + +type Lock struct { + Id string `json:"id"` + Path string 
`json:"path"` + Committer Committer `json:"committer"` + CommitSHA string `json:"commit_sha"` + LockedAt time.Time `json:"locked_at"` + UnlockedAt time.Time `json:"unlocked_at,omitempty"` +} + +type LockRequest struct { + Path string `json:"path"` + LatestRemoteCommit string `json:"latest_remote_commit"` + Committer Committer `json:"committer"` +} + +type LockResponse struct { + Lock *Lock `json:"lock"` + CommitNeeded string `json:"commit_needed,omitempty"` + Err string `json:"error,omitempty"` +} + +type UnlockRequest struct { + Id string `json:"id"` + Force bool `json:"force"` +} + +type UnlockResponse struct { + Lock *Lock `json:"lock"` + Err string `json:"error,omitempty"` +} + +type LockList struct { + Locks []Lock `json:"locks"` + NextCursor string `json:"next_cursor,omitempty"` + Err string `json:"error,omitempty"` +} + +var ( + lmu sync.RWMutex + locks = []Lock{} +) + +func addLocks(l ...Lock) { + lmu.Lock() + defer lmu.Unlock() + + locks = append(locks, l...) + + sort.Sort(LocksByCreatedAt(locks)) +} + +func getLocks() []Lock { + lmu.RLock() + defer lmu.RUnlock() + + return locks +} + +type LocksByCreatedAt []Lock + +func (c LocksByCreatedAt) Len() int { return len(c) } +func (c LocksByCreatedAt) Less(i, j int) bool { return c[i].LockedAt.Before(c[j].LockedAt) } +func (c LocksByCreatedAt) Swap(i, j int) { c[i], c[j] = c[j], c[i] } + +var lockRe = regexp.MustCompile(`/locks/?$`) + +func locksHandler(w http.ResponseWriter, r *http.Request) { + dec := json.NewDecoder(r.Body) + enc := json.NewEncoder(w) + + switch r.Method { + case "GET": + if !lockRe.MatchString(r.URL.Path) { + http.NotFound(w, r) + } else { + if err := r.ParseForm(); err != nil { + http.Error(w, "could not parse form values", http.StatusInternalServerError) + return + } + + ll := &LockList{} + locks := getLocks() + + if cursor := r.FormValue("cursor"); cursor != "" { + lastSeen := -1 + for i, l := range locks { + if l.Id == cursor { + lastSeen = i + break + } + } + + if lastSeen > -1 { + locks = locks[lastSeen:] + } else { + enc.Encode(&LockList{ + Err: fmt.Sprintf("cursor (%s) not found", cursor), + }) + } + } + + if path := r.FormValue("path"); path != "" { + var filtered []Lock + for _, l := range locks { + if l.Path == path { + filtered = append(filtered, l) + } + } + + locks = filtered + } + + if limit := r.FormValue("limit"); limit != "" { + size, err := strconv.Atoi(r.FormValue("limit")) + if err != nil { + enc.Encode(&LockList{ + Err: "unable to parse limit amount", + }) + } else { + size = int(math.Min(float64(len(locks)), 3)) + if size < 0 { + locks = []Lock{} + } else { + locks = locks[:size] + if size+1 < len(locks) { + ll.NextCursor = locks[size+1].Id + } + } + + } + } + + ll.Locks = locks + + enc.Encode(ll) + } + case "POST": + if strings.HasSuffix(r.URL.Path, "unlock") { + var unlockRequest UnlockRequest + if err := dec.Decode(&unlockRequest); err != nil { + enc.Encode(&UnlockResponse{ + Err: err.Error(), + }) + } + + lockIndex := -1 + for i, l := range locks { + if l.Id == unlockRequest.Id { + lockIndex = i + break + } + } + + if lockIndex > -1 { + enc.Encode(&UnlockResponse{ + Lock: &locks[lockIndex], + }) + + locks = append(locks[:lockIndex], locks[lockIndex+1:]...) 
+ } else { + enc.Encode(&UnlockResponse{ + Err: "unable to find lock", + }) + } + } else { + var lockRequest LockRequest + if err := dec.Decode(&lockRequest); err != nil { + enc.Encode(&LockResponse{ + Err: err.Error(), + }) + } + + for _, l := range getLocks() { + if l.Path == lockRequest.Path { + enc.Encode(&LockResponse{ + Err: "lock already created", + }) + return + } + } + + var id [20]byte + rand.Read(id[:]) + + lock := &Lock{ + Id: fmt.Sprintf("%x", id[:]), + Path: lockRequest.Path, + Committer: lockRequest.Committer, + CommitSHA: lockRequest.LatestRemoteCommit, + LockedAt: time.Now(), + } + + addLocks(*lock) + + // TODO(taylor): commit_needed case + // TODO(taylor): err case + + enc.Encode(&LockResponse{ + Lock: lock, + }) + } + default: + http.NotFound(w, r) + } +} + func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) bool { if repo != "requirecreds" { return false @@ -508,15 +923,15 @@ func missingRequiredCreds(w http.ResponseWriter, r *http.Request, repo string) b auth := r.Header.Get("Authorization") user, pass, err := extractAuth(auth) if err != nil { - w.Write([]byte(`{"message":"` + err.Error() + `"}`)) w.WriteHeader(403) + w.Write([]byte(`{"message":"` + err.Error() + `"}`)) return true } if user != "requirecreds" || pass != "pass" { errmsg := fmt.Sprintf("Got: '%s' => '%s' : '%s'", auth, user, pass) - w.Write([]byte(`{"message":"` + errmsg + `"}`)) w.WriteHeader(403) + w.Write([]byte(`{"message":"` + errmsg + `"}`)) return true } @@ -527,6 +942,13 @@ func testingChunkedTransferEncoding(r *http.Request) bool { return strings.HasPrefix(r.URL.String(), "/test-chunked-transfer-encoding") } +func testingTusUploadInBatchReq(r *http.Request) bool { + return strings.HasPrefix(r.URL.String(), "/test-tus-upload") +} +func testingTusUploadInterruptedInBatchReq(r *http.Request) bool { + return strings.HasPrefix(r.URL.String(), "/test-tus-upload-interrupt") +} + var lfsUrlRE = regexp.MustCompile(`\A/?([^/]+)/info/lfs`) func repoFromLfsUrl(urlpath string) (string, error) { @@ -543,8 +965,9 @@ func repoFromLfsUrl(urlpath string) (string, error) { } type lfsStorage struct { - objects map[string]map[string][]byte - mutex *sync.Mutex + objects map[string]map[string][]byte + incomplete map[string]map[string][]byte + mutex *sync.Mutex } func (s *lfsStorage) Get(repo, oid string) ([]byte, bool) { @@ -591,10 +1014,43 @@ func (s *lfsStorage) Delete(repo, oid string) { } } +func (s *lfsStorage) GetIncomplete(repo, oid string) ([]byte, bool) { + s.mutex.Lock() + defer s.mutex.Unlock() + repoObjects, ok := s.incomplete[repo] + if !ok { + return nil, ok + } + + by, ok := repoObjects[oid] + return by, ok +} + +func (s *lfsStorage) SetIncomplete(repo, oid string, by []byte) { + s.mutex.Lock() + defer s.mutex.Unlock() + repoObjects, ok := s.incomplete[repo] + if !ok { + repoObjects = make(map[string][]byte) + s.incomplete[repo] = repoObjects + } + repoObjects[oid] = by +} + +func (s *lfsStorage) DeleteIncomplete(repo, oid string) { + s.mutex.Lock() + defer s.mutex.Unlock() + repoObjects, ok := s.incomplete[repo] + if ok { + delete(repoObjects, oid) + } +} + func newLfsStorage() *lfsStorage { return &lfsStorage{ - objects: make(map[string]map[string][]byte), - mutex: &sync.Mutex{}, + objects: make(map[string]map[string][]byte), + incomplete: make(map[string]map[string][]byte), + mutex: &sync.Mutex{}, } } diff --git a/test/cmd/lfstest-testutils.go b/test/cmd/lfstest-testutils.go index 3c8e4bc9..a5da7613 100644 --- a/test/cmd/lfstest-testutils.go +++ 
b/test/cmd/lfstest-testutils.go @@ -1,3 +1,5 @@ +// +build testtools + package main import ( diff --git a/test/git-lfs-test-server-api/main.go b/test/git-lfs-test-server-api/main.go index 03058755..3854a3dd 100644 --- a/test/git-lfs-test-server-api/main.go +++ b/test/git-lfs-test-server-api/main.go @@ -10,10 +10,12 @@ import ( "strconv" "strings" - "github.com/github/git-lfs/test" - + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/config" + "github.com/github/git-lfs/errutil" "github.com/github/git-lfs/lfs" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/cobra" + "github.com/github/git-lfs/test" + "github.com/spf13/cobra" ) type TestObject struct { @@ -59,16 +61,16 @@ func testServerApi(cmd *cobra.Command, args []string) { } // Force loading of config before we alter it - lfs.Config.AllGitConfig() + config.Config.AllGitConfig() // Configure the endpoint manually - var endp lfs.Endpoint + var endp config.Endpoint if len(cloneUrl) > 0 { - endp = lfs.NewEndpointFromCloneURL(cloneUrl) + endp = config.NewEndpointFromCloneURL(cloneUrl) } else { - endp = lfs.NewEndpoint(apiUrl) + endp = config.NewEndpoint(apiUrl) } - lfs.Config.SetManualEndpoint(endp) + config.Config.SetManualEndpoint(endp) var oidsExist, oidsMissing []TestObject if len(args) >= 2 { @@ -168,7 +170,7 @@ func buildTestData() (oidsExist, oidsMissing []TestObject, err error) { uploadQueue.Wait() for _, err := range uploadQueue.Errors() { - if lfs.IsFatalError(err) { + if errutil.IsFatalError(err) { exit("Fatal error setting up test data: %s", err) } } @@ -242,13 +244,17 @@ func addTest(name string, f func(oidsExist, oidsMissing []TestObject) error) { tests = append(tests, ServerTest{Name: name, F: f}) } -func callBatchApi(op string, objs []TestObject) ([]*lfs.ObjectResource, error) { +func callBatchApi(op string, objs []TestObject) ([]*api.ObjectResource, error) { - apiobjs := make([]*lfs.ObjectResource, 0, len(objs)) + apiobjs := make([]*api.ObjectResource, 0, len(objs)) for _, o := range objs { - apiobjs = append(apiobjs, &lfs.ObjectResource{Oid: o.Oid, Size: o.Size}) + apiobjs = append(apiobjs, &api.ObjectResource{Oid: o.Oid, Size: o.Size}) } - return lfs.Batch(apiobjs, op) + o, _, err := api.Batch(apiobjs, op, []string{"basic"}) + if err != nil { + return nil, err + } + return o, nil } // Combine 2 slices into one by "randomly" interleaving diff --git a/test/test-clone.sh b/test/test-clone.sh index e29842e2..374762d5 100755 --- a/test/test-clone.sh +++ b/test/test-clone.sh @@ -223,3 +223,144 @@ begin_test "clone with flags" ) end_test + +begin_test "clone (with include/exclude args)" +( + set -e + + reponame="clone_include_exclude" + setup_remote_repo "$reponame" + clone_repo "$reponame" "$reponame" + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + contents_a="a" + contents_a_oid=$(calc_oid "$contents_a") + printf "$contents_a" > "a.dat" + + contents_b="b" + contents_b_oid=$(calc_oid "$contents_b") + printf "$contents_b" > "b.dat" + + git add a.dat b.dat .gitattributes + git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log + grep "master (root-commit)" commit.log + grep "3 files changed" commit.log + grep "create mode 100644 a.dat" commit.log + grep "create mode 100644 b.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + grep "Git LFS: (2 of 2 files)" push.log + + cd "$TRASHDIR" + + local_reponame="clone_with_includes" + git lfs clone "$GITSERVER/$reponame" 
"$local_reponame" -I "a.dat" + pushd "$local_reponame" + assert_local_object "$contents_a_oid" 1 + refute_local_object "$contents_b_oid" + popd + + local_reponame="clone_with_excludes" + git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "b.dat" -X "a.dat" + pushd "$local_reponame" + assert_local_object "$contents_b_oid" 1 + refute_local_object "$contents_a_oid" + popd +) +end_test + +begin_test "clone (with .lfsconfig)" +( + set -e + + reponame="clone_with_lfsconfig" + setup_remote_repo "$reponame" + clone_repo "$reponame" "$reponame" + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + contents_a="a" + contents_a_oid=$(calc_oid "$contents_a") + printf "$contents_a" > "a.dat" + + contents_b="b" + contents_b_oid=$(calc_oid "$contents_b") + printf "$contents_b" > "b.dat" + + + + git add a.dat b.dat .gitattributes + git commit -m "add a.dat, b.dat" 2>&1 | tee commit.log + grep "master (root-commit)" commit.log + grep "3 files changed" commit.log + grep "create mode 100644 a.dat" commit.log + grep "create mode 100644 b.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + git config -f ".lfsconfig" "lfs.fetchinclude" "a*" + git add ".lfsconfig" + git commit -m "config lfs.fetchinclude a*" 2>&1 | tee commit.log + grep "master" commit.log + grep "1 file changed" commit.log + grep "create mode 100644 .lfsconfig" commit.log + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + grep "Git LFS: (2 of 2 files)" push.log + + pushd "$TRASHDIR" + + echo "test: clone with lfs.fetchinclude in .lfsconfig" + local_reponame="clone_with_config_include" + git lfs clone "$GITSERVER/$reponame" "$local_reponame" + pushd "$local_reponame" + assert_local_object "$contents_a_oid" 1 + refute_local_object "$contents_b_oid" + popd + + echo "test: clone with lfs.fetchinclude in .lfsconfig, and args" + local_reponame="clone_with_config_include_and_args" + git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "b.dat" + pushd "$local_reponame" + refute_local_object "$contents_a_oid" + assert_local_object "$contents_b_oid" 1 + popd + + popd + + git config -f ".lfsconfig" "lfs.fetchinclude" "b*" + git config -f ".lfsconfig" "lfs.fetchexclude" "a*" + git add .lfsconfig + git commit -m "config lfs.fetchinclude a*" 2>&1 | tee commit.log + grep "master" commit.log + grep "1 file changed" commit.log + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + + pushd "$TRASHDIR" + + echo "test: clone with lfs.fetchexclude in .lfsconfig" + local_reponame="clone_with_config_exclude" + git lfs clone "$GITSERVER/$reponame" "$local_reponame" + pushd "$local_reponame" + cat ".lfsconfig" + assert_local_object "$contents_b_oid" 1 + refute_local_object "$contents_a_oid" + popd + + echo "test: clone with lfs.fetchexclude in .lfsconfig, and args" + local_reponame="clone_with_config_exclude_and_args" + git lfs clone "$GITSERVER/$reponame" "$local_reponame" -I "a.dat" -X "b.dat" + pushd "$local_reponame" + assert_local_object "$contents_a_oid" 1 + refute_local_object "$contents_b_oid" + popd + + popd +) +end_test diff --git a/test/test-credentials-no-prompt.sh b/test/test-credentials-no-prompt.sh index b19b554a..f5dd94f4 100755 --- a/test/test-credentials-no-prompt.sh +++ b/test/test-credentials-no-prompt.sh @@ -34,8 +34,10 @@ begin_test "attempt private access without credential helper" git add .gitattributes git commit -m "initial commit" - git config --unset credential.helper - git config --global --unset credential.helper + 
git config credential.usehttppath true + git config --global credential.helper lfsnoop + git config credential.helper lfsnoop + git config -l GIT_TERMINAL_PROMPT=0 git push origin master 2>&1 | tee push.log grep "Authorization error: $GITSERVER/$reponame" push.log || diff --git a/test/test-env.sh b/test/test-env.sh index 974f6ee3..4c223667 100755 --- a/test/test-env.sh +++ b/test/test-env.sh @@ -30,6 +30,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -77,6 +78,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -131,6 +133,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -183,6 +186,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -237,6 +241,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -292,6 +297,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -349,6 +355,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=5 +BasicTransfersOnly=false BatchTransfer=false SkipDownloadErrors=false FetchRecentAlways=false @@ -412,6 +419,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -469,6 +477,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -519,6 +528,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -560,6 +570,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -590,6 +601,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -619,6 +631,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -659,6 +672,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -730,6 +744,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=true FetchRecentAlways=false @@ -759,6 +774,7 @@ LocalMediaDir=%s LocalReferenceDir= TempDir=%s ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=true FetchRecentAlways=false diff --git a/test/test-fetch.sh b/test/test-fetch.sh index d1258433..6308748c 100755 --- a/test/test-fetch.sh +++ b/test/test-fetch.sh @@ -80,6 +80,20 @@ begin_test "fetch" assert_local_object "$contents_oid" 1 assert_local_object 
"$b_oid" 1 + # test with master commit sha1 specified + rm -rf .git/lfs/objects + master_sha1=$(git rev-parse master) + git lfs fetch origin "$master_sha1" + assert_local_object "$contents_oid" 1 + refute_local_object "$b_oid" 1 + + # test with newbranch commit sha1 specified + rm -rf .git/lfs/objects + newbranch_sha1=$(git rev-parse newbranch) + git lfs fetch origin "$newbranch_sha1" + assert_local_object "$contents_oid" 1 + assert_local_object "$b_oid" 1 + # Test include / exclude filters supplied in gitconfig rm -rf .git/lfs/objects git config "lfs.fetchinclude" "a*" @@ -400,6 +414,78 @@ begin_test "fetch-all" assert_local_object "${oid[$a]}" "${#content[$a]}" done + # Make a bare clone of the repository + cd .. + git clone --bare "$GITSERVER/$reponame" "$reponame-bare" + cd "$reponame-bare" + + # Preform the same assertion as above, on the same data + git lfs fetch --all origin + for ((a=0; a < NUMFILES ; a++)); do + assert_local_object "${oid[$a]}" "${#content[$a]}" + done +) +end_test + +begin_test "fetch include/exclude with unclean paths" +( + set -e + + reponame="fetch-unclean-paths" + setup_remote_repo $reponame + clone_repo $reponame include_exclude_repo + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + contents="a" + contents_oid=$(calc_oid "$contents") + + mkdir dir + printf "$contents" > dir/a.dat + + git add dir/a.dat + git add .gitattributes + git commit -m "add dir/a.dat" 2>&1 | tee commit.log + grep "master (root-commit)" commit.log + grep "2 files changed" commit.log + grep "create mode 100644 dir/a.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + [ "a" = "$(cat dir/a.dat)" ] + + assert_local_object "$contents_oid" 1 + refute_server_object "$contents_oid" + + git push origin master 2>&1 | tee push.log + grep "(1 of 1 files)" push.log + grep "master -> master" push.log + + assert_server_object "$reponame" "$contents_oid" + + echo "lfs pull with include/exclude filters in gitconfig" + + rm -rf .git/lfs/objects + git config "lfs.fetchinclude" "dir/" + git lfs pull + assert_local_object "$contents_oid" 1 + git config --unset "lfs.fetchinclude" + + rm -rf .git/lfs/objects + git config "lfs.fetchexclude" "dir/" + git lfs pull + refute_local_object "$contents_oid" + git config --unset "lfs.fetchexclude" + + echo "lfs pull with include/exclude filters in arguments" + + rm -rf .git/lfs/objects + git lfs pull -I="dir/" + assert_local_object "$contents_oid" 1 + + rm -rf .git/lfs/objects + git lfs pull -X="dir/" + refute_local_object "$contents_oid" ) end_test diff --git a/test/test-lock.sh b/test/test-lock.sh new file mode 100755 index 00000000..f6a48e81 --- /dev/null +++ b/test/test-lock.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +. 
"test/testlib.sh" + +begin_test "creating a lock" +( + set -e + + setup_remote_repo_with_file "lock_create_simple" "a.dat" + + git lfs lock "a.dat" | tee lock.log + grep "'a.dat' was locked" lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock $id +) +end_test + +begin_test "locking a previously locked file" +( + set -e + + setup_remote_repo_with_file "lock_create_previously_created" "b.dat" + + git lfs lock "b.dat" | tee lock.log + grep "'b.dat' was locked" lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock $id + + grep "lock already created" <(git lfs lock "b.dat" 2>&1) +) +end_test + +begin_test "locking a directory" +( + set -e + + reponame="locking_directories" + setup_remote_repo "remote_$reponame" + clone_repo "remote_$reponame" "clone_$reponame" + + git lfs track "*.dat" + mkdir dir + echo "a" > dir/a.dat + + git add dir/a.dat .gitattributes + + git commit -m "add dir/a.dat" | tee commit.log + grep "master (root-commit)" commit.log + grep "2 files changed" commit.log + grep "create mode 100644 dir/a.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + + git lfs lock ./dir/ 2>&1 | tee lock.log + grep "cannot lock directory" lock.log +) +end_test diff --git a/test/test-locks.sh b/test/test-locks.sh new file mode 100755 index 00000000..e9532ee3 --- /dev/null +++ b/test/test-locks.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash + +. "test/testlib.sh" + +begin_test "list a single lock" +( + set -e + + setup_remote_repo_with_file "locks_list_single" "f.dat" + + git lfs lock "f.dat" | tee lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock $id + + git lfs locks --path "f.dat" | tee locks.log + grep "1 lock(s) matched query" locks.log + grep "f.dat" locks.log +) +end_test + +begin_test "list locks with a limit" +( + set -e + + reponame="locks_list_limit" + setup_remote_repo "remote_$reponame" + clone_repo "remote_$reponame" "clone_$reponame" + + git lfs track "*.dat" + echo "foo" > "g_1.dat" + echo "bar" > "g_2.dat" + + git add "g_1.dat" "g_2.dat" ".gitattributes" + git commit -m "add files" | tee commit.log + grep "3 files changed" commit.log + grep "create mode 100644 g_1.dat" commit.log + grep "create mode 100644 g_2.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + + git lfs lock "g_1.dat" | tee lock.log + assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + + git lfs lock "g_2.dat" | tee lock.log + assert_server_lock "$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + + git lfs locks --limit 1 | tee locks.log + grep "1 lock(s) matched query" locks.log +) +end_test + +begin_test "list locks with pagination" +( + set -e + + reponame="locks_list_paginate" + setup_remote_repo "remote_$reponame" + clone_repo "remote_$reponame" "clone_$reponame" + + git lfs track "*.dat" + for i in $(seq 1 5); do + echo "$i" > "h_$i.dat" + done + + git add "h_1.dat" "h_2.dat" "h_3.dat" "h_4.dat" "h_5.dat" ".gitattributes" + + git commit -m "add files" | tee commit.log + grep "6 files changed" commit.log + for i in $(seq 1 5); do + grep "create mode 100644 h_$i.dat" commit.log + done + grep "create mode 100644 .gitattributes" commit.log + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log + + for i in $(seq 1 5); do + git lfs lock "h_$i.dat" | tee lock.log + assert_server_lock 
"$(grep -oh "\((.*)\)" lock.log | tr -d "()")" + done + + # The server will return, at most, three locks at a time + git lfs locks --limit 4 | tee locks.log + grep "4 lock(s) matched query" locks.log +) +end_test diff --git a/test/test-pre-push.sh b/test/test-pre-push.sh index f209052b..0d88c6f6 100755 --- a/test/test-pre-push.sh +++ b/test/test-pre-push.sh @@ -100,7 +100,7 @@ begin_test "pre-push 307 redirects" echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/redirect307/rel/$reponame.git/info/lfs" 2>&1 | tee push.log - grep "(0 of 1 files, 1 skipped)" push.log + grep "(0 of 0 files, 1 skipped)" push.log assert_server_object "$reponame" 98ea6e4f216f2fb4b69fff9b3a44842c38686ca685f3f55dc48c5d3fb1107be4 @@ -116,7 +116,7 @@ begin_test "pre-push 307 redirects" echo "refs/heads/master master refs/heads/master 0000000000000000000000000000000000000000" | git lfs pre-push origin "$GITSERVER/redirect307/abs/$reponame.git/info/lfs" 2>&1 | tee push.log - grep "(0 of 1 files, 1 skipped)" push.log + grep "(0 of 0 files, 1 skipped)" push.log ) end_test diff --git a/test/test-push.sh b/test/test-push.sh index 2248155e..25b72665 100755 --- a/test/test-push.sh +++ b/test/test-push.sh @@ -42,7 +42,7 @@ begin_test "push" rm -rf .git/refs/remotes git lfs push origin push-b 2>&1 | tee push.log - grep "(1 of 2 files, 1 skipped)" push.log + grep "(1 of 1 files, 1 skipped)" push.log ) end_test @@ -162,7 +162,7 @@ begin_test "push --all (no ref args)" [ $(grep -c "push" push.log) -eq 6 ] git push --all origin 2>&1 | tee push.log - grep "(2 of 3 files, 1 skipped)" push.log + grep "(2 of 2 files, 1 skipped)" push.log grep "(3 of 3 files)" push.log [ $(grep -c "files)" push.log) -eq 1 ] [ $(grep -c "skipped)" push.log) -eq 1 ] @@ -343,7 +343,7 @@ begin_test "push object id(s)" git lfs push --object-id origin \ 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 \ 2>&1 | tee push.log - grep "(0 of 1 files, 1 skipped)" push.log + grep "(0 of 0 files, 1 skipped)" push.log echo "push b" > b.dat git add b.dat @@ -353,7 +353,7 @@ begin_test "push object id(s)" 4c48d2a6991c9895bcddcf027e1e4907280bcf21975492b1afbade396d6a3340 \ 82be50ad35070a4ef3467a0a650c52d5b637035e7ad02c36652e59d01ba282b7 \ 2>&1 | tee push.log - grep "(0 of 2 files, 2 skipped)" push.log + grep "(0 of 0 files, 2 skipped)" push.log ) end_test @@ -486,3 +486,28 @@ begin_test "push ambiguous branch name" ) end_test + +begin_test "push (retry with expired actions)" +( + set -e + + reponame="push_retry_expired_action" + setup_remote_repo "$reponame" + clone_repo "$reponame" "$reponame" + + git lfs track "*.dat" + printf "return-expired-action" > a.dat + git add .gitattributes a.dat + + git commit -m "add a.dat, .gitattributes" 2>&1 | tee commit.log + grep "master (root-commit)" commit.log + grep "2 files changed" commit.log + grep "create mode 100644 a.dat" commit.log + grep "create mode 100644 .gitattributes" commit.log + + GIT_TRACE=1 git push origin master 2>&1 | tee push.log + + [ "1" -eq "$(grep -c "expired, retrying..." push.log)" ] + grep "(1 of 1 files)" push.log +) +end_test diff --git a/test/test-resume-http-range.sh b/test/test-resume-http-range.sh new file mode 100755 index 00000000..3fc1cd86 --- /dev/null +++ b/test/test-resume-http-range.sh @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +. 
"test/testlib.sh" + +begin_test "resume-http-range" +( + set -e + + reponame="$(basename "$0" ".sh")" + setup_remote_repo "$reponame" + + clone_repo "$reponame" $reponame + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + # this string announces to server that we want a test that + # interrupts the transfer when started from 0 to cause resume + contents="status-batch-resume-206" + contents_oid=$(calc_oid "$contents") + + printf "$contents" > a.dat + git add a.dat + git add .gitattributes + git commit -m "add a.dat" 2>&1 | tee commit.log + git push origin master + + assert_server_object "$reponame" "$contents_oid" + + # delete local copy then fetch it back + # server will abort the transfer mid way (so will error) when not resuming + # then we can restart it + rm -rf .git/lfs/objects + git lfs fetch 2>&1 | tee fetchinterrupted.log + refute_local_object "$contents_oid" + + # now fetch again, this should try to resume and server should send remainder + # this time (it does not cut short when Range is requested) + GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresume.log + grep "xfer: server accepted resume" fetchresume.log + assert_local_object "$contents_oid" "${#contents}" + +) +end_test + +begin_test "resume-http-range-fallback" +( + set -e + + reponame="resume-http-range-fallback" + setup_remote_repo "$reponame" + + clone_repo "$reponame" $reponame + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + # this string announces to server that we want it to abort the download part + # way, but reject the Range: header and fall back on re-downloading instead + contents="batch-resume-fail-fallback" + contents_oid=$(calc_oid "$contents") + + printf "$contents" > a.dat + git add a.dat + git add .gitattributes + git commit -m "add a.dat" 2>&1 | tee commit.log + git push origin master + + assert_server_object "$reponame" "$contents_oid" + + # delete local copy then fetch it back + # server will abort the transfer mid way (so will error) when not resuming + # then we can restart it + rm -rf .git/lfs/objects + git lfs fetch 2>&1 | tee fetchinterrupted.log + refute_local_object "$contents_oid" + + # now fetch again, this should try to resume but server should reject the Range + # header, which should cause client to re-download + GIT_TRACE=1 git lfs fetch 2>&1 | tee fetchresumefallback.log + grep "xfer: server rejected resume" fetchresumefallback.log + # re-download should still have worked + assert_local_object "$contents_oid" "${#contents}" + +) +end_test + diff --git a/test/test-resume-tus.sh b/test/test-resume-tus.sh new file mode 100755 index 00000000..37a323e6 --- /dev/null +++ b/test/test-resume-tus.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash + +. 
"test/testlib.sh" + +begin_test "tus-upload-uninterrupted" +( + set -e + + # this repo name is the indicator to the server to use tus + reponame="test-tus-upload" + setup_remote_repo "$reponame" + + clone_repo "$reponame" $reponame + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + contents="jksgdfljkgsdlkjafg lsjdgf alkjgsd lkfjag sldjkgf alkjsgdflkjagsd kljfg asdjgf kalsd" + contents_oid=$(calc_oid "$contents") + + printf "$contents" > a.dat + git add a.dat + git add .gitattributes + git commit -m "add a.dat" 2>&1 | tee commit.log + GIT_TRACE=1 git push origin master 2>&1 | tee pushtus.log + grep "xfer: tus.io uploading" pushtus.log + + assert_server_object "$reponame" "$contents_oid" + +) +end_test + +begin_test "tus-upload-interrupted-resume" +( + set -e + + # this repo name is the indicator to the server to use tus, AND to + # interrupt the upload part way + reponame="test-tus-upload-interrupt" + setup_remote_repo "$reponame" + + clone_repo "$reponame" $reponame + + git lfs track "*.dat" 2>&1 | tee track.log + grep "Tracking \*.dat" track.log + + # this string announces to server that we want it to abort the download part + # way, but reject the Range: header and fall back on re-downloading instead + contents="234587134187634598o634857619384765b747qcvtuedvoaicwtvseudtvcoqi7280r7qvow4i7r8c46pr9q6v9pri6ioq2r8" + contents_oid=$(calc_oid "$contents") + + printf "$contents" > a.dat + git add a.dat + git add .gitattributes + git commit -m "add a.dat" 2>&1 | tee commit.log + GIT_TRACE=1 git push origin master 2>&1 | tee pushtus_resume.log + # first attempt will start from the beginning + grep "xfer: tus.io uploading" pushtus_resume.log + grep "HTTP: 500" pushtus_resume.log + # that will have failed but retry on 500 will resume it + grep "xfer: tus.io resuming" pushtus_resume.log + grep "HTTP: 204" pushtus_resume.log + + # should have completed in the end + assert_server_object "$reponame" "$contents_oid" + +) +end_test + diff --git a/test/test-track.sh b/test/test-track.sh index 777281fd..492f74b0 100755 --- a/test/test-track.sh +++ b/test/test-track.sh @@ -45,6 +45,44 @@ begin_test "track" ) end_test +begin_test "track --verbose" +( + set -e + + reponame="track_verbose_logs" + mkdir "$reponame" + cd "$reponame" + git init + + touch foo.dat + git add foo.dat + + git lfs track --verbose "foo.dat" 2>&1 > track.log + grep "touching foo.dat" track.log +) +end_test + +begin_test "track --dry-run" +( + set -e + + reponame="track_dry_run" + mkdir "$reponame" + cd "$reponame" + git init + + touch foo.dat + git add foo.dat + + git lfs track --dry-run "foo.dat" 2>&1 > track.log + grep "Tracking foo.dat" track.log + grep "Git LFS: touching foo.dat" track.log + + git status --porcelain 2>&1 > status.log + grep "A foo.dat" status.log +) +end_test + begin_test "track directory" ( set -e @@ -220,3 +258,38 @@ begin_test "track in symlinked dir" } ) end_test + +begin_test "track blocklisted files by name" +( + set -e + + repo="track_blocklisted_by_name" + mkdir "$repo" + cd "$repo" + git init + + touch .gitattributes + git add .gitattributes + + git lfs track .gitattributes 2>&1 > track.log + grep "Pattern .gitattributes matches forbidden file .gitattributes" track.log +) +end_test + +begin_test "track blocklisted files with glob" +( + set -e + + repo="track_blocklisted_glob" + mkdir "$repo" + cd "$repo" + git init + + touch .gitattributes + git add .gitattributes + + git lfs track ".git*" 2>&1 > track.log + grep "Pattern .git\* matches forbidden file" track.log +) 
+end_test + diff --git a/test/test-unlock.sh b/test/test-unlock.sh new file mode 100755 index 00000000..1eb623c3 --- /dev/null +++ b/test/test-unlock.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +. "test/testlib.sh" + +begin_test "unlocking a lock by path" +( + set -e + + setup_remote_repo_with_file "unlock_by_path" "c.dat" + + git lfs lock "c.dat" | tee lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock $id + + git lfs unlock "c.dat" 2>&1 | tee unlock.log + refute_server_lock $id +) +end_test + +begin_test "unlocking a lock by id" +( + set -e + + setup_remote_repo_with_file "unlock_by_id" "d.dat" + + git lfs lock "d.dat" | tee lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock $id + + git lfs unlock --id="$id" 2>&1 | tee unlock.log + refute_server_lock $id +) +end_test + +begin_test "unlocking a lock without sufficient info" +( + set -e + + setup_remote_repo_with_file "unlock_ambiguous" "e.dat" + + git lfs lock "e.dat" | tee lock.log + + id=$(grep -oh "\((.*)\)" lock.log | tr -d "()") + assert_server_lock $id + + git lfs unlock 2>&1 | tee unlock.log + grep "Usage: git lfs unlock" unlock.log + assert_server_lock $id +) +end_test diff --git a/test/test-worktree.sh b/test/test-worktree.sh index e31a5b5e..90ac4512 100755 --- a/test/test-worktree.sh +++ b/test/test-worktree.sh @@ -27,6 +27,7 @@ LocalMediaDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs/objects") LocalReferenceDir= TempDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs/tmp") ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false @@ -59,6 +60,7 @@ LocalMediaDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/lfs/objects") LocalReferenceDir= TempDir=$(native_path_escaped "$TRASHDIR/$reponame/.git/worktrees/$worktreename/lfs/tmp") ConcurrentTransfers=3 +BasicTransfersOnly=false BatchTransfer=true SkipDownloadErrors=false FetchRecentAlways=false diff --git a/test/testhelpers.sh b/test/testhelpers.sh index 583daf6a..f3d19169 100644 --- a/test/testhelpers.sh +++ b/test/testhelpers.sh @@ -110,6 +110,37 @@ assert_server_object() { } } +# assert that a lock with the given ID exists on the test server +assert_server_lock() { + local id="$1" + + curl -v "$GITSERVER/locks/" \ + -u "user:pass" \ + -o http.json \ + -H "Accept:application/vnd.git-lfs+json" 2>&1 | + tee http.log + + grep "200 OK" http.log + grep "$id" http.json || { + cat http.json + exit 1 + } +} + +# refute that a lock with the given ID exists on the test server +refute_server_lock() { + local id="$1" + + curl -v "$GITSERVER/locks/" \ + -u "user:pass" \ + -o http.json \ + -H "Accept:application/vnd.git-lfs+json" 2>&1 | tee http.log + + grep "200 OK" http.log + + [ $(grep -c "$id" http.json) -eq 0 ] +} + # pointer returns a string Git LFS pointer file. # # $ pointer abc-some-oid 123 @@ -197,7 +228,7 @@ clone_repo() { # clone_repo_ssl clones a repository from the test Git server to the subdirectory -# $dir under $TRASHDIR, using the SSL endpoint. +# $dir under $TRASHDIR, using the SSL endpoint. # setup_remote_repo() needs to be run first. Output is written to clone_ssl.log. 
clone_repo_ssl() { cd "$TRASHDIR" @@ -213,6 +244,32 @@ clone_repo_ssl() { echo "$out" > clone_ssl.log echo "$out" } + +# setup_remote_repo_with_file creates a remote repo, clones it locally, commits +# a file tracked by LFS, and pushes it to the remote: +# +# setup_remote_repo_with_file "reponame" "filename" +setup_remote_repo_with_file() { + local reponame="$1" + local filename="$2" + + setup_remote_repo "remote_$reponame" + clone_repo "remote_$reponame" "clone_$reponame" + + git lfs track "$filename" + echo "$filename" > "$filename" + git add .gitattributes $filename + git commit -m "add $filename" | tee commit.log + + grep "master (root-commit)" commit.log + grep "2 files changed" commit.log + grep "create mode 100644 $filename" commit.log + grep "create mode 100644 .gitattributes" commit.log + + git push origin master 2>&1 | tee push.log + grep "master -> master" push.log +} + # setup initializes the clean, isolated environment for integration tests. setup() { cd "$ROOTDIR" @@ -233,10 +290,10 @@ setup() { if [ -z "$SKIPCOMPILE" ]; then for go in test/cmd/*.go; do - GO15VENDOREXPERIMENT=0 go build -o "$BINPATH/$(basename $go .go)" "$go" + GO15VENDOREXPERIMENT=1 go build -o "$BINPATH/$(basename $go .go)" "$go" done # Ensure API test util is built during tests to ensure it stays in sync - GO15VENDOREXPERIMENT=0 go build -o "$BINPATH/git-lfs-test-server-api" "test/git-lfs-test-server-api/main.go" "test/git-lfs-test-server-api/testdownload.go" "test/git-lfs-test-server-api/testupload.go" + GO15VENDOREXPERIMENT=1 go build -o "$BINPATH/git-lfs-test-server-api" "test/git-lfs-test-server-api/main.go" "test/git-lfs-test-server-api/testdownload.go" "test/git-lfs-test-server-api/testupload.go" fi LFSTEST_URL="$LFS_URL_FILE" LFSTEST_SSL_URL="$LFS_SSL_URL_FILE" LFSTEST_DIR="$REMOTEDIR" LFSTEST_CERT="$LFS_CERT_FILE" lfstest-gitserver > "$REMOTEDIR/gitserver.log" 2>&1 & diff --git a/test/testutils.go b/test/testutils.go index 70580722..bbc0a1c2 100644 --- a/test/testutils.go +++ b/test/testutils.go @@ -21,6 +21,7 @@ import ( "github.com/github/git-lfs/git" "github.com/github/git-lfs/lfs" + "github.com/github/git-lfs/localstorage" ) type RepoType int @@ -74,7 +75,7 @@ func (r *Repo) Pushd() { r.callback.Fatalf("Can't chdir %v", err) } r.popDir = oldwd - lfs.ResolveDirs() + localstorage.ResolveDirs() } func (r *Repo) Popd() { diff --git a/tools/filetools.go b/tools/filetools.go new file mode 100644 index 00000000..9bae4fbe --- /dev/null +++ b/tools/filetools.go @@ -0,0 +1,108 @@ +// Package tools contains other helper functions too small to justify their own package +// NOTE: Subject to change, do not rely on this package from outside git-lfs source +package tools + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FileOrDirExists determines if a file/dir exists, returns IsDir() results too. +func FileOrDirExists(path string) (exists bool, isDir bool) { + fi, err := os.Stat(path) + if err != nil { + return false, false + } else { + return true, fi.IsDir() + } +} + +// FileExists determines if a file (NOT dir) exists. +func FileExists(path string) bool { + ret, isDir := FileOrDirExists(path) + return ret && !isDir +} + +// DirExists determines if a dir (NOT file) exists. +func DirExists(path string) bool { + ret, isDir := FileOrDirExists(path) + return ret && isDir +} + +// FileExistsOfSize determines if a file exists and is of a specific size. 
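+// For example, FileExistsOfSize("a.dat", 1) is true only for a regular file
+// named a.dat that is exactly one byte long; it is false for directories and
+// for files of any other size.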
+func FileExistsOfSize(path string, sz int64) bool {
+	fi, err := os.Stat(path)
+
+	if err != nil {
+		return false
+	}
+
+	return !fi.IsDir() && fi.Size() == sz
+}
+
+// ResolveSymlinks ensures that if the path supplied is a symlink, it is
+// resolved to the actual concrete path
+func ResolveSymlinks(path string) string {
+	if len(path) == 0 {
+		return path
+	}
+
+	if resolved, err := filepath.EvalSymlinks(path); err == nil {
+		return resolved
+	}
+	return path
+}
+
+// RenameFileCopyPermissions moves srcfile to destfile, replacing destfile if
+// necessary and also copying the permissions of destfile if it already exists
+func RenameFileCopyPermissions(srcfile, destfile string) error {
+	info, err := os.Stat(destfile)
+	if os.IsNotExist(err) {
+		// no original file
+	} else if err != nil {
+		return err
+	} else {
+		if err := os.Chmod(srcfile, info.Mode()); err != nil {
+			return fmt.Errorf("can't set filemode on file %q: %v", srcfile, err)
+		}
+	}
+
+	if err := os.Rename(srcfile, destfile); err != nil {
+		return fmt.Errorf("cannot replace %q with %q: %v", destfile, srcfile, err)
+	}
+	return nil
+}
+
+// CleanPaths splits the given `paths` argument by the delimiter argument, and
+// then "cleans" each resulting path according to the filepath.Clean function
+// (see https://golang.org/pkg/path/filepath/#Clean).
+func CleanPaths(paths, delim string) (cleaned []string) {
+	// If paths is an empty string, splitting it will yield [""], which will
+	// become the filepath ".". To avoid this, bail out if the trimmed paths
+	// argument is empty.
+	if paths = strings.TrimSpace(paths); len(paths) == 0 {
+		return
+	}
+
+	for _, part := range strings.Split(paths, delim) {
+		part = strings.TrimSpace(part)
+
+		cleaned = append(cleaned, filepath.Clean(part))
+	}
+
+	return cleaned
+}
+
+// CleanPathsDefault cleans the paths contained in the given `paths` argument
+// delimited by the `delim` argument. If an empty set is returned from that
+// split, then the fallback argument is returned instead.
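+//
+// For example:
+//
+//	CleanPathsDefault("a/,b/c/", ",", []string{"."}) // => []string{"a", "b/c"}
+//	CleanPathsDefault("", ",", []string{"."})        // => []string{"."} (the fallback)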
+func CleanPathsDefault(paths, delim string, fallback []string) []string { + cleaned := CleanPaths(paths, delim) + if len(cleaned) == 0 { + return fallback + } + + return cleaned +} diff --git a/tools/filetools_test.go b/tools/filetools_test.go new file mode 100644 index 00000000..5935d984 --- /dev/null +++ b/tools/filetools_test.go @@ -0,0 +1,32 @@ +package tools_test + +import ( + "testing" + + "github.com/github/git-lfs/tools" + "github.com/stretchr/testify/assert" +) + +func TestCleanPathsCleansPaths(t *testing.T) { + cleaned := tools.CleanPaths("/foo/bar/,/foo/bar/baz", ",") + + assert.Equal(t, []string{"/foo/bar", "/foo/bar/baz"}, cleaned) +} + +func TestCleanPathsReturnsNoResultsWhenGivenNoPaths(t *testing.T) { + cleaned := tools.CleanPaths("", ",") + + assert.Empty(t, cleaned) +} + +func TestCleanPathsDefaultReturnsInputWhenResultsPresent(t *testing.T) { + cleaned := tools.CleanPathsDefault("/foo/bar/", ",", []string{"/default"}) + + assert.Equal(t, []string{"/foo/bar"}, cleaned) +} + +func TestCleanPathsDefaultReturnsDefaultWhenResultsAbsent(t *testing.T) { + cleaned := tools.CleanPathsDefault("", ",", []string{"/default"}) + + assert.Equal(t, []string{"/default"}, cleaned) +} diff --git a/tools/iotools.go b/tools/iotools.go new file mode 100644 index 00000000..1b99f077 --- /dev/null +++ b/tools/iotools.go @@ -0,0 +1,87 @@ +package tools + +import ( + "crypto/sha256" + "encoding/hex" + "hash" + "io" + + "github.com/github/git-lfs/progress" +) + +type readSeekCloserWrapper struct { + readSeeker io.ReadSeeker +} + +func (r *readSeekCloserWrapper) Read(p []byte) (n int, err error) { + return r.readSeeker.Read(p) +} + +func (r *readSeekCloserWrapper) Seek(offset int64, whence int) (int64, error) { + return r.readSeeker.Seek(offset, whence) +} + +func (r *readSeekCloserWrapper) Close() error { + return nil +} + +// NewReadSeekCloserWrapper wraps an io.ReadSeeker and implements a no-op Close() function +// to make it an io.ReadCloser +func NewReadSeekCloserWrapper(r io.ReadSeeker) io.ReadCloser { + return &readSeekCloserWrapper{r} +} + +// CopyWithCallback copies reader to writer while performing a progress callback +func CopyWithCallback(writer io.Writer, reader io.Reader, totalSize int64, cb progress.CopyCallback) (int64, error) { + if success, _ := CloneFile(writer, reader); success { + if cb != nil { + cb(totalSize, totalSize, 0) + } + return totalSize, nil + } + if cb == nil { + return io.Copy(writer, reader) + } + + cbReader := &progress.CallbackReader{ + C: cb, + TotalSize: totalSize, + Reader: reader, + } + return io.Copy(writer, cbReader) +} + +// Get a new Hash instance of the type used to hash LFS content +func NewLfsContentHash() hash.Hash { + return sha256.New() +} + +// HashingReader wraps a reader and calculates the hash of the data as it is read +type HashingReader struct { + reader io.Reader + hasher hash.Hash +} + +func NewHashingReader(r io.Reader) *HashingReader { + return &HashingReader{r, NewLfsContentHash()} +} + +func NewHashingReaderPreloadHash(r io.Reader, hash hash.Hash) *HashingReader { + return &HashingReader{r, hash} +} + +func (r *HashingReader) Hash() string { + return hex.EncodeToString(r.hasher.Sum(nil)) +} + +func (r *HashingReader) Read(b []byte) (int, error) { + w, err := r.reader.Read(b) + if err == nil || err == io.EOF { + _, e := r.hasher.Write(b[0:w]) + if e != nil && err == nil { + return w, e + } + } + + return w, err +} diff --git a/tools/util_generic.go b/tools/util_generic.go new file mode 100644 index 00000000..52943958 --- /dev/null +++ 
b/tools/util_generic.go @@ -0,0 +1,11 @@ +// +build !linux !cgo + +package tools + +import ( + "io" +) + +func CloneFile(writer io.Writer, reader io.Reader) (bool, error) { + return false, nil +} diff --git a/lfs/util_linux.go b/tools/util_linux.go similarity index 97% rename from lfs/util_linux.go rename to tools/util_linux.go index 776739dc..e43bb1d4 100644 --- a/lfs/util_linux.go +++ b/tools/util_linux.go @@ -1,6 +1,6 @@ // +build linux,cgo -package lfs +package tools /* #include diff --git a/tools/util_test.go b/tools/util_test.go new file mode 100644 index 00000000..a467cb31 --- /dev/null +++ b/tools/util_test.go @@ -0,0 +1,29 @@ +package tools + +import ( + "bytes" + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCopyWithCallback(t *testing.T) { + buf := bytes.NewBufferString("BOOYA") + + called := 0 + calledWritten := make([]int64, 0, 2) + + n, err := CopyWithCallback(ioutil.Discard, buf, 5, func(total int64, written int64, current int) error { + called += 1 + calledWritten = append(calledWritten, written) + assert.Equal(t, 5, int(total)) + return nil + }) + assert.Nil(t, err) + assert.Equal(t, 5, int(n)) + + assert.Equal(t, 1, called) + assert.Len(t, calledWritten, 1) + assert.Equal(t, 5, int(calledWritten[0])) +} diff --git a/transfer/adapterbase.go b/transfer/adapterbase.go new file mode 100644 index 00000000..5ba233de --- /dev/null +++ b/transfer/adapterbase.go @@ -0,0 +1,156 @@ +package transfer + +import ( + "fmt" + "sync" + "time" + + "github.com/github/git-lfs/errutil" + "github.com/rubyist/tracerx" +) + +const ( + // objectExpirationGracePeriod is the grace period applied to objects + // when checking whether or not they have expired. + objectExpirationGracePeriod = 5 * time.Second +) + +// adapterBase implements the common functionality for core adapters which +// process transfers with N workers handling an oid each, and which wait for +// authentication to succeed on one worker before proceeding +type adapterBase struct { + name string + direction Direction + transferImpl transferImplementation + jobChan chan *Transfer + cb TransferProgressCallback + outChan chan TransferResult + // WaitGroup to sync the completion of all workers + workerWait sync.WaitGroup + // WaitGroup to serialise the first transfer response to perform login if needed + authWait sync.WaitGroup +} + +// transferImplementation must be implemented to provide the actual upload/download +// implementation for all core transfer approaches that use adapterBase for +// convenience. This function will be called on multiple goroutines so it +// must be either stateless or thread safe. However it will never be called +// for the same oid in parallel. 
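+// (adapterBase arranges this: each queued Transfer is consumed by exactly one
+// worker goroutine; see worker() below.)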
+// If authOkFunc is not nil, implementations must call it as early as possible +// when authentication succeeded, before the whole file content is transferred +type transferImplementation interface { + DoTransfer(t *Transfer, cb TransferProgressCallback, authOkFunc func()) error +} + +func newAdapterBase(name string, dir Direction, ti transferImplementation) *adapterBase { + return &adapterBase{name: name, direction: dir, transferImpl: ti} +} + +func (a *adapterBase) Name() string { + return a.name +} + +func (a *adapterBase) Direction() Direction { + return a.direction +} + +func (a *adapterBase) Begin(maxConcurrency int, cb TransferProgressCallback, completion chan TransferResult) error { + a.cb = cb + a.outChan = completion + a.jobChan = make(chan *Transfer, 100) + + tracerx.Printf("xfer: adapter %q Begin() with %d workers", a.Name(), maxConcurrency) + + a.workerWait.Add(maxConcurrency) + a.authWait.Add(1) + for i := 0; i < maxConcurrency; i++ { + go a.worker(i) + } + tracerx.Printf("xfer: adapter %q started", a.Name()) + return nil +} + +func (a *adapterBase) Add(t *Transfer) { + tracerx.Printf("xfer: adapter %q Add() for %q", a.Name(), t.Object.Oid) + a.jobChan <- t +} + +func (a *adapterBase) End() { + tracerx.Printf("xfer: adapter %q End()", a.Name()) + close(a.jobChan) + // wait for all transfers to complete + a.workerWait.Wait() + if a.outChan != nil { + close(a.outChan) + } + tracerx.Printf("xfer: adapter %q stopped", a.Name()) +} + +// worker function, many of these run per adapter +func (a *adapterBase) worker(workerNum int) { + + tracerx.Printf("xfer: adapter %q worker %d starting", a.Name(), workerNum) + waitForAuth := workerNum > 0 + signalAuthOnResponse := workerNum == 0 + + // First worker is the only one allowed to start immediately + // The rest wait until successful response from 1st worker to + // make sure only 1 login prompt is presented if necessary + // Deliberately outside jobChan processing so we know worker 0 will process 1st item + if waitForAuth { + tracerx.Printf("xfer: adapter %q worker %d waiting for Auth", a.Name(), workerNum) + a.authWait.Wait() + tracerx.Printf("xfer: adapter %q worker %d auth signal received", a.Name(), workerNum) + } + + for t := range a.jobChan { + var authCallback func() + if signalAuthOnResponse { + authCallback = func() { + a.authWait.Done() + signalAuthOnResponse = false + } + } + tracerx.Printf("xfer: adapter %q worker %d processing job for %q", a.Name(), workerNum, t.Object.Oid) + + // Actual transfer happens here + var err error + if t.Object.IsExpired(time.Now().Add(objectExpirationGracePeriod)) { + tracerx.Printf("xfer: adapter %q worker %d found job for %q expired, retrying...", a.Name(), workerNum, t.Object.Oid) + err = errutil.NewRetriableError(fmt.Errorf("lfs/transfer: object %q has expired", t.Object.Oid)) + } else { + err = a.transferImpl.DoTransfer(t, a.cb, authCallback) + } + + if a.outChan != nil { + res := TransferResult{t, err} + a.outChan <- res + } + + tracerx.Printf("xfer: adapter %q worker %d finished job for %q", a.Name(), workerNum, t.Object.Oid) + } + // This will only happen if no jobs were submitted; just wake up all workers to finish + if signalAuthOnResponse { + a.authWait.Done() + } + tracerx.Printf("xfer: adapter %q worker %d stopping", a.Name(), workerNum) + a.workerWait.Done() +} + +func advanceCallbackProgress(cb TransferProgressCallback, t *Transfer, numBytes int64) { + if cb != nil { + // Must split into max int sizes since read count is int + const maxInt = int(^uint(0) >> 1) + for read := 
int64(0); read < numBytes; {
+			remainder := numBytes - read
+			if remainder > int64(maxInt) {
+				read += int64(maxInt)
+				cb(t.Name, t.Object.Size, read, maxInt)
+			} else {
+				read += remainder
+				cb(t.Name, t.Object.Size, read, int(remainder))
+			}
+
+		}
+	}
+}
diff --git a/transfer/basic_download.go b/transfer/basic_download.go
new file mode 100644
index 00000000..cfe039b1
--- /dev/null
+++ b/transfer/basic_download.go
@@ -0,0 +1,228 @@
+package transfer
+
+import (
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"os"
+	"path/filepath"
+	"regexp"
+	"strconv"
+
+	"github.com/github/git-lfs/errutil"
+	"github.com/github/git-lfs/httputil"
+	"github.com/github/git-lfs/localstorage"
+	"github.com/github/git-lfs/tools"
+	"github.com/rubyist/tracerx"
+)
+
+// Adapter for basic HTTP downloads, includes resuming via HTTP Range
+type basicDownloadAdapter struct {
+	*adapterBase
+}
+
+func (a *basicDownloadAdapter) ClearTempStorage() error {
+	return os.RemoveAll(a.tempDir())
+}
+
+func (a *basicDownloadAdapter) tempDir() string {
+	// Must be dedicated to this adapter as deleted by ClearTempStorage
+	// Also make local to this repo not global, and separate to localstorage temp,
+	// which gets cleared at the end of every invocation
+	d := filepath.Join(localstorage.Objects().RootDir, "incomplete")
+	if err := os.MkdirAll(d, 0755); err != nil {
+		return os.TempDir()
+	}
+	return d
+}
+
+func (a *basicDownloadAdapter) DoTransfer(t *Transfer, cb TransferProgressCallback, authOkFunc func()) error {
+
+	f, fromByte, hashSoFar, err := a.checkResumeDownload(t)
+	if err != nil {
+		return err
+	}
+	return a.download(t, cb, authOkFunc, f, fromByte, hashSoFar)
+}
+
+// Checks to see if a download can be resumed, and if so returns a non-nil locked file, byte start and hash
+func (a *basicDownloadAdapter) checkResumeDownload(t *Transfer) (outFile *os.File, fromByte int64, hashSoFar hash.Hash, e error) {
+	// lock the file by opening it for read/write, rather than checking Stat() etc
+	// which could be subject to race conditions by other processes
+	f, err := os.OpenFile(a.downloadFilename(t), os.O_RDWR, 0644)
+
+	if err != nil {
+		// Create a new file instead, must not already exist or error (permissions / race condition)
+		newfile, err := os.OpenFile(a.downloadFilename(t), os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
+		return newfile, 0, nil, err
+	}
+
+	// Successfully opened an existing file at this point
+	// Read any existing data into hash then return file handle at end
+	hash := tools.NewLfsContentHash()
+	n, err := io.Copy(hash, f)
+	if err != nil {
+		f.Close()
+		return nil, 0, nil, err
+	}
+	tracerx.Printf("xfer: Attempting to resume download of %q from byte %d", t.Object.Oid, n)
+	return f, n, hash, nil
+
+}
+
+// Create or open a download file for resuming
+func (a *basicDownloadAdapter) downloadFilename(t *Transfer) string {
+	// Not a temp file since we will be resuming it
+	return filepath.Join(a.tempDir(), t.Object.Oid+".tmp")
+}
+
+// download starts or resumes a download. Always closes dlFile if non-nil
+func (a *basicDownloadAdapter) download(t *Transfer, cb TransferProgressCallback, authOkFunc func(), dlFile *os.File, fromByte int64, hash hash.Hash) error {
+
+	if dlFile != nil {
+		// ensure we always close dlFile. Note that this does not conflict with the
+		// early close below, as close is idempotent.
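+		// (Closing an *os.File twice simply returns an error, which the
+		// deferred call ignores.)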
+		defer dlFile.Close()
+	}
+
+	rel, ok := t.Object.Rel("download")
+	if !ok {
+		return errors.New("Object not found on the server.")
+	}
+
+	req, err := httputil.NewHttpRequest("GET", rel.Href, rel.Header)
+	if err != nil {
+		return err
+	}
+
+	if fromByte > 0 {
+		if dlFile == nil || hash == nil {
+			return fmt.Errorf("Cannot restart %v from %d without a file & hash", t.Object.Oid, fromByte)
+		}
+		// We could just use a start byte, but since we know the length, be specific
+		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", fromByte, t.Object.Size-1))
+	}
+
+	res, err := httputil.DoHttpRequest(req, true)
+	if err != nil {
+		// Special-case status code 416 (Requested Range Not Satisfiable) - fall back
+		if fromByte > 0 && dlFile != nil && res.StatusCode == 416 {
+			tracerx.Printf("xfer: server rejected resume download request for %q from byte %d; re-downloading from start", t.Object.Oid, fromByte)
+			dlFile.Close()
+			os.Remove(dlFile.Name())
+			return a.download(t, cb, authOkFunc, nil, 0, nil)
+		}
+		return errutil.NewRetriableError(err)
+	}
+	httputil.LogTransfer("lfs.data.download", res)
+	defer res.Body.Close()
+
+	// Range request must return 206 & content range to confirm
+	if fromByte > 0 {
+		rangeRequestOk := false
+		var failReason string
+		// check 206 and Content-Range, fall back if either not as expected
+		if res.StatusCode == 206 {
+			// Probably a successful range request, check Content-Range
+			if rangeHdr := res.Header.Get("Content-Range"); rangeHdr != "" {
+				regex := regexp.MustCompile(`bytes (\d+)\-.*`)
+				match := regex.FindStringSubmatch(rangeHdr)
+				if match != nil && len(match) > 1 {
+					contentStart, _ := strconv.ParseInt(match[1], 10, 64)
+					if contentStart == fromByte {
+						rangeRequestOk = true
+					} else {
+						failReason = fmt.Sprintf("Content-Range start byte incorrect: %s expected %d", match[1], fromByte)
+					}
+				} else {
+					failReason = fmt.Sprintf("badly formatted Content-Range header: %q", rangeHdr)
+				}
+			} else {
+				failReason = "missing Content-Range header in response"
+			}
+		} else {
+			failReason = fmt.Sprintf("expected status code 206, received %d", res.StatusCode)
+		}
+		if rangeRequestOk {
+			tracerx.Printf("xfer: server accepted resume download request: %q from byte %d", t.Object.Oid, fromByte)
+			advanceCallbackProgress(cb, t, fromByte)
+		} else {
+			// Abort resume, perform regular download
+			tracerx.Printf("xfer: failed to resume download for %q from byte %d: %s. Re-downloading from start", t.Object.Oid, fromByte, failReason)
+			dlFile.Close()
+			os.Remove(dlFile.Name())
+			if res.StatusCode == 200 {
+				// If status code was 200 then server just ignored Range header and
+				// sent everything. 
Don't re-request, use this one from byte 0 + dlFile = nil + fromByte = 0 + hash = nil + } else { + // re-request needed + return a.download(t, cb, authOkFunc, nil, 0, nil) + } + } + } + + // Signal auth OK on success response, before starting download to free up + // other workers immediately + if authOkFunc != nil { + authOkFunc() + } + + var hasher *tools.HashingReader + if fromByte > 0 && hash != nil { + // pre-load hashing reader with previous content + hasher = tools.NewHashingReaderPreloadHash(res.Body, hash) + } else { + hasher = tools.NewHashingReader(res.Body) + } + + if dlFile == nil { + // New file start + dlFile, err = os.OpenFile(a.downloadFilename(t), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer dlFile.Close() + } + dlfilename := dlFile.Name() + // Wrap callback to give name context + ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { + if cb != nil { + return cb(t.Name, totalSize, readSoFar+fromByte, readSinceLast) + } + return nil + } + written, err := tools.CopyWithCallback(dlFile, hasher, res.ContentLength, ccb) + if err != nil { + return fmt.Errorf("cannot write data to tempfile %q: %v", dlfilename, err) + } + if err := dlFile.Close(); err != nil { + return fmt.Errorf("can't close tempfile %q: %v", dlfilename, err) + } + + if actual := hasher.Hash(); actual != t.Object.Oid { + return fmt.Errorf("Expected OID %s, got %s after %d bytes written", t.Object.Oid, actual, written) + } + + return tools.RenameFileCopyPermissions(dlfilename, t.Path) + +} + +func init() { + newfunc := func(name string, dir Direction) TransferAdapter { + switch dir { + case Download: + bd := &basicDownloadAdapter{newAdapterBase(name, dir, nil)} + // self implements impl + bd.transferImpl = bd + return bd + case Upload: + panic("Should never ask this func to upload") + } + return nil + } + RegisterNewTransferAdapterFunc(BasicAdapterName, Download, newfunc) +} diff --git a/transfer/basic_upload.go b/transfer/basic_upload.go new file mode 100644 index 00000000..88e3a905 --- /dev/null +++ b/transfer/basic_upload.go @@ -0,0 +1,148 @@ +package transfer + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strconv" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/httputil" + "github.com/github/git-lfs/progress" +) + +const ( + BasicAdapterName = "basic" +) + +// Adapter for basic uploads (non resumable) +type basicUploadAdapter struct { + *adapterBase +} + +func (a *basicUploadAdapter) ClearTempStorage() error { + // Should be empty already but also remove dir + return os.RemoveAll(a.tempDir()) +} + +func (a *basicUploadAdapter) tempDir() string { + // Must be dedicated to this adapter as deleted by ClearTempStorage + d := filepath.Join(os.TempDir(), "git-lfs-basic-temp") + if err := os.MkdirAll(d, 0755); err != nil { + return os.TempDir() + } + return d +} + +func (a *basicUploadAdapter) DoTransfer(t *Transfer, cb TransferProgressCallback, authOkFunc func()) error { + rel, ok := t.Object.Rel("upload") + if !ok { + return fmt.Errorf("No upload action for this object.") + } + + req, err := httputil.NewHttpRequest("PUT", rel.Href, rel.Header) + if err != nil { + return err + } + + if len(req.Header.Get("Content-Type")) == 0 { + req.Header.Set("Content-Type", "application/octet-stream") + } + + if req.Header.Get("Transfer-Encoding") == "chunked" { + req.TransferEncoding = []string{"chunked"} + } else { + req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size, 10)) + } + 
+ req.ContentLength = t.Object.Size + + f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644) + if err != nil { + return errutil.Error(err) + } + defer f.Close() + + // Ensure progress callbacks made while uploading + // Wrap callback to give name context + ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error { + if cb != nil { + return cb(t.Name, totalSize, readSoFar, readSinceLast) + } + return nil + } + var reader io.Reader + reader = &progress.CallbackReader{ + C: ccb, + TotalSize: t.Object.Size, + Reader: f, + } + + // Signal auth was ok on first read; this frees up other workers to start + if authOkFunc != nil { + reader = newStartCallbackReader(reader, func(*startCallbackReader) { + authOkFunc() + }) + } + + req.Body = ioutil.NopCloser(reader) + + res, err := httputil.DoHttpRequest(req, true) + if err != nil { + return errutil.NewRetriableError(err) + } + httputil.LogTransfer("lfs.data.upload", res) + + // A status code of 403 likely means that an authentication token for the + // upload has expired. This can be safely retried. + if res.StatusCode == 403 { + return errutil.NewRetriableError(err) + } + + if res.StatusCode > 299 { + return errutil.Errorf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode) + } + + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + return api.VerifyUpload(t.Object) +} + +// startCallbackReader is a reader wrapper which calls a function as soon as the +// first Read() call is made. This callback is only made once +type startCallbackReader struct { + r io.Reader + cb func(*startCallbackReader) + cbDone bool +} + +func (s *startCallbackReader) Read(p []byte) (n int, err error) { + if !s.cbDone && s.cb != nil { + s.cb(s) + s.cbDone = true + } + return s.r.Read(p) +} +func newStartCallbackReader(r io.Reader, cb func(*startCallbackReader)) *startCallbackReader { + return &startCallbackReader{r, cb, false} +} + +func init() { + newfunc := func(name string, dir Direction) TransferAdapter { + switch dir { + case Upload: + bu := &basicUploadAdapter{newAdapterBase(name, dir, nil)} + // self implements impl + bu.transferImpl = bu + return bu + case Download: + panic("Should never ask this func for basic download") + } + return nil + } + RegisterNewTransferAdapterFunc(BasicAdapterName, Upload, newfunc) +} diff --git a/transfer/transfer.go b/transfer/transfer.go new file mode 100644 index 00000000..2dae2a36 --- /dev/null +++ b/transfer/transfer.go @@ -0,0 +1,196 @@ +// Package transfer collects together adapters for uploading and downloading LFS content +// NOTE: Subject to change, do not rely on this package from outside git-lfs source +package transfer + +import ( + "sync" + + "github.com/github/git-lfs/config" + + "github.com/github/git-lfs/api" + "github.com/rubyist/tracerx" +) + +type Direction int + +const ( + Upload = Direction(iota) + Download = Direction(iota) +) + +// NewTransferAdapterFunc creates new instances of TransferAdapter. 
Code that wishes +// to provide new TransferAdapter instances should pass an implementation of this +// function to RegisterNewTransferAdapterFunc +// name and dir are to provide context if one func implements many instances +type NewTransferAdapterFunc func(name string, dir Direction) TransferAdapter + +var ( + funcMutex sync.Mutex + downloadAdapterFuncs = make(map[string]NewTransferAdapterFunc) + uploadAdapterFuncs = make(map[string]NewTransferAdapterFunc) +) + +type TransferProgressCallback func(name string, totalSize, readSoFar int64, readSinceLast int) error + +// TransferAdapter is implemented by types which can upload and/or download LFS +// file content to a remote store. Each TransferAdapter accepts one or more requests +// which it may schedule and parallelise in whatever way it chooses, clients of +// this interface will receive notifications of progress and completion asynchronously. +// TransferAdapters support transfers in one direction; if an implementation +// provides support for upload and download, it should be instantiated twice, +// advertising support for each direction separately. +// Note that TransferAdapter only implements the actual upload/download of content +// itself; organising the wider process including calling the API to get URLs, +// handling progress reporting and retries is the job of the core TransferQueue. +// This is so that the orchestration remains core & standard but TransferAdapter +// can be changed to physically transfer to different hosts with less code. +type TransferAdapter interface { + // Name returns the name of this adapter, which is the same for all instances + // of this type of adapter + Name() string + // Direction returns whether this instance is an upload or download instance + // TransferAdapter instances can only be one or the other, although the same + // type may be instantiated for each direction + Direction() Direction + // Begin a new batch of uploads or downloads. Call this first, followed by + // one or more Add calls. maxConcurrency controls the number of transfers + // that may be done at once. The passed in callback will receive updates on + // progress, and the completion channel will receive completion notifications + // Either argument may be nil if not required by the client + Begin(maxConcurrency int, cb TransferProgressCallback, completion chan TransferResult) error + // Add queues a download/upload, which will complete asynchronously and + // notify the callbacks given to Begin() + Add(t *Transfer) + // Indicate that all transfers have been scheduled and resources can be released + // once the queued items have completed. 
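+	// (For example, adapterBase closes its job channel here and then waits on
+	// a WaitGroup for its workers to drain.)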
+ // This call blocks until all items have been processed + End() + // ClearTempStorage clears any temporary files, such as unfinished downloads that + // would otherwise be resumed + ClearTempStorage() error +} + +// General struct for both uploads and downloads +type Transfer struct { + // Name of the file that triggered this transfer + Name string + // Object from API which provides the core data for this transfer + Object *api.ObjectResource + // Path for uploads is the source of data to send, for downloads is the + // location to place the final result + Path string +} + +// NewTransfer creates a new Transfer instance +func NewTransfer(name string, obj *api.ObjectResource, path string) *Transfer { + return &Transfer{name, obj, path} +} + +// Result of a transfer returned through CompletionChannel() +type TransferResult struct { + Transfer *Transfer + // This will be non-nil if there was an error transferring this item + Error error +} + +// GetAdapterNames returns a list of the names of adapters available to be created +func GetAdapterNames(dir Direction) []string { + switch dir { + case Upload: + return GetUploadAdapterNames() + case Download: + return GetDownloadAdapterNames() + } + return nil +} + +// GetDownloadAdapterNames returns a list of the names of download adapters available to be created +func GetDownloadAdapterNames() []string { + + if config.Config.BasicTransfersOnly() { + return []string{BasicAdapterName} + } + + funcMutex.Lock() + defer funcMutex.Unlock() + + ret := make([]string, 0, len(downloadAdapterFuncs)) + for n, _ := range downloadAdapterFuncs { + ret = append(ret, n) + } + return ret +} + +// GetUploadAdapterNames returns a list of the names of upload adapters available to be created +func GetUploadAdapterNames() []string { + + if config.Config.BasicTransfersOnly() { + return []string{BasicAdapterName} + } + + funcMutex.Lock() + defer funcMutex.Unlock() + + ret := make([]string, 0, len(uploadAdapterFuncs)) + for n, _ := range uploadAdapterFuncs { + ret = append(ret, n) + } + return ret +} + +// RegisterNewTransferAdapterFunc registers a new function for creating upload +// or download adapters. 
If a function with that name & direction is already
+// registered, it is overridden
+func RegisterNewTransferAdapterFunc(name string, dir Direction, f NewTransferAdapterFunc) {
+	funcMutex.Lock()
+	defer funcMutex.Unlock()
+
+	switch dir {
+	case Upload:
+		uploadAdapterFuncs[name] = f
+	case Download:
+		downloadAdapterFuncs[name] = f
+	}
+}
+
+// Create a new adapter by name and direction; default to BasicAdapterName if it doesn't exist
+func NewAdapterOrDefault(name string, dir Direction) TransferAdapter {
+	if len(name) == 0 {
+		name = BasicAdapterName
+	}
+
+	a := NewAdapter(name, dir)
+	if a == nil {
+		tracerx.Printf("Defaulting to basic transfer adapter since %q did not exist", name)
+		a = NewAdapter(BasicAdapterName, dir)
+	}
+	return a
+}
+
+// Create a new adapter by name and direction, or nil if it doesn't exist
+func NewAdapter(name string, dir Direction) TransferAdapter {
+	funcMutex.Lock()
+	defer funcMutex.Unlock()
+
+	switch dir {
+	case Upload:
+		if u, ok := uploadAdapterFuncs[name]; ok {
+			return u(name, dir)
+		}
+	case Download:
+		if d, ok := downloadAdapterFuncs[name]; ok {
+			return d(name, dir)
+		}
+	}
+	return nil
+}
+
+// Create a new download adapter by name, or BasicAdapterName if it doesn't exist
+func NewDownloadAdapter(name string) TransferAdapter {
+	return NewAdapterOrDefault(name, Download)
+}
+
+// Create a new upload adapter by name, or BasicAdapterName if it doesn't exist
+func NewUploadAdapter(name string) TransferAdapter {
+	return NewAdapterOrDefault(name, Upload)
+}
diff --git a/transfer/transfer_test.go b/transfer/transfer_test.go
new file mode 100644
index 00000000..12532504
--- /dev/null
+++ b/transfer/transfer_test.go
@@ -0,0 +1,124 @@
+package transfer
+
+import (
+	"testing"
+
+	"github.com/github/git-lfs/config"
+
+	"github.com/stretchr/testify/assert"
+)
+
+type testAdapter struct {
+	name string
+	dir  Direction
+}
+
+func (a *testAdapter) Name() string {
+	return a.name
+}
+func (a *testAdapter) Direction() Direction {
+	return a.dir
+}
+func (a *testAdapter) Begin(maxConcurrency int, cb TransferProgressCallback, completion chan TransferResult) error {
+	return nil
+}
+func (a *testAdapter) Add(t *Transfer) {
+}
+func (a *testAdapter) End() {
+}
+func (a *testAdapter) ClearTempStorage() error {
+	return nil
+}
+func newTestAdapter(name string, dir Direction) TransferAdapter {
+	return &testAdapter{name, dir}
+}
+func newRenamedTestAdapter(name string, dir Direction) TransferAdapter {
+	return &testAdapter{"RENAMED", dir}
+}
+func resetAdapters() {
+	uploadAdapterFuncs = make(map[string]NewTransferAdapterFunc)
+	downloadAdapterFuncs = make(map[string]NewTransferAdapterFunc)
+}
+
+func testBasicAdapterExists(t *testing.T) {
+	assert := assert.New(t)
+
+	dls := GetDownloadAdapterNames()
+	if assert.NotNil(dls) {
+		assert.Equal([]string{"basic"}, dls)
+	}
+	uls := GetUploadAdapterNames()
+	if assert.NotNil(uls) {
+		assert.Equal([]string{"basic"}, uls)
+	}
+	da := NewDownloadAdapter("basic")
+	if assert.NotNil(da) {
+		assert.Equal("basic", da.Name())
+		assert.Equal(Download, da.Direction())
+	}
+	ua := NewUploadAdapter("basic")
+	if assert.NotNil(ua) {
+		assert.Equal("basic", ua.Name())
+		assert.Equal(Upload, ua.Direction())
+	}
+}
+
+func testAdapterRegAndOverride(t *testing.T) {
+	assert := assert.New(t)
+
+	assert.Nil(NewDownloadAdapter("test"))
+	assert.Nil(NewUploadAdapter("test"))
+
+	RegisterNewTransferAdapterFunc("test", Upload, newTestAdapter)
+	assert.Nil(NewDownloadAdapter("test"))
+	assert.NotNil(NewUploadAdapter("test"))
+
+	
RegisterNewTransferAdapterFunc("test", Download, newTestAdapter) + da := NewDownloadAdapter("test") + if assert.NotNil(da) { + assert.Equal("test", da.Name()) + assert.Equal(Download, da.Direction()) + } + ua := NewUploadAdapter("test") + if assert.NotNil(ua) { + assert.Equal("test", ua.Name()) + assert.Equal(Upload, ua.Direction()) + } + + // Test override + RegisterNewTransferAdapterFunc("test", Upload, newRenamedTestAdapter) + ua = NewUploadAdapter("test") + if assert.NotNil(ua) { + assert.Equal("RENAMED", ua.Name()) + assert.Equal(Upload, ua.Direction()) + } + da = NewDownloadAdapter("test") + if assert.NotNil(da) { + assert.Equal("test", da.Name()) + assert.Equal(Download, da.Direction()) + } + RegisterNewTransferAdapterFunc("test", Download, newRenamedTestAdapter) + da = NewDownloadAdapter("test") + if assert.NotNil(da) { + assert.Equal("RENAMED", da.Name()) + assert.Equal(Download, da.Direction()) + } + +} + +func testAdapterRegButBasicOnly(t *testing.T) { + assert := assert.New(t) + + config.Config.SetConfig("lfs.basictransfersonly", "yes") + RegisterNewTransferAdapterFunc("test", Upload, newTestAdapter) + RegisterNewTransferAdapterFunc("test", Download, newTestAdapter) + // Will still be created if we ask for them + assert.NotNil(NewUploadAdapter("test")) + assert.NotNil(NewDownloadAdapter("test")) + + // But list will exclude + ld := GetDownloadAdapterNames() + assert.Equal([]string{BasicAdapterName}, ld) + lu := GetUploadAdapterNames() + assert.Equal([]string{BasicAdapterName}, lu) +} diff --git a/transfer/tus_upload.go b/transfer/tus_upload.go new file mode 100644 index 00000000..2804e07c --- /dev/null +++ b/transfer/tus_upload.go @@ -0,0 +1,166 @@ +package transfer + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strconv" + + "github.com/github/git-lfs/api" + "github.com/github/git-lfs/errutil" + "github.com/github/git-lfs/httputil" + "github.com/github/git-lfs/progress" + "github.com/rubyist/tracerx" +) + +const ( + TusAdapterName = "tus" + TusVersion = "1.0.0" +) + +// Adapter for tus.io protocol resumaable uploads +type tusUploadAdapter struct { + *adapterBase +} + +func (a *tusUploadAdapter) ClearTempStorage() error { + // nothing to do, all temp state is on the server end + return nil +} + +func (a *tusUploadAdapter) DoTransfer(t *Transfer, cb TransferProgressCallback, authOkFunc func()) error { + rel, ok := t.Object.Rel("upload") + if !ok { + return fmt.Errorf("No upload action for this object.") + } + + // Note not supporting the Creation extension since the batch API generates URLs + // Also not supporting Concatenation to support parallel uploads of chunks; forward only + + // 1. 
Send HEAD request to determine upload start point
+	//    Request must include Tus-Resumable header (version)
+	tracerx.Printf("xfer: sending tus.io HEAD request for %q", t.Object.Oid)
+	req, err := httputil.NewHttpRequest("HEAD", rel.Href, rel.Header)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Tus-Resumable", TusVersion)
+	res, err := httputil.DoHttpRequest(req, false)
+	if err != nil {
+		return errutil.NewRetriableError(err)
+	}
+
+	// Response will contain Upload-Offset if supported
+	offHdr := res.Header.Get("Upload-Offset")
+	if len(offHdr) == 0 {
+		return fmt.Errorf("Missing Upload-Offset header from tus.io HEAD response at %q, contact server admin", rel.Href)
+	}
+	offset, err := strconv.ParseInt(offHdr, 10, 64)
+	if err != nil || offset < 0 {
+		return fmt.Errorf("Invalid Upload-Offset value %q in response from tus.io HEAD at %q, contact server admin", offHdr, rel.Href)
+	}
+	// Upload-Offset=size means already completed (skip)
+	// Batch API will probably already detect this, but handle just in case
+	if offset >= t.Object.Size {
+		tracerx.Printf("xfer: tus.io HEAD offset %d indicates %q is already fully uploaded, skipping", offset, t.Object.Oid)
+		advanceCallbackProgress(cb, t, t.Object.Size)
+		return nil
+	}
+
+	// Open file for uploading
+	f, err := os.OpenFile(t.Path, os.O_RDONLY, 0644)
+	if err != nil {
+		return errutil.Error(err)
+	}
+	defer f.Close()
+
+	// Upload-Offset=0 means start from scratch, but still send PATCH
+	if offset == 0 {
+		tracerx.Printf("xfer: tus.io uploading %q from start", t.Object.Oid)
+	} else {
+		tracerx.Printf("xfer: tus.io resuming upload %q from %d", t.Object.Oid, offset)
+		advanceCallbackProgress(cb, t, offset)
+		_, err := f.Seek(offset, os.SEEK_CUR)
+		if err != nil {
+			return errutil.Error(err)
+		}
+	}
+
+	// 2. Send PATCH request with byte start point (even if 0) in Upload-Offset
+	//    Response status must be 204
+	//    Response Upload-Offset must be request Upload-Offset plus sent bytes
+	//    Response may include an Upload-Expires header, in which case check it has not passed
+
+	tracerx.Printf("xfer: sending tus.io PATCH request for %q", t.Object.Oid)
+	req, err = httputil.NewHttpRequest("PATCH", rel.Href, rel.Header)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Tus-Resumable", TusVersion)
+	req.Header.Set("Upload-Offset", strconv.FormatInt(offset, 10))
+	req.Header.Set("Content-Type", "application/offset+octet-stream")
+	req.Header.Set("Content-Length", strconv.FormatInt(t.Object.Size-offset, 10))
+	req.ContentLength = t.Object.Size - offset
+
+	// Ensure progress callbacks made while uploading
+	// Wrap callback to give name context
+	ccb := func(totalSize int64, readSoFar int64, readSinceLast int) error {
+		if cb != nil {
+			return cb(t.Name, totalSize, readSoFar, readSinceLast)
+		}
+		return nil
+	}
+	var reader io.Reader
+	reader = &progress.CallbackReader{
+		C:         ccb,
+		TotalSize: t.Object.Size,
+		Reader:    f,
+	}
+
+	// Signal auth was ok on first read; this frees up other workers to start
+	if authOkFunc != nil {
+		reader = newStartCallbackReader(reader, func(*startCallbackReader) {
+			authOkFunc()
+		})
+	}
+
+	req.Body = ioutil.NopCloser(reader)
+
+	res, err = httputil.DoHttpRequest(req, false)
+	if err != nil {
+		return errutil.NewRetriableError(err)
+	}
+	httputil.LogTransfer("lfs.data.upload", res)
+
+	// A status code of 403 likely means that an authentication token for the
+	// upload has expired. This can be safely retried. 
+ if res.StatusCode == 403 { + return errutil.NewRetriableError(err) + } + + if res.StatusCode > 299 { + return errutil.Errorf(nil, "Invalid status for %s: %d", httputil.TraceHttpReq(req), res.StatusCode) + } + + io.Copy(ioutil.Discard, res.Body) + res.Body.Close() + + return api.VerifyUpload(t.Object) +} + +func init() { + newfunc := func(name string, dir Direction) TransferAdapter { + switch dir { + case Upload: + bu := &tusUploadAdapter{newAdapterBase(name, dir, nil)} + // self implements impl + bu.transferImpl = bu + return bu + case Download: + panic("Should never ask tus.io to download") + } + return nil + } + RegisterNewTransferAdapterFunc(TusAdapterName, Upload, newfunc) +} diff --git a/vendor/_nuts/code.google.com/p/log4go/.hgtags b/vendor/_nuts/code.google.com/p/log4go/.hgtags deleted file mode 100644 index 72a2eea2..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/.hgtags +++ /dev/null @@ -1,4 +0,0 @@ -4fbe6aadba231e838a449d340e43bdaab0bf85bd go.weekly.2012-02-07 -56168fd53249d639c25c74ced881fffb20d27be9 go.weekly.2012-02-22 -56168fd53249d639c25c74ced881fffb20d27be9 go.weekly.2012-02-22 -5c22fbd77d91f54d76cdbdee05318699754c44cc go.weekly.2012-02-22 diff --git a/vendor/_nuts/code.google.com/p/log4go/LICENSE b/vendor/_nuts/code.google.com/p/log4go/LICENSE deleted file mode 100644 index 7093402b..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2010, Kyle Lemons . All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/_nuts/code.google.com/p/log4go/README b/vendor/_nuts/code.google.com/p/log4go/README deleted file mode 100644 index 16d80ecb..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/README +++ /dev/null @@ -1,12 +0,0 @@ -Please see http://log4go.googlecode.com/ - -Installation: -- Run `goinstall log4go.googlecode.com/hg` - -Usage: -- Add the following import: -import l4g "log4go.googlecode.com/hg" - -Acknowledgements: -- pomack - For providing awesome patches to bring log4go up to the latest Go spec diff --git a/vendor/_nuts/code.google.com/p/log4go/config.go b/vendor/_nuts/code.google.com/p/log4go/config.go deleted file mode 100644 index f048b69f..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/config.go +++ /dev/null @@ -1,288 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
- -package log4go - -import ( - "encoding/xml" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -type xmlProperty struct { - Name string `xml:"name,attr"` - Value string `xml:",chardata"` -} - -type xmlFilter struct { - Enabled string `xml:"enabled,attr"` - Tag string `xml:"tag"` - Level string `xml:"level"` - Type string `xml:"type"` - Property []xmlProperty `xml:"property"` -} - -type xmlLoggerConfig struct { - Filter []xmlFilter `xml:"filter"` -} - -// Load XML configuration; see examples/example.xml for documentation -func (log Logger) LoadConfiguration(filename string) { - log.Close() - - // Open the configuration file - fd, err := os.Open(filename) - if err != nil { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not open %q for reading: %s\n", filename, err) - os.Exit(1) - } - - contents, err := ioutil.ReadAll(fd) - if err != nil { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not read %q: %s\n", filename, err) - os.Exit(1) - } - - xc := new(xmlLoggerConfig) - if err := xml.Unmarshal(contents, xc); err != nil { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not parse XML configuration in %q: %s\n", filename, err) - os.Exit(1) - } - - for _, xmlfilt := range xc.Filter { - var filt LogWriter - var lvl level - bad, good, enabled := false, true, false - - // Check required children - if len(xmlfilt.Enabled) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required attribute %s for filter missing in %s\n", "enabled", filename) - bad = true - } else { - enabled = xmlfilt.Enabled != "false" - } - if len(xmlfilt.Tag) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "tag", filename) - bad = true - } - if len(xmlfilt.Type) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "type", filename) - bad = true - } - if len(xmlfilt.Level) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "level", filename) - bad = true - } - - switch xmlfilt.Level { - case "FINEST": - lvl = FINEST - case "FINE": - lvl = FINE - case "DEBUG": - lvl = DEBUG - case "TRACE": - lvl = TRACE - case "INFO": - lvl = INFO - case "WARNING": - lvl = WARNING - case "ERROR": - lvl = ERROR - case "CRITICAL": - lvl = CRITICAL - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter has unknown value in %s: %s\n", "level", filename, xmlfilt.Level) - bad = true - } - - // Just so all of the required attributes are errored at the same time if missing - if bad { - os.Exit(1) - } - - switch xmlfilt.Type { - case "console": - filt, good = xmlToConsoleLogWriter(filename, xmlfilt.Property, enabled) - case "file": - filt, good = xmlToFileLogWriter(filename, xmlfilt.Property, enabled) - case "xml": - filt, good = xmlToXMLLogWriter(filename, xmlfilt.Property, enabled) - case "socket": - filt, good = xmlToSocketLogWriter(filename, xmlfilt.Property, enabled) - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not load XML configuration in %s: unknown filter type \"%s\"\n", filename, xmlfilt.Type) - os.Exit(1) - } - - // Just so all of the required params are errored at the same time if wrong - if !good { - os.Exit(1) - } - - // If we're disabled (syntax and correctness checks only), don't add to logger - if !enabled { - continue - } - - log[xmlfilt.Tag] = &Filter{lvl, filt} - } -} - -func xmlToConsoleLogWriter(filename string, props []xmlProperty, enabled bool) 
(ConsoleLogWriter, bool) { - // Parse properties - for _, prop := range props { - switch prop.Name { - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for console filter in %s\n", prop.Name, filename) - } - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - return NewConsoleLogWriter(), true -} - -// Parse a number with K/M/G suffixes based on thousands (1000) or 2^10 (1024) -func strToNumSuffix(str string, mult int) int { - num := 1 - if len(str) > 1 { - switch str[len(str)-1] { - case 'G', 'g': - num *= mult - fallthrough - case 'M', 'm': - num *= mult - fallthrough - case 'K', 'k': - num *= mult - str = str[0 : len(str)-1] - } - } - parsed, _ := strconv.Atoi(str) - return parsed * num -} -func xmlToFileLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { - file := "" - format := "[%D %T] [%L] (%S) %M" - maxlines := 0 - maxsize := 0 - daily := false - rotate := false - - // Parse properties - for _, prop := range props { - switch prop.Name { - case "filename": - file = strings.Trim(prop.Value, " \r\n") - case "format": - format = strings.Trim(prop.Value, " \r\n") - case "maxlines": - maxlines = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) - case "maxsize": - maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) - case "daily": - daily = strings.Trim(prop.Value, " \r\n") != "false" - case "rotate": - rotate = strings.Trim(prop.Value, " \r\n") != "false" - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) - } - } - - // Check properties - if len(file) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "filename", filename) - return nil, false - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - flw := NewFileLogWriter(file, rotate) - flw.SetFormat(format) - flw.SetRotateLines(maxlines) - flw.SetRotateSize(maxsize) - flw.SetRotateDaily(daily) - return flw, true -} - -func xmlToXMLLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { - file := "" - maxrecords := 0 - maxsize := 0 - daily := false - rotate := false - - // Parse properties - for _, prop := range props { - switch prop.Name { - case "filename": - file = strings.Trim(prop.Value, " \r\n") - case "maxrecords": - maxrecords = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) - case "maxsize": - maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) - case "daily": - daily = strings.Trim(prop.Value, " \r\n") != "false" - case "rotate": - rotate = strings.Trim(prop.Value, " \r\n") != "false" - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for xml filter in %s\n", prop.Name, filename) - } - } - - // Check properties - if len(file) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for xml filter missing in %s\n", "filename", filename) - return nil, false - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - xlw := NewXMLLogWriter(file, rotate) - xlw.SetRotateLines(maxrecords) - xlw.SetRotateSize(maxsize) - xlw.SetRotateDaily(daily) - return xlw, true -} - -func xmlToSocketLogWriter(filename string, props []xmlProperty, enabled bool) (SocketLogWriter, bool) { - endpoint := "" - protocol := "udp" - - // Parse properties - for _, prop := 
range props { - switch prop.Name { - case "endpoint": - endpoint = strings.Trim(prop.Value, " \r\n") - case "protocol": - protocol = strings.Trim(prop.Value, " \r\n") - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) - } - } - - // Check properties - if len(endpoint) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "endpoint", filename) - return nil, false - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - return NewSocketLogWriter(protocol, endpoint), true -} diff --git a/vendor/_nuts/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go b/vendor/_nuts/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go deleted file mode 100644 index 7169ec97..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/examples/ConsoleLogWriter_Manual.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "time" -) - -import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go" - -func main() { - log := l4g.NewLogger() - log.AddFilter("stdout", l4g.DEBUG, l4g.NewConsoleLogWriter()) - log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) -} diff --git a/vendor/_nuts/code.google.com/p/log4go/examples/FileLogWriter_Manual.go b/vendor/_nuts/code.google.com/p/log4go/examples/FileLogWriter_Manual.go deleted file mode 100644 index 872bb45b..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/examples/FileLogWriter_Manual.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "io" - "os" - "time" -) - -import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go" - -const ( - filename = "flw.log" -) - -func main() { - // Get a new logger instance - log := l4g.NewLogger() - - // Create a default logger that is logging messages of FINE or higher - log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter(filename, false)) - log.Close() - - /* Can also specify manually via the following: (these are the defaults) */ - flw := l4g.NewFileLogWriter(filename, false) - flw.SetFormat("[%D %T] [%L] (%S) %M") - flw.SetRotate(false) - flw.SetRotateSize(0) - flw.SetRotateLines(0) - flw.SetRotateDaily(false) - log.AddFilter("file", l4g.FINE, flw) - - // Log some experimental messages - log.Finest("Everything is created now (notice that I will not be printing to the file)") - log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) - log.Critical("Time to close out!") - - // Close the log - log.Close() - - // Print what was logged to the file (yes, I know I'm skipping error checking) - fd, _ := os.Open(filename) - in := bufio.NewReader(fd) - fmt.Print("Messages logged to file were: (line numbers not included)\n") - for lineno := 1; ; lineno++ { - line, err := in.ReadString('\n') - if err == io.EOF { - break - } - fmt.Printf("%3d:\t%s", lineno, line) - } - fd.Close() - - // Remove the file so it's not lying around - os.Remove(filename) -} diff --git a/vendor/_nuts/code.google.com/p/log4go/examples/SimpleNetLogServer.go b/vendor/_nuts/code.google.com/p/log4go/examples/SimpleNetLogServer.go deleted file mode 100644 index 83c80ad1..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/examples/SimpleNetLogServer.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "net" - "os" -) - -var ( - port = flag.String("p", "12124", "Port number to listen on") -) - -func e(err error) { - if err != nil { - 
fmt.Printf("Erroring out: %s\n", err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - // Bind to the port - bind, err := net.ResolveUDPAddr("0.0.0.0:" + *port) - e(err) - - // Create listener - listener, err := net.ListenUDP("udp", bind) - e(err) - - fmt.Printf("Listening to port %s...\n", *port) - for { - // read into a new buffer - buffer := make([]byte, 1024) - _, _, err := listener.ReadFrom(buffer) - e(err) - - // log to standard output - fmt.Println(string(buffer)) - } -} diff --git a/vendor/_nuts/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go b/vendor/_nuts/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go deleted file mode 100644 index d553699f..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/examples/SocketLogWriter_Manual.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "time" -) - -import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go" - -func main() { - log := l4g.NewLogger() - log.AddFilter("network", l4g.FINEST, l4g.NewSocketLogWriter("udp", "192.168.1.255:12124")) - - // Run `nc -u -l -p 12124` or similar before you run this to see the following message - log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) - - // This makes sure the output stream buffer is written - log.Close() -} diff --git a/vendor/_nuts/code.google.com/p/log4go/examples/XMLConfigurationExample.go b/vendor/_nuts/code.google.com/p/log4go/examples/XMLConfigurationExample.go deleted file mode 100644 index d6485753..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/examples/XMLConfigurationExample.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go" - -func main() { - // Load the configuration (isn't this easy?) - l4g.LoadConfiguration("example.xml") - - // And now we're ready! - l4g.Finest("This will only go to those of you really cool UDP kids! If you change enabled=true.") - l4g.Debug("Oh no! %d + %d = %d!", 2, 2, 2+2) - l4g.Info("About that time, eh chaps?") -} diff --git a/vendor/_nuts/code.google.com/p/log4go/examples/example.xml b/vendor/_nuts/code.google.com/p/log4go/examples/example.xml deleted file mode 100644 index e791278c..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/examples/example.xml +++ /dev/null @@ -1,47 +0,0 @@ - - - stdout - console - - DEBUG - - - file - file - FINEST - test.log - - [%D %T] [%L] (%S) %M - false - 0M - 0K - true - - - xmllog - xml - TRACE - trace.xml - true - 100M - 6K - false - - - donotopen - socket - FINEST - 192.168.1.255:12124 - udp - - diff --git a/vendor/_nuts/code.google.com/p/log4go/filelog.go b/vendor/_nuts/code.google.com/p/log4go/filelog.go deleted file mode 100644 index 9cbd815d..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/filelog.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
- -package log4go - -import ( - "os" - "fmt" - "time" -) - -// This log writer sends output to a file -type FileLogWriter struct { - rec chan *LogRecord - rot chan bool - - // The opened file - filename string - file *os.File - - // The logging format - format string - - // File header/trailer - header, trailer string - - // Rotate at linecount - maxlines int - maxlines_curlines int - - // Rotate at size - maxsize int - maxsize_cursize int - - // Rotate daily - daily bool - daily_opendate int - - // Keep old logfiles (.001, .002, etc) - rotate bool -} - -// This is the FileLogWriter's output method -func (w *FileLogWriter) LogWrite(rec *LogRecord) { - w.rec <- rec -} - -func (w *FileLogWriter) Close() { - close(w.rec) -} - -// NewFileLogWriter creates a new LogWriter which writes to the given file and -// has rotation enabled if rotate is true. -// -// If rotate is true, any time a new log file is opened, the old one is renamed -// with a .### extension to preserve it. The various Set* methods can be used -// to configure log rotation based on lines, size, and daily. -// -// The standard log-line format is: -// [%D %T] [%L] (%S) %M -func NewFileLogWriter(fname string, rotate bool) *FileLogWriter { - w := &FileLogWriter{ - rec: make(chan *LogRecord, LogBufferLength), - rot: make(chan bool), - filename: fname, - format: "[%D %T] [%L] (%S) %M", - rotate: rotate, - } - - // open the file for the first time - if err := w.intRotate(); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return nil - } - - go func() { - defer func() { - if w.file != nil { - fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) - w.file.Close() - } - }() - - for { - select { - case <-w.rot: - if err := w.intRotate(); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return - } - case rec, ok := <-w.rec: - if !ok { - return - } - now := time.Now() - if (w.maxlines > 0 && w.maxlines_curlines >= w.maxlines) || - (w.maxsize > 0 && w.maxsize_cursize >= w.maxsize) || - (w.daily && now.Day() != w.daily_opendate) { - if err := w.intRotate(); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return - } - } - - // Perform the write - n, err := fmt.Fprint(w.file, FormatLogRecord(w.format, rec)) - if err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return - } - - // Update the counts - w.maxlines_curlines++ - w.maxsize_cursize += n - } - } - }() - - return w -} - -// Request that the logs rotate -func (w *FileLogWriter) Rotate() { - w.rot <- true -} - -// If this is called in a threaded context, it MUST be synchronized -func (w *FileLogWriter) intRotate() error { - // Close any log file that may be open - if w.file != nil { - fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) - w.file.Close() - } - - // If we are keeping log files, move it to the next available number - if w.rotate { - _, err := os.Lstat(w.filename) - if err == nil { // file exists - // Find the next available number - num := 1 - fname := "" - for ; err == nil && num <= 999; num++ { - fname = w.filename + fmt.Sprintf(".%03d", num) - _, err = os.Lstat(fname) - } - // return error if the last file checked still existed - if err == nil { - return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.filename) - } - - // Rename the file to its newfound home - err = os.Rename(w.filename, fname) - if err != nil { - return fmt.Errorf("Rotate: %s\n", err) - } - } - } - 
-    // Open the log file
-    fd, err := os.OpenFile(w.filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
-    if err != nil {
-        return err
-    }
-    w.file = fd
-
-    now := time.Now()
-    fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: now}))
-
-    // Set the daily open date to the current date
-    w.daily_opendate = now.Day()
-
-    // initialize rotation values
-    w.maxlines_curlines = 0
-    w.maxsize_cursize = 0
-
-    return nil
-}
-
-// Set the logging format (chainable). Must be called before the first log
-// message is written.
-func (w *FileLogWriter) SetFormat(format string) *FileLogWriter {
-    w.format = format
-    return w
-}
-
-// Set the logfile header and footer (chainable). Must be called before the first log
-// message is written. These are formatted similar to the FormatLogRecord (e.g.
-// you can use %D and %T in your header/footer for date and time).
-func (w *FileLogWriter) SetHeadFoot(head, foot string) *FileLogWriter {
-    w.header, w.trailer = head, foot
-    if w.maxlines_curlines == 0 {
-        fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: time.Now()}))
-    }
-    return w
-}
-
-// Set rotate at linecount (chainable). Must be called before the first log
-// message is written.
-func (w *FileLogWriter) SetRotateLines(maxlines int) *FileLogWriter {
-    //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateLines: %v\n", maxlines)
-    w.maxlines = maxlines
-    return w
-}
-
-// Set rotate at size (chainable). Must be called before the first log message
-// is written.
-func (w *FileLogWriter) SetRotateSize(maxsize int) *FileLogWriter {
-    //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateSize: %v\n", maxsize)
-    w.maxsize = maxsize
-    return w
-}
-
-// Set rotate daily (chainable). Must be called before the first log message is
-// written.
-func (w *FileLogWriter) SetRotateDaily(daily bool) *FileLogWriter {
-    //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateDaily: %v\n", daily)
-    w.daily = daily
-    return w
-}
-
-// SetRotate changes whether or not the old logs are kept. (chainable) Must be
-// called before the first log message is written. If rotate is false, the
-// files are overwritten; otherwise, they are rotated to another file before the
-// new log is opened.
-func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter {
-    //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotate: %v\n", rotate)
-    w.rotate = rotate
-    return w
-}
-
-// NewXMLLogWriter is a utility method for creating a FileLogWriter set up to
-// output XML record log messages instead of line-based ones.
-func NewXMLLogWriter(fname string, rotate bool) *FileLogWriter {
-    return NewFileLogWriter(fname, rotate).SetFormat(
-        `    <record level="%L">
-        <timestamp>%D %T</timestamp>
-        <source>%S</source>
-        <message>%M</message>
-    </record>`).SetHeadFoot("<log created=\"%D %T\">", "</log>")
-}
diff --git a/vendor/_nuts/code.google.com/p/log4go/log4go.go b/vendor/_nuts/code.google.com/p/log4go/log4go.go
deleted file mode 100644
index ab4e857f..00000000
--- a/vendor/_nuts/code.google.com/p/log4go/log4go.go
+++ /dev/null
@@ -1,484 +0,0 @@
-// Copyright (C) 2010, Kyle Lemons . All rights reserved.
-
-// Package log4go provides level-based and highly configurable logging.
-//
-// Enhanced Logging
-//
-// This is inspired by the logging functionality in Java. Essentially, you create a Logger
-// object and create output filters for it. You can send whatever you want to the Logger,
-// and it will filter that based on your settings and send it to the outputs. This way, you
-// can put as much debug code in your program as you want, and when you're done you can filter
-// out the mundane messages so only the important ones show up.
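The intRotate method in the filelog.go deletion above renames a full log file to the first free slot of the form `<file>.001` through `<file>.999` before reopening it. A small, runnable sketch of just that renaming scheme (function and variable names here are illustrative, not log4go API):

```go
package main

import (
	"fmt"
	"os"
)

// rotateToNumberedBackup renames path to path.NNN for the first free NNN,
// mirroring the loop in the deleted intRotate.
func rotateToNumberedBackup(path string) (string, error) {
	if _, err := os.Lstat(path); err != nil {
		return "", nil // nothing to rotate yet
	}
	for num := 1; num <= 999; num++ {
		backup := fmt.Sprintf("%s.%03d", path, num)
		if _, err := os.Lstat(backup); err != nil {
			// Lstat failed, so the name is free; claim it.
			return backup, os.Rename(path, backup)
		}
	}
	return "", fmt.Errorf("no free backup slot for %s", path)
}

func main() {
	const logfile = "demo.log"
	os.WriteFile(logfile, []byte("old contents\n"), 0660)
	defer os.Remove(logfile + ".001")

	backup, err := rotateToNumberedBackup(logfile)
	if err != nil {
		panic(err)
	}
	fmt.Println("rotated to:", backup) // demo.log.001
}
```

The deleted writer triggers this on explicit Rotate calls and whenever the line, size, or daily thresholds set via SetRotateLines, SetRotateSize, and SetRotateDaily are crossed.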
-// -// Utility functions are provided to make life easier. Here is some example code to get started: -// -// log := log4go.NewLogger() -// log.AddFilter("stdout", log4go.DEBUG, log4go.NewConsoleLogWriter()) -// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true)) -// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) -// -// The first two lines can be combined with the utility NewDefaultLogger: -// -// log := log4go.NewDefaultLogger(log4go.DEBUG) -// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true)) -// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) -// -// Usage notes: -// - The ConsoleLogWriter does not display the source of the message to standard -// output, but the FileLogWriter does. -// - The utility functions (Info, Debug, Warn, etc) derive their source from the -// calling function, and this incurs extra overhead. -// -// Changes from 2.0: -// - The external interface has remained mostly stable, but a lot of the -// internals have been changed, so if you depended on any of this or created -// your own LogWriter, then you will probably have to update your code. In -// particular, Logger is now a map and ConsoleLogWriter is now a channel -// behind-the-scenes, and the LogWrite method no longer has return values. -// -// Future work: (please let me know if you think I should work on any of these particularly) -// - Log file rotation -// - Logging configuration files ala log4j -// - Have the ability to remove filters? -// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows -// for another method of logging -// - Add an XML filter type -package log4go - -import ( - "errors" - "os" - "fmt" - "time" - "strings" - "runtime" -) - -// Version information -const ( - L4G_VERSION = "log4go-v3.0.1" - L4G_MAJOR = 3 - L4G_MINOR = 0 - L4G_BUILD = 1 -) - -/****** Constants ******/ - -// These are the integer logging levels used by the logger -type level int - -const ( - FINEST level = iota - FINE - DEBUG - TRACE - INFO - WARNING - ERROR - CRITICAL -) - -// Logging level strings -var ( - levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"} -) - -func (l level) String() string { - if l < 0 || int(l) > len(levelStrings) { - return "UNKNOWN" - } - return levelStrings[int(l)] -} - -/****** Variables ******/ -var ( - // LogBufferLength specifies how many log messages a particular log4go - // logger can buffer at a time before writing them. - LogBufferLength = 32 -) - -/****** LogRecord ******/ - -// A LogRecord contains all of the pertinent information for each message -type LogRecord struct { - Level level // The log level - Created time.Time // The time at which the log message was created (nanoseconds) - Source string // The message source - Message string // The log message -} - -/****** LogWriter ******/ - -// This is an interface for anything that should be able to write logs -type LogWriter interface { - // This will be called to log a LogRecord message. - LogWrite(rec *LogRecord) - - // This should clean up anything lingering about the LogWriter, as it is called before - // the LogWriter is removed. LogWrite should not be called after Close. - Close() -} - -/****** Logger ******/ - -// A Filter represents the log level below which no log records are written to -// the associated LogWriter. 
-type Filter struct { - Level level - LogWriter -} - -// A Logger represents a collection of Filters through which log messages are -// written. -type Logger map[string]*Filter - -// Create a new logger. -// -// DEPRECATED: Use make(Logger) instead. -func NewLogger() Logger { - os.Stderr.WriteString("warning: use of deprecated NewLogger\n") - return make(Logger) -} - -// Create a new logger with a "stdout" filter configured to send log messages at -// or above lvl to standard output. -// -// DEPRECATED: use NewDefaultLogger instead. -func NewConsoleLogger(lvl level) Logger { - os.Stderr.WriteString("warning: use of deprecated NewConsoleLogger\n") - return Logger{ - "stdout": &Filter{lvl, NewConsoleLogWriter()}, - } -} - -// Create a new logger with a "stdout" filter configured to send log messages at -// or above lvl to standard output. -func NewDefaultLogger(lvl level) Logger { - return Logger{ - "stdout": &Filter{lvl, NewConsoleLogWriter()}, - } -} - -// Closes all log writers in preparation for exiting the program or a -// reconfiguration of logging. Calling this is not really imperative, unless -// you want to guarantee that all log messages are written. Close removes -// all filters (and thus all LogWriters) from the logger. -func (log Logger) Close() { - // Close all open loggers - for name, filt := range log { - filt.Close() - delete(log, name) - } -} - -// Add a new LogWriter to the Logger which will only log messages at lvl or -// higher. This function should not be called from multiple goroutines. -// Returns the logger for chaining. -func (log Logger) AddFilter(name string, lvl level, writer LogWriter) Logger { - log[name] = &Filter{lvl, writer} - return log -} - -/******* Logging *******/ -// Send a formatted log message internally -func (log Logger) intLogf(lvl level, format string, args ...interface{}) { - skip := true - - // Determine if any logging will be done - for _, filt := range log { - if lvl >= filt.Level { - skip = false - break - } - } - if skip { - return - } - - // Determine caller func - pc, _, lineno, ok := runtime.Caller(2) - src := "" - if ok { - src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) - } - - msg := format - if len(args) > 0 { - msg = fmt.Sprintf(format, args...) - } - - // Make the log record - rec := &LogRecord{ - Level: lvl, - Created: time.Now(), - Source: src, - Message: msg, - } - - // Dispatch the logs - for _, filt := range log { - if lvl < filt.Level { - continue - } - filt.LogWrite(rec) - } -} - -// Send a closure log message internally -func (log Logger) intLogc(lvl level, closure func() string) { - skip := true - - // Determine if any logging will be done - for _, filt := range log { - if lvl >= filt.Level { - skip = false - break - } - } - if skip { - return - } - - // Determine caller func - pc, _, lineno, ok := runtime.Caller(2) - src := "" - if ok { - src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) - } - - // Make the log record - rec := &LogRecord{ - Level: lvl, - Created: time.Now(), - Source: src, - Message: closure(), - } - - // Dispatch the logs - for _, filt := range log { - if lvl < filt.Level { - continue - } - filt.LogWrite(rec) - } -} - -// Send a log message with manual level, source, and message. 
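In the log4go.go being deleted here, a Logger is simply a map of named Filters, each pairing a minimum level with a LogWriter, and every record is dispatched to each filter whose threshold it meets. A stdlib-only sketch of that design, with the type names mirroring the deleted code but the channel-backed writers replaced by a trivial synchronous one:

```go
package main

import (
	"fmt"
	"time"
)

type level int

const (
	DEBUG level = iota
	INFO
	WARNING
)

type LogRecord struct {
	Level   level
	Created time.Time
	Source  string
	Message string
}

type LogWriter interface {
	LogWrite(rec *LogRecord)
	Close()
}

type Filter struct {
	Level level
	LogWriter
}

type Logger map[string]*Filter

func (log Logger) AddFilter(name string, lvl level, writer LogWriter) Logger {
	log[name] = &Filter{lvl, writer}
	return log
}

func (log Logger) Log(lvl level, source, message string) {
	rec := &LogRecord{Level: lvl, Created: time.Now(), Source: source, Message: message}
	for _, filt := range log {
		if lvl >= filt.Level {
			filt.LogWrite(rec)
		}
	}
}

// consoleWriter is a trivial synchronous stand-in for log4go's
// channel-backed ConsoleLogWriter.
type consoleWriter struct{}

func (consoleWriter) LogWrite(rec *LogRecord) { fmt.Printf("[%s] %s\n", rec.Source, rec.Message) }
func (consoleWriter) Close()                  {}

func main() {
	log := make(Logger).AddFilter("stdout", INFO, consoleWriter{})
	log.Log(DEBUG, "main", "filtered out")  // below INFO: dropped
	log.Log(WARNING, "main", "dispatched")  // at or above INFO: printed
}
```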
-func (log Logger) Log(lvl level, source, message string) { - skip := true - - // Determine if any logging will be done - for _, filt := range log { - if lvl >= filt.Level { - skip = false - break - } - } - if skip { - return - } - - // Make the log record - rec := &LogRecord{ - Level: lvl, - Created: time.Now(), - Source: source, - Message: message, - } - - // Dispatch the logs - for _, filt := range log { - if lvl < filt.Level { - continue - } - filt.LogWrite(rec) - } -} - -// Logf logs a formatted log message at the given log level, using the caller as -// its source. -func (log Logger) Logf(lvl level, format string, args ...interface{}) { - log.intLogf(lvl, format, args...) -} - -// Logc logs a string returned by the closure at the given log level, using the caller as -// its source. If no log message would be written, the closure is never called. -func (log Logger) Logc(lvl level, closure func() string) { - log.intLogc(lvl, closure) -} - -// Finest logs a message at the finest log level. -// See Debug for an explanation of the arguments. -func (log Logger) Finest(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINEST - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Fine logs a message at the fine log level. -// See Debug for an explanation of the arguments. -func (log Logger) Fine(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Debug is a utility method for debug log messages. -// The behavior of Debug depends on the first argument: -// - arg0 is a string -// When given a string as the first argument, this behaves like Logf but with -// the DEBUG log level: the first argument is interpreted as a format for the -// latter arguments. -// - arg0 is a func()string -// When given a closure of type func()string, this logs the string returned by -// the closure iff it will be logged. The closure runs at most one time. -// - arg0 is interface{} -// When given anything else, the log message will be each of the arguments -// formatted with %v and separated by spaces (ala Sprint). -func (log Logger) Debug(arg0 interface{}, args ...interface{}) { - const ( - lvl = DEBUG - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Trace logs a message at the trace log level. -// See Debug for an explanation of the arguments. -func (log Logger) Trace(arg0 interface{}, args ...interface{}) { - const ( - lvl = TRACE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) 
- case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Info logs a message at the info log level. -// See Debug for an explanation of the arguments. -func (log Logger) Info(arg0 interface{}, args ...interface{}) { - const ( - lvl = INFO - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Warn logs a message at the warning log level and returns the formatted error. -// At the warning level and higher, there is no performance benefit if the -// message is not actually logged, because all formats are processed and all -// closures are executed to format the error message. -// See Debug for further explanation of the arguments. -func (log Logger) Warn(arg0 interface{}, args ...interface{}) error { - const ( - lvl = WARNING - ) - var msg string - switch first := arg0.(type) { - case string: - // Use the string as a format string - msg = fmt.Sprintf(first, args...) - case func() string: - // Log the closure (no other arguments used) - msg = first() - default: - // Build a format string so that it will be similar to Sprint - msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - } - log.intLogf(lvl, msg) - return errors.New(msg) -} - -// Error logs a message at the error log level and returns the formatted error, -// See Warn for an explanation of the performance and Debug for an explanation -// of the parameters. -func (log Logger) Error(arg0 interface{}, args ...interface{}) error { - const ( - lvl = ERROR - ) - var msg string - switch first := arg0.(type) { - case string: - // Use the string as a format string - msg = fmt.Sprintf(first, args...) - case func() string: - // Log the closure (no other arguments used) - msg = first() - default: - // Build a format string so that it will be similar to Sprint - msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - } - log.intLogf(lvl, msg) - return errors.New(msg) -} - -// Critical logs a message at the critical log level and returns the formatted error, -// See Warn for an explanation of the performance and Debug for an explanation -// of the parameters. -func (log Logger) Critical(arg0 interface{}, args ...interface{}) error { - const ( - lvl = CRITICAL - ) - var msg string - switch first := arg0.(type) { - case string: - // Use the string as a format string - msg = fmt.Sprintf(first, args...) - case func() string: - // Log the closure (no other arguments used) - msg = first() - default: - // Build a format string so that it will be similar to Sprint - msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - } - log.intLogf(lvl, msg) - return errors.New(msg) -} diff --git a/vendor/_nuts/code.google.com/p/log4go/log4go_test.go b/vendor/_nuts/code.google.com/p/log4go/log4go_test.go deleted file mode 100644 index 90c62997..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/log4go_test.go +++ /dev/null @@ -1,534 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
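The Finest/Fine/Debug/Trace/Info/Warn/Error/Critical helpers above all share one dispatch idiom: the first argument may be a format string, a lazily evaluated `func() string`, or any other value printed Sprint-style. A compact sketch of that idiom (`logAt` is an illustrative name, not log4go API):

```go
package main

import (
	"fmt"
	"strings"
)

func logAt(arg0 interface{}, args ...interface{}) string {
	switch first := arg0.(type) {
	case string:
		// arg0 is a format string for the remaining arguments.
		return fmt.Sprintf(first, args...)
	case func() string:
		// arg0 is a closure, evaluated only when the message is needed.
		return first()
	default:
		// Anything else: Sprint-style, space-separated values.
		return fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...)
	}
}

func main() {
	fmt.Println(logAt("%s is %d", "answer", 42))         // format form
	fmt.Println(logAt(func() string { return "lazy" }))  // closure form
	fmt.Println(logAt(struct{ A int }{1}, "extra", 3.5)) // Sprint form
}
```

The closure form is the performance escape hatch: below the warning level, the deleted helpers skip calling it entirely when no filter would log the record.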
- -package log4go - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "testing" - "time" -) - -const testLogFile = "_logtest.log" - -var now time.Time = time.Unix(0, 1234567890123456789).In(time.UTC) - -func newLogRecord(lvl level, src string, msg string) *LogRecord { - return &LogRecord{ - Level: lvl, - Source: src, - Created: now, - Message: msg, - } -} - -func TestELog(t *testing.T) { - fmt.Printf("Testing %s\n", L4G_VERSION) - lr := newLogRecord(CRITICAL, "source", "message") - if lr.Level != CRITICAL { - t.Errorf("Incorrect level: %d should be %d", lr.Level, CRITICAL) - } - if lr.Source != "source" { - t.Errorf("Incorrect source: %s should be %s", lr.Source, "source") - } - if lr.Message != "message" { - t.Errorf("Incorrect message: %s should be %s", lr.Source, "message") - } -} - -var formatTests = []struct { - Test string - Record *LogRecord - Formats map[string]string -}{ - { - Test: "Standard formats", - Record: &LogRecord{ - Level: ERROR, - Source: "source", - Message: "message", - Created: now, - }, - Formats: map[string]string{ - // TODO(kevlar): How can I do this so it'll work outside of PST? - FORMAT_DEFAULT: "[2009/02/13 23:31:30 UTC] [EROR] (source) message\n", - FORMAT_SHORT: "[23:31 02/13/09] [EROR] message\n", - FORMAT_ABBREV: "[EROR] message\n", - }, - }, -} - -func TestFormatLogRecord(t *testing.T) { - for _, test := range formatTests { - name := test.Test - for fmt, want := range test.Formats { - if got := FormatLogRecord(fmt, test.Record); got != want { - t.Errorf("%s - %s:", name, fmt) - t.Errorf(" got %q", got) - t.Errorf(" want %q", want) - } - } - } -} - -var logRecordWriteTests = []struct { - Test string - Record *LogRecord - Console string -}{ - { - Test: "Normal message", - Record: &LogRecord{ - Level: CRITICAL, - Source: "source", - Message: "message", - Created: now, - }, - Console: "[02/13/09 23:31:30] [CRIT] message\n", - }, -} - -func TestConsoleLogWriter(t *testing.T) { - console := make(ConsoleLogWriter) - - r, w := io.Pipe() - go console.run(w) - defer console.Close() - - buf := make([]byte, 1024) - - for _, test := range logRecordWriteTests { - name := test.Test - - console.LogWrite(test.Record) - n, _ := r.Read(buf) - - if got, want := string(buf[:n]), test.Console; got != want { - t.Errorf("%s: got %q", name, got) - t.Errorf("%s: want %q", name, want) - } - } -} - -func TestFileLogWriter(t *testing.T) { - defer func(buflen int) { - LogBufferLength = buflen - }(LogBufferLength) - LogBufferLength = 0 - - w := NewFileLogWriter(testLogFile, false) - if w == nil { - t.Fatalf("Invalid return: w should not be nil") - } - defer os.Remove(testLogFile) - - w.LogWrite(newLogRecord(CRITICAL, "source", "message")) - w.Close() - runtime.Gosched() - - if contents, err := ioutil.ReadFile(testLogFile); err != nil { - t.Errorf("read(%q): %s", testLogFile, err) - } else if len(contents) != 50 { - t.Errorf("malformed filelog: %q (%d bytes)", string(contents), len(contents)) - } -} - -func TestXMLLogWriter(t *testing.T) { - defer func(buflen int) { - LogBufferLength = buflen - }(LogBufferLength) - LogBufferLength = 0 - - w := NewXMLLogWriter(testLogFile, false) - if w == nil { - t.Fatalf("Invalid return: w should not be nil") - } - defer os.Remove(testLogFile) - - w.LogWrite(newLogRecord(CRITICAL, "source", "message")) - w.Close() - runtime.Gosched() - - if contents, err := ioutil.ReadFile(testLogFile); err != nil { - t.Errorf("read(%q): %s", testLogFile, err) - } else if len(contents) != 185 { - t.Errorf("malformed xmllog: %q 
(%d bytes)", string(contents), len(contents)) - } -} - -func TestLogger(t *testing.T) { - sl := NewDefaultLogger(WARNING) - if sl == nil { - t.Fatalf("NewDefaultLogger should never return nil") - } - if lw, exist := sl["stdout"]; lw == nil || exist != true { - t.Fatalf("NewDefaultLogger produced invalid logger (DNE or nil)") - } - if sl["stdout"].Level != WARNING { - t.Fatalf("NewDefaultLogger produced invalid logger (incorrect level)") - } - if len(sl) != 1 { - t.Fatalf("NewDefaultLogger produced invalid logger (incorrect map count)") - } - - //func (l *Logger) AddFilter(name string, level int, writer LogWriter) {} - l := make(Logger) - l.AddFilter("stdout", DEBUG, NewConsoleLogWriter()) - if lw, exist := l["stdout"]; lw == nil || exist != true { - t.Fatalf("AddFilter produced invalid logger (DNE or nil)") - } - if l["stdout"].Level != DEBUG { - t.Fatalf("AddFilter produced invalid logger (incorrect level)") - } - if len(l) != 1 { - t.Fatalf("AddFilter produced invalid logger (incorrect map count)") - } - - //func (l *Logger) Warn(format string, args ...interface{}) error {} - if err := l.Warn("%s %d %#v", "Warning:", 1, []int{}); err.Error() != "Warning: 1 []int{}" { - t.Errorf("Warn returned invalid error: %s", err) - } - - //func (l *Logger) Error(format string, args ...interface{}) error {} - if err := l.Error("%s %d %#v", "Error:", 10, []string{}); err.Error() != "Error: 10 []string{}" { - t.Errorf("Error returned invalid error: %s", err) - } - - //func (l *Logger) Critical(format string, args ...interface{}) error {} - if err := l.Critical("%s %d %#v", "Critical:", 100, []int64{}); err.Error() != "Critical: 100 []int64{}" { - t.Errorf("Critical returned invalid error: %s", err) - } - - // Already tested or basically untestable - //func (l *Logger) Log(level int, source, message string) {} - //func (l *Logger) Logf(level int, format string, args ...interface{}) {} - //func (l *Logger) intLogf(level int, format string, args ...interface{}) string {} - //func (l *Logger) Finest(format string, args ...interface{}) {} - //func (l *Logger) Fine(format string, args ...interface{}) {} - //func (l *Logger) Debug(format string, args ...interface{}) {} - //func (l *Logger) Trace(format string, args ...interface{}) {} - //func (l *Logger) Info(format string, args ...interface{}) {} -} - -func TestLogOutput(t *testing.T) { - const ( - expected = "fdf3e51e444da56b4cb400f30bc47424" - ) - - // Unbuffered output - defer func(buflen int) { - LogBufferLength = buflen - }(LogBufferLength) - LogBufferLength = 0 - - l := make(Logger) - - // Delete and open the output log without a timestamp (for a constant md5sum) - l.AddFilter("file", FINEST, NewFileLogWriter(testLogFile, false).SetFormat("[%L] %M")) - defer os.Remove(testLogFile) - - // Send some log messages - l.Log(CRITICAL, "testsrc1", fmt.Sprintf("This message is level %d", int(CRITICAL))) - l.Logf(ERROR, "This message is level %v", ERROR) - l.Logf(WARNING, "This message is level %s", WARNING) - l.Logc(INFO, func() string { return "This message is level INFO" }) - l.Trace("This message is level %d", int(TRACE)) - l.Debug("This message is level %s", DEBUG) - l.Fine(func() string { return fmt.Sprintf("This message is level %v", FINE) }) - l.Finest("This message is level %v", FINEST) - l.Finest(FINEST, "is also this message's level") - - l.Close() - - contents, err := ioutil.ReadFile(testLogFile) - if err != nil { - t.Fatalf("Could not read output log: %s", err) - } - - sum := md5.New() - sum.Write(contents) - if sumstr := 
hex.EncodeToString(sum.Sum(nil)); sumstr != expected {
-        t.Errorf("--- Log Contents:\n%s---", string(contents))
-        t.Fatalf("Checksum does not match: %s (expecting %s)", sumstr, expected)
-    }
-}
-
-func TestCountMallocs(t *testing.T) {
-    const N = 1
-    var m runtime.MemStats
-    getMallocs := func() uint64 {
-        runtime.ReadMemStats(&m)
-        return m.Mallocs
-    }
-
-    // Console logger
-    sl := NewDefaultLogger(INFO)
-    mallocs := 0 - getMallocs()
-    for i := 0; i < N; i++ {
-        sl.Log(WARNING, "here", "This is a WARNING message")
-    }
-    mallocs += getMallocs()
-    fmt.Printf("mallocs per sl.Log((WARNING, \"here\", \"This is a log message\"): %d\n", mallocs/N)
-
-    // Console logger formatted
-    mallocs = 0 - getMallocs()
-    for i := 0; i < N; i++ {
-        sl.Logf(WARNING, "%s is a log message with level %d", "This", WARNING)
-    }
-    mallocs += getMallocs()
-    fmt.Printf("mallocs per sl.Logf(WARNING, \"%%s is a log message with level %%d\", \"This\", WARNING): %d\n", mallocs/N)
-
-    // Console logger (not logged)
-    sl = NewDefaultLogger(INFO)
-    mallocs = 0 - getMallocs()
-    for i := 0; i < N; i++ {
-        sl.Log(DEBUG, "here", "This is a DEBUG log message")
-    }
-    mallocs += getMallocs()
-    fmt.Printf("mallocs per unlogged sl.Log((WARNING, \"here\", \"This is a log message\"): %d\n", mallocs/N)
-
-    // Console logger formatted (not logged)
-    mallocs = 0 - getMallocs()
-    for i := 0; i < N; i++ {
-        sl.Logf(DEBUG, "%s is a log message with level %d", "This", DEBUG)
-    }
-    mallocs += getMallocs()
-    fmt.Printf("mallocs per unlogged sl.Logf(WARNING, \"%%s is a log message with level %%d\", \"This\", WARNING): %d\n", mallocs/N)
-}
-
-func TestXMLConfig(t *testing.T) {
-    const (
-        configfile = "example.xml"
-    )
-
-    fd, err := os.Create(configfile)
-    if err != nil {
-        t.Fatalf("Could not open %s for writing: %s", configfile, err)
-    }
-
-    fmt.Fprintln(fd, "<logging>")
-    fmt.Fprintln(fd, "  <filter enabled=\"true\">")
-    fmt.Fprintln(fd, "    <tag>stdout</tag>")
-    fmt.Fprintln(fd, "    <type>console</type>")
-    fmt.Fprintln(fd, "    <!-- level is (:?FINEST|FINE|DEBUG|TRACE|INFO|WARNING|ERROR) -->")
-    fmt.Fprintln(fd, "    <level>DEBUG</level>")
-    fmt.Fprintln(fd, "  </filter>")
-    fmt.Fprintln(fd, "  <filter enabled=\"true\">")
-    fmt.Fprintln(fd, "    <tag>file</tag>")
-    fmt.Fprintln(fd, "    <type>file</type>")
-    fmt.Fprintln(fd, "    <level>FINEST</level>")
-    fmt.Fprintln(fd, "    <property name=\"filename\">test.log</property>")
-    fmt.Fprintln(fd, "    <!--")
-    fmt.Fprintln(fd, "       %T - Time (15:04:05 MST)")
-    fmt.Fprintln(fd, "       %t - Time (15:04)")
-    fmt.Fprintln(fd, "       %D - Date (2006/01/02)")
-    fmt.Fprintln(fd, "       %d - Date (01/02/06)")
-    fmt.Fprintln(fd, "       %L - Level (FNST, FINE, DEBG, TRAC, WARN, EROR, CRIT)")
-    fmt.Fprintln(fd, "       %S - Source")
-    fmt.Fprintln(fd, "       %M - Message")
-    fmt.Fprintln(fd, "       It ignores unknown format strings (and removes them)")
-    fmt.Fprintln(fd, "       Recommended: \"[%D %T] [%L] (%S) %M\"")
-    fmt.Fprintln(fd, "    -->")
-    fmt.Fprintln(fd, "    <property name=\"format\">[%D %T] [%L] (%S) %M</property>")
-    fmt.Fprintln(fd, "    <property name=\"rotate\">false</property> <!-- true enables log rotation, otherwise append -->")
-    fmt.Fprintln(fd, "    <property name=\"maxsize\">0M</property> <!-- \\d+[KMG]? Suffixes are in terms of 2**10 -->")
-    fmt.Fprintln(fd, "    <property name=\"maxlines\">0K</property> <!-- \\d+[KMG]? Suffixes are in terms of thousands -->")
-    fmt.Fprintln(fd, "    <property name=\"daily\">true</property> <!-- Automatically rotates when a log message is written after midnight -->")
-    fmt.Fprintln(fd, "  </filter>")
-    fmt.Fprintln(fd, "  <filter enabled=\"true\">")
-    fmt.Fprintln(fd, "    <tag>xmllog</tag>")
-    fmt.Fprintln(fd, "    <type>xml</type>")
-    fmt.Fprintln(fd, "    <level>TRACE</level>")
-    fmt.Fprintln(fd, "    <property name=\"filename\">trace.xml</property>")
-    fmt.Fprintln(fd, "    <property name=\"rotate\">true</property>")
-    fmt.Fprintln(fd, "    <property name=\"maxsize\">100M</property>")
-    fmt.Fprintln(fd, "    <property name=\"maxrecords\">6K</property>")
-    fmt.Fprintln(fd, "    <property name=\"daily\">false</property>")
-    fmt.Fprintln(fd, "  </filter>")
-    fmt.Fprintln(fd, "  <filter enabled=\"false\"><!-- enabled=false means this logger won't actually be created -->")
-    fmt.Fprintln(fd, "    <tag>donotopen</tag>")
-    fmt.Fprintln(fd, "    <type>socket</type>")
-    fmt.Fprintln(fd, "    <level>FINEST</level>")
-    fmt.Fprintln(fd, "    <property name=\"endpoint\">192.168.1.255:12124</property> <!-- recommend UDP broadcast -->")
-    fmt.Fprintln(fd, "    <property name=\"protocol\">udp</property> <!-- tcp or udp -->")
-    fmt.Fprintln(fd, "  </filter>")
-    fmt.Fprintln(fd, "</logging>")
-    fd.Close()
-
-    log := make(Logger)
-    log.LoadConfiguration(configfile)
-    defer os.Remove("trace.xml")
-    defer os.Remove("test.log")
-    defer log.Close()
-
-    // Make sure we got all loggers
-    if len(log) != 3 {
-        t.Fatalf("XMLConfig: Expected 3 filters, found %d", len(log))
-    }
-
-    // Make sure they're the right keys
-    if _, ok := log["stdout"]; !ok {
-        t.Errorf("XMLConfig: Expected stdout logger")
-    }
-    if _, ok := log["file"]; !ok {
-        t.Fatalf("XMLConfig: Expected file logger")
-    }
-    if _, ok := log["xmllog"]; !ok {
-        t.Fatalf("XMLConfig: Expected xmllog logger")
-    }
-
-    // Make sure they're the right type
-    if _, ok := log["stdout"].LogWriter.(ConsoleLogWriter); !ok {
-
t.Fatalf("XMLConfig: Expected stdout to be ConsoleLogWriter, found %T", log["stdout"].LogWriter) - } - if _, ok := log["file"].LogWriter.(*FileLogWriter); !ok { - t.Fatalf("XMLConfig: Expected file to be *FileLogWriter, found %T", log["file"].LogWriter) - } - if _, ok := log["xmllog"].LogWriter.(*FileLogWriter); !ok { - t.Fatalf("XMLConfig: Expected xmllog to be *FileLogWriter, found %T", log["xmllog"].LogWriter) - } - - // Make sure levels are set - if lvl := log["stdout"].Level; lvl != DEBUG { - t.Errorf("XMLConfig: Expected stdout to be set to level %d, found %d", DEBUG, lvl) - } - if lvl := log["file"].Level; lvl != FINEST { - t.Errorf("XMLConfig: Expected file to be set to level %d, found %d", FINEST, lvl) - } - if lvl := log["xmllog"].Level; lvl != TRACE { - t.Errorf("XMLConfig: Expected xmllog to be set to level %d, found %d", TRACE, lvl) - } - - // Make sure the w is open and points to the right file - if fname := log["file"].LogWriter.(*FileLogWriter).file.Name(); fname != "test.log" { - t.Errorf("XMLConfig: Expected file to have opened %s, found %s", "test.log", fname) - } - - // Make sure the XLW is open and points to the right file - if fname := log["xmllog"].LogWriter.(*FileLogWriter).file.Name(); fname != "trace.xml" { - t.Errorf("XMLConfig: Expected xmllog to have opened %s, found %s", "trace.xml", fname) - } - - // Move XML log file - os.Rename(configfile, "examples/"+configfile) // Keep this so that an example with the documentation is available -} - -func BenchmarkFormatLogRecord(b *testing.B) { - const updateEvery = 1 - rec := &LogRecord{ - Level: CRITICAL, - Created: now, - Source: "source", - Message: "message", - } - for i := 0; i < b.N; i++ { - rec.Created = rec.Created.Add(1 * time.Second / updateEvery) - if i%2 == 0 { - FormatLogRecord(FORMAT_DEFAULT, rec) - } else { - FormatLogRecord(FORMAT_SHORT, rec) - } - } -} - -func BenchmarkConsoleLog(b *testing.B) { - /* This doesn't seem to work on OS X - sink, err := os.Open(os.DevNull) - if err != nil { - panic(err) - } - if err := syscall.Dup2(int(sink.Fd()), syscall.Stdout); err != nil { - panic(err) - } - */ - - stdout = ioutil.Discard - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Log(WARNING, "here", "This is a log message") - } -} - -func BenchmarkConsoleNotLogged(b *testing.B) { - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Log(DEBUG, "here", "This is a log message") - } -} - -func BenchmarkConsoleUtilLog(b *testing.B) { - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Info("%s is a log message", "This") - } -} - -func BenchmarkConsoleUtilNotLog(b *testing.B) { - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Debug("%s is a log message", "This") - } -} - -func BenchmarkFileLog(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Log(WARNING, "here", "This is a log message") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -func BenchmarkFileNotLogged(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Log(DEBUG, "here", "This is a log message") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -func BenchmarkFileUtilLog(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Info("%s 
is a log message", "This") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -func BenchmarkFileUtilNotLog(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Debug("%s is a log message", "This") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -// Benchmark results (darwin amd64 6g) -//elog.BenchmarkConsoleLog 100000 22819 ns/op -//elog.BenchmarkConsoleNotLogged 2000000 879 ns/op -//elog.BenchmarkConsoleUtilLog 50000 34380 ns/op -//elog.BenchmarkConsoleUtilNotLog 1000000 1339 ns/op -//elog.BenchmarkFileLog 100000 26497 ns/op -//elog.BenchmarkFileNotLogged 2000000 821 ns/op -//elog.BenchmarkFileUtilLog 50000 33945 ns/op -//elog.BenchmarkFileUtilNotLog 1000000 1258 ns/op diff --git a/vendor/_nuts/code.google.com/p/log4go/pattlog.go b/vendor/_nuts/code.google.com/p/log4go/pattlog.go deleted file mode 100644 index 8224302b..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/pattlog.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "fmt" - "bytes" - "io" -) - -const ( - FORMAT_DEFAULT = "[%D %T] [%L] (%S) %M" - FORMAT_SHORT = "[%t %d] [%L] %M" - FORMAT_ABBREV = "[%L] %M" -) - -type formatCacheType struct { - LastUpdateSeconds int64 - shortTime, shortDate string - longTime, longDate string -} - -var formatCache = &formatCacheType{} - -// Known format codes: -// %T - Time (15:04:05 MST) -// %t - Time (15:04) -// %D - Date (2006/01/02) -// %d - Date (01/02/06) -// %L - Level (FNST, FINE, DEBG, TRAC, WARN, EROR, CRIT) -// %S - Source -// %M - Message -// Ignores unknown formats -// Recommended: "[%D %T] [%L] (%S) %M" -func FormatLogRecord(format string, rec *LogRecord) string { - if rec == nil { - return "" - } - if len(format) == 0 { - return "" - } - - out := bytes.NewBuffer(make([]byte, 0, 64)) - secs := rec.Created.UnixNano() / 1e9 - - cache := *formatCache - if cache.LastUpdateSeconds != secs { - month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year() - hour, minute, second := rec.Created.Hour(), rec.Created.Minute(), rec.Created.Second() - zone, _ := rec.Created.Zone() - updated := &formatCacheType{ - LastUpdateSeconds: secs, - shortTime: fmt.Sprintf("%02d:%02d", hour, minute), - shortDate: fmt.Sprintf("%02d/%02d/%02d", month, day, year%100), - longTime: fmt.Sprintf("%02d:%02d:%02d %s", hour, minute, second, zone), - longDate: fmt.Sprintf("%04d/%02d/%02d", year, month, day), - } - cache = *updated - formatCache = updated - } - - // Split the string into pieces by % signs - pieces := bytes.Split([]byte(format), []byte{'%'}) - - // Iterate over the pieces, replacing known formats - for i, piece := range pieces { - if i > 0 && len(piece) > 0 { - switch piece[0] { - case 'T': - out.WriteString(cache.longTime) - case 't': - out.WriteString(cache.shortTime) - case 'D': - out.WriteString(cache.longDate) - case 'd': - out.WriteString(cache.shortDate) - case 'L': - out.WriteString(levelStrings[rec.Level]) - case 'S': - out.WriteString(rec.Source) - case 'M': - out.WriteString(rec.Message) - } - if len(piece) > 1 { - out.Write(piece[1:]) - } - } else if len(piece) > 0 { - out.Write(piece) - } - } - out.WriteByte('\n') - - return out.String() -} - -// This is the standard writer that prints to standard output. 
-type FormatLogWriter chan *LogRecord - -// This creates a new FormatLogWriter -func NewFormatLogWriter(out io.Writer, format string) FormatLogWriter { - records := make(FormatLogWriter, LogBufferLength) - go records.run(out, format) - return records -} - -func (w FormatLogWriter) run(out io.Writer, format string) { - for rec := range w { - fmt.Fprint(out, FormatLogRecord(format, rec)) - } -} - -// This is the FormatLogWriter's output method. This will block if the output -// buffer is full. -func (w FormatLogWriter) LogWrite(rec *LogRecord) { - w <- rec -} - -// Close stops the logger from sending messages to standard output. Attempts to -// send log messages to this logger after a Close have undefined behavior. -func (w FormatLogWriter) Close() { - close(w) -} diff --git a/vendor/_nuts/code.google.com/p/log4go/socklog.go b/vendor/_nuts/code.google.com/p/log4go/socklog.go deleted file mode 100644 index 1d224a99..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/socklog.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "encoding/json" - "fmt" - "net" - "os" -) - -// This log writer sends output to a socket -type SocketLogWriter chan *LogRecord - -// This is the SocketLogWriter's output method -func (w SocketLogWriter) LogWrite(rec *LogRecord) { - w <- rec -} - -func (w SocketLogWriter) Close() { - close(w) -} - -func NewSocketLogWriter(proto, hostport string) SocketLogWriter { - sock, err := net.Dial(proto, hostport) - if err != nil { - fmt.Fprintf(os.Stderr, "NewSocketLogWriter(%q): %s\n", hostport, err) - return nil - } - - w := SocketLogWriter(make(chan *LogRecord, LogBufferLength)) - - go func() { - defer func() { - if sock != nil && proto == "tcp" { - sock.Close() - } - }() - - for rec := range w { - // Marshall into JSON - js, err := json.Marshal(rec) - if err != nil { - fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err) - return - } - - _, err = sock.Write(js) - if err != nil { - fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err) - return - } - } - }() - - return w -} diff --git a/vendor/_nuts/code.google.com/p/log4go/termlog.go b/vendor/_nuts/code.google.com/p/log4go/termlog.go deleted file mode 100644 index 1ed2e4e0..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/termlog.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "io" - "os" - "fmt" -) - -var stdout io.Writer = os.Stdout - -// This is the standard writer that prints to standard output. -type ConsoleLogWriter chan *LogRecord - -// This creates a new ConsoleLogWriter -func NewConsoleLogWriter() ConsoleLogWriter { - records := make(ConsoleLogWriter, LogBufferLength) - go records.run(stdout) - return records -} - -func (w ConsoleLogWriter) run(out io.Writer) { - var timestr string - var timestrAt int64 - - for rec := range w { - if at := rec.Created.UnixNano() / 1e9; at != timestrAt { - timestr, timestrAt = rec.Created.Format("01/02/06 15:04:05"), at - } - fmt.Fprint(out, "[", timestr, "] [", levelStrings[rec.Level], "] ", rec.Message, "\n") - } -} - -// This is the ConsoleLogWriter's output method. This will block if the output -// buffer is full. -func (w ConsoleLogWriter) LogWrite(rec *LogRecord) { - w <- rec -} - -// Close stops the logger from sending messages to standard output. Attempts to -// send log messages to this logger after a Close have undefined behavior. 
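The socklog.go deletion above ships each record as a JSON datagram over a dialed socket. A self-contained sketch of that wire behavior, with a loopback UDP listener standing in for a log collector and the channel/goroutine plumbing elided:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"time"
)

type LogRecord struct {
	Level   int
	Created time.Time
	Source  string
	Message string
}

func main() {
	// A local listener standing in for a collector such as SimpleNetLogServer.
	conn, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	sock, err := net.Dial("udp", conn.LocalAddr().String())
	if err != nil {
		panic(err)
	}
	defer sock.Close()

	// What the SocketLogWriter goroutine does per record: marshal and write.
	js, _ := json.Marshal(&LogRecord{Level: 4, Created: time.Now(), Source: "main", Message: "hello"})
	if _, err := sock.Write(js); err != nil {
		panic(err)
	}

	buf := make([]byte, 1024)
	n, _, err := conn.ReadFrom(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println("received:", string(buf[:n]))
}
```

As the deleted code notes, only TCP connections are closed on writer shutdown; UDP sockets are connectionless, so records sent to an absent listener are simply dropped.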
-func (w ConsoleLogWriter) Close() { - close(w) -} diff --git a/vendor/_nuts/code.google.com/p/log4go/wrapper.go b/vendor/_nuts/code.google.com/p/log4go/wrapper.go deleted file mode 100644 index 10ecd88e..00000000 --- a/vendor/_nuts/code.google.com/p/log4go/wrapper.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "errors" - "os" - "fmt" - "strings" -) - -var ( - Global Logger -) - -func init() { - Global = NewDefaultLogger(DEBUG) -} - -// Wrapper for (*Logger).LoadConfiguration -func LoadConfiguration(filename string) { - Global.LoadConfiguration(filename) -} - -// Wrapper for (*Logger).AddFilter -func AddFilter(name string, lvl level, writer LogWriter) { - Global.AddFilter(name, lvl, writer) -} - -// Wrapper for (*Logger).Close (closes and removes all logwriters) -func Close() { - Global.Close() -} - -func Crash(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(CRITICAL, strings.Repeat(" %v", len(args))[1:], args...) - } - panic(args) -} - -// Logs the given message and crashes the program -func Crashf(format string, args ...interface{}) { - Global.intLogf(CRITICAL, format, args...) - Global.Close() // so that hopefully the messages get logged - panic(fmt.Sprintf(format, args...)) -} - -// Compatibility with `log` -func Exit(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) - } - Global.Close() // so that hopefully the messages get logged - os.Exit(0) -} - -// Compatibility with `log` -func Exitf(format string, args ...interface{}) { - Global.intLogf(ERROR, format, args...) - Global.Close() // so that hopefully the messages get logged - os.Exit(0) -} - -// Compatibility with `log` -func Stderr(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) - } -} - -// Compatibility with `log` -func Stderrf(format string, args ...interface{}) { - Global.intLogf(ERROR, format, args...) -} - -// Compatibility with `log` -func Stdout(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(INFO, strings.Repeat(" %v", len(args))[1:], args...) - } -} - -// Compatibility with `log` -func Stdoutf(format string, args ...interface{}) { - Global.intLogf(INFO, format, args...) -} - -// Send a log message manually -// Wrapper for (*Logger).Log -func Log(lvl level, source, message string) { - Global.Log(lvl, source, message) -} - -// Send a formatted log message easily -// Wrapper for (*Logger).Logf -func Logf(lvl level, format string, args ...interface{}) { - Global.intLogf(lvl, format, args...) -} - -// Send a closure log message -// Wrapper for (*Logger).Logc -func Logc(lvl level, closure func() string) { - Global.intLogc(lvl, closure) -} - -// Utility for finest log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Finest -func Finest(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINEST - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) 
- } -} - -// Utility for fine log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Fine -func Fine(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for debug log messages -// When given a string as the first argument, this behaves like Logf but with the DEBUG log level (e.g. the first argument is interpreted as a format for the latter arguments) -// When given a closure of type func()string, this logs the string returned by the closure iff it will be logged. The closure runs at most one time. -// When given anything else, the log message will be each of the arguments formatted with %v and separated by spaces (ala Sprint). -// Wrapper for (*Logger).Debug -func Debug(arg0 interface{}, args ...interface{}) { - const ( - lvl = DEBUG - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for trace log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Trace -func Trace(arg0 interface{}, args ...interface{}) { - const ( - lvl = TRACE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for info log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Info -func Info(arg0 interface{}, args ...interface{}) { - const ( - lvl = INFO - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for warn log messages (returns an error for easy function returns) (see Debug() for parameter explanation) -// These functions will execute a closure exactly once, to build the error message for the return -// Wrapper for (*Logger).Warn -func Warn(arg0 interface{}, args ...interface{}) error { - const ( - lvl = WARNING - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - return errors.New(fmt.Sprintf(first, args...)) - case func() string: - // Log the closure (no other arguments used) - str := first() - Global.intLogf(lvl, "%s", str) - return errors.New(str) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) 
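These level helpers (Finest through Critical, and the Logger methods later in this commit) all dispatch on the type of their first argument, and only the closure form defers formatting until a filter is known to accept the level. A short sketch of the three call shapes, using the vendored import path from the deleted example programs:

```go
package main

import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go"

func expensiveDump() string {
	// Stands in for a costly serialization; with the closure form this body
	// only runs if some filter actually accepts DEBUG-level records.
	return "state: ..."
}

func main() {
	// 1. A string first argument is a Printf-style format.
	l4g.Debug("loaded %d objects in %s", 42, "1.2s")

	// 2. A func() string first argument is evaluated lazily, at most once.
	l4g.Debug(func() string { return expensiveDump() })

	// 3. Anything else: all arguments are formatted with %v, Sprint-style.
	l4g.Debug(42, "objects", []string{"a", "b"})

	// Warn/Error/Critical additionally return an error built from the same
	// message, so call sites can log and return in one statement.
	if err := l4g.Error("connect to %s failed", "example.com:443"); err != nil {
		_ = err // normally: return err
	}

	l4g.Close()
}
```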
- return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) - } - return nil -} - -// Utility for error log messages (returns an error for easy function returns) (see Debug() for parameter explanation) -// These functions will execute a closure exactly once, to build the error message for the return -// Wrapper for (*Logger).Error -func Error(arg0 interface{}, args ...interface{}) error { - const ( - lvl = ERROR - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - return errors.New(fmt.Sprintf(first, args...)) - case func() string: - // Log the closure (no other arguments used) - str := first() - Global.intLogf(lvl, "%s", str) - return errors.New(str) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) - } - return nil -} - -// Utility for critical log messages (returns an error for easy function returns) (see Debug() for parameter explanation) -// These functions will execute a closure exactly once, to build the error message for the return -// Wrapper for (*Logger).Critical -func Critical(arg0 interface{}, args ...interface{}) error { - const ( - lvl = CRITICAL - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - return errors.New(fmt.Sprintf(first, args...)) - case func() string: - // Log the closure (no other arguments used) - str := first() - Global.intLogf(lvl, "%s", str) - return errors.New(str) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) 
- return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) - } - return nil -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/utils/decode_auth.go b/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/utils/decode_auth.go deleted file mode 100644 index d2d1d1f5..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/utils/decode_auth.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "encoding/base64" - "flag" - "fmt" -) - -func main() { - var ntlmVersion = flag.Int("ntlm", 2, "NTLM version to try: 1 or 2") - flag.Parse() - var data string - fmt.Println("Paste the base64 encoded Authenticate message (with no line breaks):") - fmt.Scanf("%s", &data) - authenticateData, _ := base64.StdEncoding.DecodeString(data) - a, _ := ntlm.ParseAuthenticateMessage(authenticateData, *ntlmVersion) - fmt.Printf(a.String()) -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/utils/test_auth.go b/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/utils/test_auth.go deleted file mode 100644 index 2338ba10..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/utils/test_auth.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "encoding/base64" - "fmt" - "github.com/ThomsonReutersEikon/go-ntlm/ntlm" -) - -func main() { - // ntlm v2 - // challengeMessage := "TlRMTVNTUAACAAAAAAAAADgAAABVgphiPXSy0E6+HrMAAAAAAAAAAKIAogA4AAAABQEoCgAAAA8CAA4AUgBFAFUAVABFAFIAUwABABwAVQBLAEIAUAAtAEMAQgBUAFIATQBGAEUAMAA2AAQAFgBSAGUAdQB0AGUAcgBzAC4AbgBlAHQAAwA0AHUAawBiAHAALQBjAGIAdAByAG0AZgBlADAANgAuAFIAZQB1AHQAZQByAHMALgBuAGUAdAAFABYAUgBlAHUAdABlAHIAcwAuAG4AZQB0AAAAAAA=" - // authenticateMessage := "TlRMTVNTUAADAAAAGAAYALYAAADSANIAzgAAADQANABIAAAAIAAgAHwAAAAaABoAnAAAABAAEACgAQAAVYKQQgUCzg4AAAAPYQByAHIAYQB5ADEAMgAuAG0AcwBnAHQAcwB0AC4AcgBlAHUAdABlAHIAcwAuAGMAbwBtAHUAcwBlAHIAcwB0AHIAZQBzAHMAMQAwADAAMAAwADgATgBZAEMAVgBBADEAMgBTADIAQwBNAFMAQQBPYrLjU4h0YlWZeEoNvTJtBQMnnJuAeUwsP+vGmAHNRBpgZ+4ChQLqAQEAAAAAAACPFEIFjx7OAQUDJ5ybgHlMAAAAAAIADgBSAEUAVQBUAEUAUgBTAAEAHABVAEsAQgBQAC0AQwBCAFQAUgBNAEYARQAwADYABAAWAFIAZQB1AHQAZQByAHMALgBuAGUAdAADADQAdQBrAGIAcAAtAGMAYgB0AHIAbQBmAGUAMAA2AC4AUgBlAHUAdABlAHIAcwAuAG4AZQB0AAUAFgBSAGUAdQB0AGUAcgBzAC4AbgBlAHQAAAAAAAAAAAANuvnqD3K88ZpjkLleL0NW" - - //LCS v1 - //challengeMessage := "TlRMTVNTUAACAAAAAAAAADgAAADzgpjid08w9p89DLUAAAAAAAAAAPAA8AA4AAAABQLODgAAAA8CAA4AQQBSAFIAQQBZADEAMgABABYATgBZAEMAUwBNAFMARwA5ADkAMQAyAAQANABhAHIAcgBhAHkAMQAyAC4AbQBzAGcAdABzAHQALgByAGUAdQB0AGUAcgBzAC4AYwBvAG0AAwBMAE4AWQBDAFMATQBTAEcAOQA5ADEAMgAuAGEAcgByAGEAeQAxADIALgBtAHMAZwB0AHMAdAAuAHIAZQB1AHQAZQByAHMALgBjAG8AbQAFADQAYQByAHIAYQB5ADEAMgAuAG0AcwBnAHQAcwB0AC4AcgBlAHUAdABlAHIAcwAuAGMAbwBtAAAAAAA=" - //authenticateMessage := "TlRMTVNTUAADAAAAGAAYAKwAAAAYABgAxAAAAAAAAABYAAAANgA2AFgAAAAeAB4AjgAAABAAEADcAAAAVYKQYgYBsR0AAAAPUJSCwwcYcGpE0Zp9GsD3RDAANQAwADAANAA1AC4AcgBtAHcAYQB0AGUAcwB0AEAAcgBlAHUAdABlAHIAcwAuAGMAbwBtAFcASQBOAC0AMABEAEQAQQBCAEsAQwAxAFUASQA4ALIsDLYZktr3YlJDLyVT6GHgwNA+DFdM87IsDLYZktr3YlJDLyVT6GHgwNA+DFdM851g+vaa4CHvomwyYmjbB1M=" - - //US - //challengeMessage := "TlRMTVNTUAACAAAAAAAAADgAAABVgphisF5WgZrWn4MAAAAAAAAAAKIAogA4AAAABQEoCgAAAA8CAA4AUgBFAFUAVABFAFIAUwABABwAVQBLAEIAUAAtAEMAQgBUAFIATQBGAEUAMAA2AAQAFgBSAGUAdQB0AGUAcgBzAC4AbgBlAHQAAwA0AHUAawBiAHAALQBjAGIAdAByAG0AZgBlADAANgAuAFIAZQB1AHQAZQByAHMALgBuAGUAdAAFABYAUgBlAHUAdABlAHIAcwAuAG4AZQB0AAAAAAA=" - //authenticateMessage := 
"TlRMTVNTUAADAAAAGAAYAKwAAAAYABgAxAAAAAAAAABYAAAANgA2AFgAAAAeAB4AjgAAABAAEADcAAAAVYKQYgYBsR0AAAAPJc+NGJ4qgACnkkGb9J8RezAANQAwADAANAA1AC4AcgBtAHcAYQB0AGUAcwB0AEAAcgBlAHUAdABlAHIAcwAuAGMAbwBtAFcASQBOAC0AMABEAEQAQQBCAEsAQwAxAFUASQA4AJLPhCq8UHZjb5sEjtoaJtWBY2ZwNZyujpLPhCq8UHZjb5sEjtoaJtWBY2ZwNZyujtW8TsZdZ6PMc1ipWbL7VgY=" - - //US again - challengeMessage := "TlRMTVNTUAACAAAAAAAAADgAAABVgphiMx43owKH33MAAAAAAAAAAKIAogA4AAAABQEoCgAAAA8CAA4AUgBFAFUAVABFAFIAUwABABwAVQBLAEIAUAAtAEMAQgBUAFIATQBGAEUAMAA2AAQAFgBSAGUAdQB0AGUAcgBzAC4AbgBlAHQAAwA0AHUAawBiAHAALQBjAGIAdAByAG0AZgBlADAANgAuAFIAZQB1AHQAZQByAHMALgBuAGUAdAAFABYAUgBlAHUAdABlAHIAcwAuAG4AZQB0AAAAAAA=" - authenticateMessage := "TlRMTVNTUAADAAAAGAAYAKwAAAAYABgAxAAAAAAAAABYAAAANgA2AFgAAAAeAB4AjgAAABAAEADcAAAAVYKQYgYBsR0AAAAPukU9WmBJLdSLU2NvXjNgUzAANQAwADAANAA1AC4AcgBtAHcAYQB0AGUAcwB0AEAAcgBlAHUAdABlAHIAcwAuAGMAbwBtAFcASQBOAC0AMABEAEQAQQBCAEsAQwAxAFUASQA4AOLIAEYvI6zgw2+MBf8xHSTZhIfVaKIIFuLIAEYvI6zgw2+MBf8xHSTZhIfVaKIIFroZDwl770tY/oFQk38nnuI=" - - server, err := ntlm.CreateServerSession(ntlm.Version2, ntlm.ConnectionlessMode) - server.SetUserInfo("050045.rmwatest@reuters.com", "Welcome1", "") - - challengeData, _ := base64.StdEncoding.DecodeString(challengeMessage) - c, _ := ntlm.ParseChallengeMessage(challengeData) - - fmt.Println("----- Challenge Message ----- ") - fmt.Println(c.String()) - fmt.Println("----- END Challenge Message ----- ") - - authenticateData, _ := base64.StdEncoding.DecodeString(authenticateMessage) - var context ntlm.ServerSession - - msg, err := ntlm.ParseAuthenticateMessage(authenticateData, 2) - if err != nil { - msg2, newErr := ntlm.ParseAuthenticateMessage(authenticateData, 1) - if newErr != nil { - fmt.Printf("Error ParseAuthenticateMessage , %s", err) - return - } - - // Message parsed correctly as NTLMv1 so assume the session is v1 and reset the server session - newContext, err := ntlm.CreateServerSession(ntlm.Version1, ntlm.ConnectionlessMode) - newContext.SetUserInfo(server.GetUserInfo()) - if err != nil { - fmt.Println("Could not create NTLMv1 session") - return - } - - // Need the originally generated server challenge so we can process the response - newContext.SetServerChallenge(c.ServerChallenge) - // err = server.ProcessAuthenticateMessage(msg) - err = newContext.ProcessAuthenticateMessage(msg2) - if err != nil { - fmt.Printf("Could not process authenticate v1 message: %s\n", err) - return - } - // Set the security context to now be NTLMv1 - context = newContext - fmt.Println("----- Authenticate Message ----- ") - fmt.Println(msg2.String()) - fmt.Println("----- END Authenticate Message ----- ") - - } else { - context = server - // Need the server challenge to be set - server.SetServerChallenge(c.ServerChallenge) - - // err = server.ProcessAuthenticateMessage(msg) - err = context.ProcessAuthenticateMessage(msg) - if err != nil { - fmt.Printf("Could not process authenticate message: %s\n", err) - return - } - fmt.Println("----- Authenticate Message ----- ") - fmt.Println(msg.String()) - fmt.Println("----- END Authenticate Message ----- ") - - } - - fmt.Println("success") -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/.gitignore b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/.gitignore deleted file mode 100644 index 6e92f57d..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/.gitignore +++ /dev/null @@ -1 +0,0 @@ -tags diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/LICENSE b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/LICENSE deleted file mode 100644 
index 7093402b..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2010, Kyle Lemons . All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/README b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/README deleted file mode 100644 index 16d80ecb..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/README +++ /dev/null @@ -1,12 +0,0 @@ -Please see http://log4go.googlecode.com/ - -Installation: -- Run `goinstall log4go.googlecode.com/hg` - -Usage: -- Add the following import: -import l4g "log4go.googlecode.com/hg" - -Acknowledgements: -- pomack - For providing awesome patches to bring log4go up to the latest Go spec diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/config.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/config.go deleted file mode 100644 index 324dc16f..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/config.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
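The config.go file removed below parses filters with a pair of small structs (`xmlProperty`, `xmlFilter`) and `encoding/xml` struct tags. A self-contained sketch of just that mapping, with the struct definitions copied from the deleted file:

```go
package main

import (
	"encoding/xml"
	"fmt"
)

// These types mirror the ones defined in the deleted config.go.
type xmlProperty struct {
	Name  string `xml:"name,attr"`
	Value string `xml:",chardata"`
}

type xmlFilter struct {
	Enabled  string        `xml:"enabled,attr"`
	Tag      string        `xml:"tag"`
	Level    string        `xml:"level"`
	Type     string        `xml:"type"`
	Property []xmlProperty `xml:"property"`
}

type xmlLoggerConfig struct {
	Filter []xmlFilter `xml:"filter"`
}

func main() {
	doc := `<logging>
  <filter enabled="true">
    <tag>file</tag>
    <type>file</type>
    <level>FINEST</level>
    <property name="filename">test.log</property>
  </filter>
</logging>`

	var cfg xmlLoggerConfig
	if err := xml.Unmarshal([]byte(doc), &cfg); err != nil {
		panic(err)
	}
	f := cfg.Filter[0]
	fmt.Printf("tag=%s type=%s level=%s %s=%s\n",
		f.Tag, f.Type, f.Level, f.Property[0].Name, f.Property[0].Value)
	// Output: tag=file type=file level=FINEST filename=test.log
}
```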
- -package log4go - -import ( - "encoding/xml" - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -type xmlProperty struct { - Name string `xml:"name,attr"` - Value string `xml:",chardata"` -} - -type xmlFilter struct { - Enabled string `xml:"enabled,attr"` - Tag string `xml:"tag"` - Level string `xml:"level"` - Type string `xml:"type"` - Property []xmlProperty `xml:"property"` -} - -type xmlLoggerConfig struct { - Filter []xmlFilter `xml:"filter"` -} - -// Load XML configuration; see examples/example.xml for documentation -func (log Logger) LoadConfiguration(filename string) { - log.Close() - - // Open the configuration file - fd, err := os.Open(filename) - if err != nil { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not open %q for reading: %s\n", filename, err) - os.Exit(1) - } - - contents, err := ioutil.ReadAll(fd) - if err != nil { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not read %q: %s\n", filename, err) - os.Exit(1) - } - - xc := new(xmlLoggerConfig) - if err := xml.Unmarshal(contents, xc); err != nil { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not parse XML configuration in %q: %s\n", filename, err) - os.Exit(1) - } - - for _, xmlfilt := range xc.Filter { - var filt LogWriter - var lvl level - bad, good, enabled := false, true, false - - // Check required children - if len(xmlfilt.Enabled) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required attribute %s for filter missing in %s\n", "enabled", filename) - bad = true - } else { - enabled = xmlfilt.Enabled != "false" - } - if len(xmlfilt.Tag) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "tag", filename) - bad = true - } - if len(xmlfilt.Type) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "type", filename) - bad = true - } - if len(xmlfilt.Level) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter missing in %s\n", "level", filename) - bad = true - } - - switch xmlfilt.Level { - case "FINEST": - lvl = FINEST - case "FINE": - lvl = FINE - case "DEBUG": - lvl = DEBUG - case "TRACE": - lvl = TRACE - case "INFO": - lvl = INFO - case "WARNING": - lvl = WARNING - case "ERROR": - lvl = ERROR - case "CRITICAL": - lvl = CRITICAL - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required child <%s> for filter has unknown value in %s: %s\n", "level", filename, xmlfilt.Level) - bad = true - } - - // Just so all of the required attributes are errored at the same time if missing - if bad { - os.Exit(1) - } - - switch xmlfilt.Type { - case "console": - filt, good = xmlToConsoleLogWriter(filename, xmlfilt.Property, enabled) - case "file": - filt, good = xmlToFileLogWriter(filename, xmlfilt.Property, enabled) - case "xml": - filt, good = xmlToXMLLogWriter(filename, xmlfilt.Property, enabled) - case "socket": - filt, good = xmlToSocketLogWriter(filename, xmlfilt.Property, enabled) - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Could not load XML configuration in %s: unknown filter type \"%s\"\n", filename, xmlfilt.Type) - os.Exit(1) - } - - // Just so all of the required params are errored at the same time if wrong - if !good { - os.Exit(1) - } - - // If we're disabled (syntax and correctness checks only), don't add to logger - if !enabled { - continue - } - - log[xmlfilt.Tag] = &Filter{lvl, filt} - } -} - -func xmlToConsoleLogWriter(filename string, props []xmlProperty, enabled bool) 
(ConsoleLogWriter, bool) { - // Parse properties - for _, prop := range props { - switch prop.Name { - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for console filter in %s\n", prop.Name, filename) - } - } - - return NewConsoleLogWriter(), true -} - -// Parse a number with K/M/G suffixes based on thousands (1000) or 2^10 (1024) -func strToNumSuffix(str string, mult int) int { - num := 1 - if len(str) > 1 { - switch str[len(str)-1] { - case 'G', 'g': - num *= mult - fallthrough - case 'M', 'm': - num *= mult - fallthrough - case 'K', 'k': - num *= mult - str = str[0 : len(str)-1] - } - } - parsed, _ := strconv.Atoi(str) - return parsed * num -} -func xmlToFileLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { - file := "" - format := "[%D %T] [%L] (%S) %M" - maxlines := 0 - maxsize := 0 - daily := false - rotate := false - - // Parse properties - for _, prop := range props { - switch prop.Name { - case "filename": - file = strings.Trim(prop.Value, " \r\n") - case "format": - format = strings.Trim(prop.Value, " \r\n") - case "maxlines": - maxlines = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) - case "maxsize": - maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) - case "daily": - daily = strings.Trim(prop.Value, " \r\n") != "false" - case "rotate": - rotate = strings.Trim(prop.Value, " \r\n") != "false" - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) - } - } - - // Check properties - if len(file) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "filename", filename) - return nil, false - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - flw := NewFileLogWriter(file, rotate) - flw.SetFormat(format) - flw.SetRotateLines(maxlines) - flw.SetRotateSize(maxsize) - flw.SetRotateDaily(daily) - return flw, true -} - -func xmlToXMLLogWriter(filename string, props []xmlProperty, enabled bool) (*FileLogWriter, bool) { - file := "" - maxrecords := 0 - maxsize := 0 - daily := false - rotate := false - - // Parse properties - for _, prop := range props { - switch prop.Name { - case "filename": - file = strings.Trim(prop.Value, " \r\n") - case "maxrecords": - maxrecords = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1000) - case "maxsize": - maxsize = strToNumSuffix(strings.Trim(prop.Value, " \r\n"), 1024) - case "daily": - daily = strings.Trim(prop.Value, " \r\n") != "false" - case "rotate": - rotate = strings.Trim(prop.Value, " \r\n") != "false" - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for xml filter in %s\n", prop.Name, filename) - } - } - - // Check properties - if len(file) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for xml filter missing in %s\n", "filename", filename) - return nil, false - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - xlw := NewXMLLogWriter(file, rotate) - xlw.SetRotateLines(maxrecords) - xlw.SetRotateSize(maxsize) - xlw.SetRotateDaily(daily) - return xlw, true -} - -func xmlToSocketLogWriter(filename string, props []xmlProperty, enabled bool) (SocketLogWriter, bool) { - endpoint := "" - protocol := "udp" - - // Parse properties - for _, prop := range props { - switch prop.Name { - case "endpoint": - endpoint = strings.Trim(prop.Value, " 
\r\n") - case "protocol": - protocol = strings.Trim(prop.Value, " \r\n") - default: - fmt.Fprintf(os.Stderr, "LoadConfiguration: Warning: Unknown property \"%s\" for file filter in %s\n", prop.Name, filename) - } - } - - // Check properties - if len(endpoint) == 0 { - fmt.Fprintf(os.Stderr, "LoadConfiguration: Error: Required property \"%s\" for file filter missing in %s\n", "endpoint", filename) - return nil, false - } - - // If it's disabled, we're just checking syntax - if !enabled { - return nil, true - } - - return NewSocketLogWriter(protocol, endpoint), true -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/ConsoleLogWriter_Manual.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/ConsoleLogWriter_Manual.go deleted file mode 100644 index 7169ec97..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/ConsoleLogWriter_Manual.go +++ /dev/null @@ -1,13 +0,0 @@ -package main - -import ( - "time" -) - -import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go" - -func main() { - log := l4g.NewLogger() - log.AddFilter("stdout", l4g.DEBUG, l4g.NewConsoleLogWriter()) - log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/FileLogWriter_Manual.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/FileLogWriter_Manual.go deleted file mode 100644 index 872bb45b..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/FileLogWriter_Manual.go +++ /dev/null @@ -1,57 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "io" - "os" - "time" -) - -import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go" - -const ( - filename = "flw.log" -) - -func main() { - // Get a new logger instance - log := l4g.NewLogger() - - // Create a default logger that is logging messages of FINE or higher - log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter(filename, false)) - log.Close() - - /* Can also specify manually via the following: (these are the defaults) */ - flw := l4g.NewFileLogWriter(filename, false) - flw.SetFormat("[%D %T] [%L] (%S) %M") - flw.SetRotate(false) - flw.SetRotateSize(0) - flw.SetRotateLines(0) - flw.SetRotateDaily(false) - log.AddFilter("file", l4g.FINE, flw) - - // Log some experimental messages - log.Finest("Everything is created now (notice that I will not be printing to the file)") - log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02")) - log.Critical("Time to close out!") - - // Close the log - log.Close() - - // Print what was logged to the file (yes, I know I'm skipping error checking) - fd, _ := os.Open(filename) - in := bufio.NewReader(fd) - fmt.Print("Messages logged to file were: (line numbers not included)\n") - for lineno := 1; ; lineno++ { - line, err := in.ReadString('\n') - if err == io.EOF { - break - } - fmt.Printf("%3d:\t%s", lineno, line) - } - fd.Close() - - // Remove the file so it's not lying around - os.Remove(filename) -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/SimpleNetLogServer.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/SimpleNetLogServer.go deleted file mode 100644 index 83c80ad1..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/SimpleNetLogServer.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "net" - "os" -) - -var ( - port = flag.String("p", "12124", "Port number to listen on") -) - -func e(err error) { 
-	if err != nil {
-		fmt.Printf("Erroring out: %s\n", err)
-		os.Exit(1)
-	}
-}
-
-func main() {
-	flag.Parse()
-
-	// Bind to the port
-	bind, err := net.ResolveUDPAddr("udp", "0.0.0.0:"+*port)
-	e(err)
-
-	// Create listener
-	listener, err := net.ListenUDP("udp", bind)
-	e(err)
-
-	fmt.Printf("Listening to port %s...\n", *port)
-	for {
-		// read into a new buffer
-		buffer := make([]byte, 1024)
-		_, _, err := listener.ReadFrom(buffer)
-		e(err)
-
-		// log to standard output
-		fmt.Println(string(buffer))
-	}
-}
diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/SocketLogWriter_Manual.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/SocketLogWriter_Manual.go
deleted file mode 100644
index d553699f..00000000
--- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/SocketLogWriter_Manual.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package main
-
-import (
-	"time"
-)
-
-import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go"
-
-func main() {
-	log := l4g.NewLogger()
-	log.AddFilter("network", l4g.FINEST, l4g.NewSocketLogWriter("udp", "192.168.1.255:12124"))
-
-	// Run `nc -u -l -p 12124` or similar before you run this to see the following message
-	log.Info("The time is now: %s", time.Now().Format("15:04:05 MST 2006/01/02"))
-
-	// This makes sure the output stream buffer is written
-	log.Close()
-}
diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/XMLConfigurationExample.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/XMLConfigurationExample.go
deleted file mode 100644
index d6485753..00000000
--- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/XMLConfigurationExample.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package main
-
-import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go"
-
-func main() {
-	// Load the configuration (isn't this easy?)
-	l4g.LoadConfiguration("example.xml")
-
-	// And now we're ready!
-	l4g.Finest("This will only go to those of you really cool UDP kids! If you change enabled=true.")
-	l4g.Debug("Oh no! %d + %d = %d!", 2, 2, 2+2)
-	l4g.Info("About that time, eh chaps?")
-}
diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/example.xml b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/example.xml
deleted file mode 100644
index e791278c..00000000
--- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/examples/example.xml
+++ /dev/null
@@ -1,47 +0,0 @@
-<logging>
-  <filter enabled="true">
-    <tag>stdout</tag>
-    <type>console</type>
-    <!-- level is (:?FINEST|FINE|DEBUG|TRACE|INFO|WARNING|ERROR) -->
-    <level>DEBUG</level>
-  </filter>
-  <filter enabled="true">
-    <tag>file</tag>
-    <type>file</type>
-    <level>FINEST</level>
-    <property name="filename">test.log</property>
-    <!--
-      %T - Time (15:04:05 MST)
-      %t - Time (15:04)
-      %D - Date (2006/01/02)
-      %d - Date (01/02/06)
-      %L - Level (FNST, FINE, DEBG, TRAC, WARN, EROR, CRIT)
-      %S - Source
-      %M - Message
-      It ignores unknown format strings (and removes them)
-      Recommended: "[%D %T] [%L] (%S) %M"
-    -->
-    <property name="format">[%D %T] [%L] (%S) %M</property>
-    <property name="rotate">false</property> <!-- true enables log rotation, otherwise append -->
-    <property name="maxsize">0M</property> <!-- \d+[KMG]? Suffixes are in terms of 2**10 -->
-    <property name="maxlines">0K</property> <!-- \d+[KMG]? Suffixes are in terms of thousands -->
-    <property name="daily">true</property> <!-- Automatically rotates when a log message is written after midnight -->
-  </filter>
-  <filter enabled="true">
-    <tag>xmllog</tag>
-    <type>xml</type>
-    <level>TRACE</level>
-    <property name="filename">trace.xml</property>
-    <property name="rotate">true</property> <!-- true enables log rotation, otherwise append -->
-    <property name="maxsize">100M</property> <!-- \d+[KMG]? Suffixes are in terms of 2**10 -->
-    <property name="maxrecords">6K</property> <!-- \d+[KMG]? Suffixes are in terms of thousands -->
-    <property name="daily">false</property> <!-- Automatically rotates when a log message is written after midnight -->
-  </filter>
-  <filter enabled="false"><!-- enabled=false means this logger won't actually be created -->
-    <tag>donotopen</tag>
-    <type>socket</type>
-    <level>FINEST</level>
-    <property name="endpoint">192.168.1.255:12124</property> <!-- recieve the log messages on this endpoint -->
-    <property name="protocol">udp</property> <!-- this must be udp -->
-  </filter>
-</logging>
diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/filelog.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/filelog.go
deleted file mode 100644
index 9cbd815d..00000000
--- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/filelog.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright (C) 2010, Kyle Lemons <kyle@kylelemons.net>. All rights reserved.
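filelog.go, removed below, rotates by renaming the old file to the first free `.001`–`.999` suffix before reopening. A standalone sketch of only that scan, mirroring the loop in `intRotate`:

```go
package main

import (
	"fmt"
	"os"
)

// nextRotateName finds the first free name of the form base.001 ... base.999,
// mirroring the scan in FileLogWriter.intRotate below.
func nextRotateName(base string) (string, error) {
	for num := 1; num <= 999; num++ {
		fname := base + fmt.Sprintf(".%03d", num)
		if _, err := os.Lstat(fname); err != nil {
			return fname, nil // name is free
		}
	}
	return "", fmt.Errorf("cannot find free log number to rename %s", base)
}

func main() {
	name, err := nextRotateName("flw.log")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("would rotate to:", name)
}
```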
- -package log4go - -import ( - "os" - "fmt" - "time" -) - -// This log writer sends output to a file -type FileLogWriter struct { - rec chan *LogRecord - rot chan bool - - // The opened file - filename string - file *os.File - - // The logging format - format string - - // File header/trailer - header, trailer string - - // Rotate at linecount - maxlines int - maxlines_curlines int - - // Rotate at size - maxsize int - maxsize_cursize int - - // Rotate daily - daily bool - daily_opendate int - - // Keep old logfiles (.001, .002, etc) - rotate bool -} - -// This is the FileLogWriter's output method -func (w *FileLogWriter) LogWrite(rec *LogRecord) { - w.rec <- rec -} - -func (w *FileLogWriter) Close() { - close(w.rec) -} - -// NewFileLogWriter creates a new LogWriter which writes to the given file and -// has rotation enabled if rotate is true. -// -// If rotate is true, any time a new log file is opened, the old one is renamed -// with a .### extension to preserve it. The various Set* methods can be used -// to configure log rotation based on lines, size, and daily. -// -// The standard log-line format is: -// [%D %T] [%L] (%S) %M -func NewFileLogWriter(fname string, rotate bool) *FileLogWriter { - w := &FileLogWriter{ - rec: make(chan *LogRecord, LogBufferLength), - rot: make(chan bool), - filename: fname, - format: "[%D %T] [%L] (%S) %M", - rotate: rotate, - } - - // open the file for the first time - if err := w.intRotate(); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return nil - } - - go func() { - defer func() { - if w.file != nil { - fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) - w.file.Close() - } - }() - - for { - select { - case <-w.rot: - if err := w.intRotate(); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return - } - case rec, ok := <-w.rec: - if !ok { - return - } - now := time.Now() - if (w.maxlines > 0 && w.maxlines_curlines >= w.maxlines) || - (w.maxsize > 0 && w.maxsize_cursize >= w.maxsize) || - (w.daily && now.Day() != w.daily_opendate) { - if err := w.intRotate(); err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return - } - } - - // Perform the write - n, err := fmt.Fprint(w.file, FormatLogRecord(w.format, rec)) - if err != nil { - fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.filename, err) - return - } - - // Update the counts - w.maxlines_curlines++ - w.maxsize_cursize += n - } - } - }() - - return w -} - -// Request that the logs rotate -func (w *FileLogWriter) Rotate() { - w.rot <- true -} - -// If this is called in a threaded context, it MUST be synchronized -func (w *FileLogWriter) intRotate() error { - // Close any log file that may be open - if w.file != nil { - fmt.Fprint(w.file, FormatLogRecord(w.trailer, &LogRecord{Created: time.Now()})) - w.file.Close() - } - - // If we are keeping log files, move it to the next available number - if w.rotate { - _, err := os.Lstat(w.filename) - if err == nil { // file exists - // Find the next available number - num := 1 - fname := "" - for ; err == nil && num <= 999; num++ { - fname = w.filename + fmt.Sprintf(".%03d", num) - _, err = os.Lstat(fname) - } - // return error if the last file checked still existed - if err == nil { - return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.filename) - } - - // Rename the file to its newfound home - err = os.Rename(w.filename, fname) - if err != nil { - return fmt.Errorf("Rotate: %s\n", err) - } - } - } - 
- // Open the log file - fd, err := os.OpenFile(w.filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660) - if err != nil { - return err - } - w.file = fd - - now := time.Now() - fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: now})) - - // Set the daily open date to the current date - w.daily_opendate = now.Day() - - // initialize rotation values - w.maxlines_curlines = 0 - w.maxsize_cursize = 0 - - return nil -} - -// Set the logging format (chainable). Must be called before the first log -// message is written. -func (w *FileLogWriter) SetFormat(format string) *FileLogWriter { - w.format = format - return w -} - -// Set the logfile header and footer (chainable). Must be called before the first log -// message is written. These are formatted similar to the FormatLogRecord (e.g. -// you can use %D and %T in your header/footer for date and time). -func (w *FileLogWriter) SetHeadFoot(head, foot string) *FileLogWriter { - w.header, w.trailer = head, foot - if w.maxlines_curlines == 0 { - fmt.Fprint(w.file, FormatLogRecord(w.header, &LogRecord{Created: time.Now()})) - } - return w -} - -// Set rotate at linecount (chainable). Must be called before the first log -// message is written. -func (w *FileLogWriter) SetRotateLines(maxlines int) *FileLogWriter { - //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateLines: %v\n", maxlines) - w.maxlines = maxlines - return w -} - -// Set rotate at size (chainable). Must be called before the first log message -// is written. -func (w *FileLogWriter) SetRotateSize(maxsize int) *FileLogWriter { - //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateSize: %v\n", maxsize) - w.maxsize = maxsize - return w -} - -// Set rotate daily (chainable). Must be called before the first log message is -// written. -func (w *FileLogWriter) SetRotateDaily(daily bool) *FileLogWriter { - //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotateDaily: %v\n", daily) - w.daily = daily - return w -} - -// SetRotate changes whether or not the old logs are kept. (chainable) Must be -// called before the first log message is written. If rotate is false, the -// files are overwritten; otherwise, they are rotated to another file before the -// new log is opened. -func (w *FileLogWriter) SetRotate(rotate bool) *FileLogWriter { - //fmt.Fprintf(os.Stderr, "FileLogWriter.SetRotate: %v\n", rotate) - w.rotate = rotate - return w -} - -// NewXMLLogWriter is a utility method for creating a FileLogWriter set up to -// output XML record log messages instead of line-based ones. -func NewXMLLogWriter(fname string, rotate bool) *FileLogWriter { - return NewFileLogWriter(fname, rotate).SetFormat( - ` - %D %T - %S - %M - `).SetHeadFoot("", "") -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/log4go.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/log4go.go deleted file mode 100644 index ab4e857f..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/log4go.go +++ /dev/null @@ -1,484 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -// Package log4go provides level-based and highly configurable logging. -// -// Enhanced Logging -// -// This is inspired by the logging functionality in Java. Essentially, you create a Logger -// object and create output filters for it. You can send whatever you want to the Logger, -// and it will filter that based on your settings and send it to the outputs. 
This way, you -// can put as much debug code in your program as you want, and when you're done you can filter -// out the mundane messages so only the important ones show up. -// -// Utility functions are provided to make life easier. Here is some example code to get started: -// -// log := log4go.NewLogger() -// log.AddFilter("stdout", log4go.DEBUG, log4go.NewConsoleLogWriter()) -// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true)) -// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) -// -// The first two lines can be combined with the utility NewDefaultLogger: -// -// log := log4go.NewDefaultLogger(log4go.DEBUG) -// log.AddFilter("log", log4go.FINE, log4go.NewFileLogWriter("example.log", true)) -// log.Info("The time is now: %s", time.LocalTime().Format("15:04:05 MST 2006/01/02")) -// -// Usage notes: -// - The ConsoleLogWriter does not display the source of the message to standard -// output, but the FileLogWriter does. -// - The utility functions (Info, Debug, Warn, etc) derive their source from the -// calling function, and this incurs extra overhead. -// -// Changes from 2.0: -// - The external interface has remained mostly stable, but a lot of the -// internals have been changed, so if you depended on any of this or created -// your own LogWriter, then you will probably have to update your code. In -// particular, Logger is now a map and ConsoleLogWriter is now a channel -// behind-the-scenes, and the LogWrite method no longer has return values. -// -// Future work: (please let me know if you think I should work on any of these particularly) -// - Log file rotation -// - Logging configuration files ala log4j -// - Have the ability to remove filters? -// - Have GetInfoChannel, GetDebugChannel, etc return a chan string that allows -// for another method of logging -// - Add an XML filter type -package log4go - -import ( - "errors" - "os" - "fmt" - "time" - "strings" - "runtime" -) - -// Version information -const ( - L4G_VERSION = "log4go-v3.0.1" - L4G_MAJOR = 3 - L4G_MINOR = 0 - L4G_BUILD = 1 -) - -/****** Constants ******/ - -// These are the integer logging levels used by the logger -type level int - -const ( - FINEST level = iota - FINE - DEBUG - TRACE - INFO - WARNING - ERROR - CRITICAL -) - -// Logging level strings -var ( - levelStrings = [...]string{"FNST", "FINE", "DEBG", "TRAC", "INFO", "WARN", "EROR", "CRIT"} -) - -func (l level) String() string { - if l < 0 || int(l) > len(levelStrings) { - return "UNKNOWN" - } - return levelStrings[int(l)] -} - -/****** Variables ******/ -var ( - // LogBufferLength specifies how many log messages a particular log4go - // logger can buffer at a time before writing them. - LogBufferLength = 32 -) - -/****** LogRecord ******/ - -// A LogRecord contains all of the pertinent information for each message -type LogRecord struct { - Level level // The log level - Created time.Time // The time at which the log message was created (nanoseconds) - Source string // The message source - Message string // The log message -} - -/****** LogWriter ******/ - -// This is an interface for anything that should be able to write logs -type LogWriter interface { - // This will be called to log a LogRecord message. - LogWrite(rec *LogRecord) - - // This should clean up anything lingering about the LogWriter, as it is called before - // the LogWriter is removed. LogWrite should not be called after Close. 
- Close() -} - -/****** Logger ******/ - -// A Filter represents the log level below which no log records are written to -// the associated LogWriter. -type Filter struct { - Level level - LogWriter -} - -// A Logger represents a collection of Filters through which log messages are -// written. -type Logger map[string]*Filter - -// Create a new logger. -// -// DEPRECATED: Use make(Logger) instead. -func NewLogger() Logger { - os.Stderr.WriteString("warning: use of deprecated NewLogger\n") - return make(Logger) -} - -// Create a new logger with a "stdout" filter configured to send log messages at -// or above lvl to standard output. -// -// DEPRECATED: use NewDefaultLogger instead. -func NewConsoleLogger(lvl level) Logger { - os.Stderr.WriteString("warning: use of deprecated NewConsoleLogger\n") - return Logger{ - "stdout": &Filter{lvl, NewConsoleLogWriter()}, - } -} - -// Create a new logger with a "stdout" filter configured to send log messages at -// or above lvl to standard output. -func NewDefaultLogger(lvl level) Logger { - return Logger{ - "stdout": &Filter{lvl, NewConsoleLogWriter()}, - } -} - -// Closes all log writers in preparation for exiting the program or a -// reconfiguration of logging. Calling this is not really imperative, unless -// you want to guarantee that all log messages are written. Close removes -// all filters (and thus all LogWriters) from the logger. -func (log Logger) Close() { - // Close all open loggers - for name, filt := range log { - filt.Close() - delete(log, name) - } -} - -// Add a new LogWriter to the Logger which will only log messages at lvl or -// higher. This function should not be called from multiple goroutines. -// Returns the logger for chaining. -func (log Logger) AddFilter(name string, lvl level, writer LogWriter) Logger { - log[name] = &Filter{lvl, writer} - return log -} - -/******* Logging *******/ -// Send a formatted log message internally -func (log Logger) intLogf(lvl level, format string, args ...interface{}) { - skip := true - - // Determine if any logging will be done - for _, filt := range log { - if lvl >= filt.Level { - skip = false - break - } - } - if skip { - return - } - - // Determine caller func - pc, _, lineno, ok := runtime.Caller(2) - src := "" - if ok { - src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) - } - - msg := format - if len(args) > 0 { - msg = fmt.Sprintf(format, args...) - } - - // Make the log record - rec := &LogRecord{ - Level: lvl, - Created: time.Now(), - Source: src, - Message: msg, - } - - // Dispatch the logs - for _, filt := range log { - if lvl < filt.Level { - continue - } - filt.LogWrite(rec) - } -} - -// Send a closure log message internally -func (log Logger) intLogc(lvl level, closure func() string) { - skip := true - - // Determine if any logging will be done - for _, filt := range log { - if lvl >= filt.Level { - skip = false - break - } - } - if skip { - return - } - - // Determine caller func - pc, _, lineno, ok := runtime.Caller(2) - src := "" - if ok { - src = fmt.Sprintf("%s:%d", runtime.FuncForPC(pc).Name(), lineno) - } - - // Make the log record - rec := &LogRecord{ - Level: lvl, - Created: time.Now(), - Source: src, - Message: closure(), - } - - // Dispatch the logs - for _, filt := range log { - if lvl < filt.Level { - continue - } - filt.LogWrite(rec) - } -} - -// Send a log message with manual level, source, and message. 
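A Logger here is a plain map of named Filters, and each log call is gated twice: a cheap pass to see whether any filter accepts the level, then per-filter dispatch. A short sketch wiring two filters at different levels, again using the vendored import path:

```go
package main

import l4g "github.com/github/git-lfs/vendor/_nuts/code.google.com/p/log4go"

func main() {
	log := make(l4g.Logger)

	// "stdout" sees INFO and above; "file" sees everything down to FINE.
	// (NewFileLogWriter creates example.log in the working directory.)
	log.AddFilter("stdout", l4g.INFO, l4g.NewConsoleLogWriter())
	log.AddFilter("file", l4g.FINE, l4g.NewFileLogWriter("example.log", false))

	log.Fine("only the file filter records this")
	log.Info("both filters record this")

	// Close flushes and removes every filter; nothing may be logged after.
	log.Close()
}
```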
-func (log Logger) Log(lvl level, source, message string) { - skip := true - - // Determine if any logging will be done - for _, filt := range log { - if lvl >= filt.Level { - skip = false - break - } - } - if skip { - return - } - - // Make the log record - rec := &LogRecord{ - Level: lvl, - Created: time.Now(), - Source: source, - Message: message, - } - - // Dispatch the logs - for _, filt := range log { - if lvl < filt.Level { - continue - } - filt.LogWrite(rec) - } -} - -// Logf logs a formatted log message at the given log level, using the caller as -// its source. -func (log Logger) Logf(lvl level, format string, args ...interface{}) { - log.intLogf(lvl, format, args...) -} - -// Logc logs a string returned by the closure at the given log level, using the caller as -// its source. If no log message would be written, the closure is never called. -func (log Logger) Logc(lvl level, closure func() string) { - log.intLogc(lvl, closure) -} - -// Finest logs a message at the finest log level. -// See Debug for an explanation of the arguments. -func (log Logger) Finest(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINEST - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Fine logs a message at the fine log level. -// See Debug for an explanation of the arguments. -func (log Logger) Fine(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Debug is a utility method for debug log messages. -// The behavior of Debug depends on the first argument: -// - arg0 is a string -// When given a string as the first argument, this behaves like Logf but with -// the DEBUG log level: the first argument is interpreted as a format for the -// latter arguments. -// - arg0 is a func()string -// When given a closure of type func()string, this logs the string returned by -// the closure iff it will be logged. The closure runs at most one time. -// - arg0 is interface{} -// When given anything else, the log message will be each of the arguments -// formatted with %v and separated by spaces (ala Sprint). -func (log Logger) Debug(arg0 interface{}, args ...interface{}) { - const ( - lvl = DEBUG - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Trace logs a message at the trace log level. -// See Debug for an explanation of the arguments. -func (log Logger) Trace(arg0 interface{}, args ...interface{}) { - const ( - lvl = TRACE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) 
- case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Info logs a message at the info log level. -// See Debug for an explanation of the arguments. -func (log Logger) Info(arg0 interface{}, args ...interface{}) { - const ( - lvl = INFO - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - log.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - log.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - log.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Warn logs a message at the warning log level and returns the formatted error. -// At the warning level and higher, there is no performance benefit if the -// message is not actually logged, because all formats are processed and all -// closures are executed to format the error message. -// See Debug for further explanation of the arguments. -func (log Logger) Warn(arg0 interface{}, args ...interface{}) error { - const ( - lvl = WARNING - ) - var msg string - switch first := arg0.(type) { - case string: - // Use the string as a format string - msg = fmt.Sprintf(first, args...) - case func() string: - // Log the closure (no other arguments used) - msg = first() - default: - // Build a format string so that it will be similar to Sprint - msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - } - log.intLogf(lvl, msg) - return errors.New(msg) -} - -// Error logs a message at the error log level and returns the formatted error, -// See Warn for an explanation of the performance and Debug for an explanation -// of the parameters. -func (log Logger) Error(arg0 interface{}, args ...interface{}) error { - const ( - lvl = ERROR - ) - var msg string - switch first := arg0.(type) { - case string: - // Use the string as a format string - msg = fmt.Sprintf(first, args...) - case func() string: - // Log the closure (no other arguments used) - msg = first() - default: - // Build a format string so that it will be similar to Sprint - msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - } - log.intLogf(lvl, msg) - return errors.New(msg) -} - -// Critical logs a message at the critical log level and returns the formatted error, -// See Warn for an explanation of the performance and Debug for an explanation -// of the parameters. -func (log Logger) Critical(arg0 interface{}, args ...interface{}) error { - const ( - lvl = CRITICAL - ) - var msg string - switch first := arg0.(type) { - case string: - // Use the string as a format string - msg = fmt.Sprintf(first, args...) - case func() string: - // Log the closure (no other arguments used) - msg = first() - default: - // Build a format string so that it will be similar to Sprint - msg = fmt.Sprintf(fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - } - log.intLogf(lvl, msg) - return errors.New(msg) -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/log4go_test.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/log4go_test.go deleted file mode 100644 index 6f550649..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/log4go_test.go +++ /dev/null @@ -1,537 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. 
- -package log4go - -import ( - "crypto/md5" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "testing" - "time" -) - -const testLogFile = "_logtest.log" - -var now time.Time = time.Unix(0, 1234567890123456789).In(time.UTC) - -func newLogRecord(lvl level, src string, msg string) *LogRecord { - return &LogRecord{ - Level: lvl, - Source: src, - Created: now, - Message: msg, - } -} - -func TestELog(t *testing.T) { - fmt.Printf("Testing %s\n", L4G_VERSION) - lr := newLogRecord(CRITICAL, "source", "message") - if lr.Level != CRITICAL { - t.Errorf("Incorrect level: %d should be %d", lr.Level, CRITICAL) - } - if lr.Source != "source" { - t.Errorf("Incorrect source: %s should be %s", lr.Source, "source") - } - if lr.Message != "message" { - t.Errorf("Incorrect message: %s should be %s", lr.Source, "message") - } -} - -var formatTests = []struct { - Test string - Record *LogRecord - Formats map[string]string -}{ - { - Test: "Standard formats", - Record: &LogRecord{ - Level: ERROR, - Source: "source", - Message: "message", - Created: now, - }, - Formats: map[string]string{ - // TODO(kevlar): How can I do this so it'll work outside of PST? - FORMAT_DEFAULT: "[2009/02/13 23:31:30 UTC] [EROR] (source) message\n", - FORMAT_SHORT: "[23:31 02/13/09] [EROR] message\n", - FORMAT_ABBREV: "[EROR] message\n", - }, - }, -} - -func TestFormatLogRecord(t *testing.T) { - for _, test := range formatTests { - name := test.Test - for fmt, want := range test.Formats { - if got := FormatLogRecord(fmt, test.Record); got != want { - t.Errorf("%s - %s:", name, fmt) - t.Errorf(" got %q", got) - t.Errorf(" want %q", want) - } - } - } -} - -var logRecordWriteTests = []struct { - Test string - Record *LogRecord - Console string -}{ - { - Test: "Normal message", - Record: &LogRecord{ - Level: CRITICAL, - Source: "source", - Message: "message", - Created: now, - }, - Console: "[Feb 13 23:31:30.123456] [CRIT] message\n", - }, -} - -func TestConsoleLogWriter(t *testing.T) { - r, w := io.Pipe() - stdout = w - console := NewConsoleLogWriter() - - // Write - go func() { - for _, test := range logRecordWriteTests { - console.LogWrite(test.Record) - } - }() - // Read and verify - buf := make([]byte, 1024) - for _, test := range logRecordWriteTests { - name := test.Test - n, _ := r.Read(buf) - if got, want := string(buf[:n]), test.Console; got != want { - t.Errorf("%s: got %q", name, got) - t.Errorf("%s: want %q", name, want) - } - } - - stdout = os.Stdout -} - -func TestFileLogWriter(t *testing.T) { - defer func(buflen int) { - LogBufferLength = buflen - }(LogBufferLength) - LogBufferLength = 0 - - w := NewFileLogWriter(testLogFile, false) - if w == nil { - t.Fatalf("Invalid return: w should not be nil") - } - defer os.Remove(testLogFile) - - w.LogWrite(newLogRecord(CRITICAL, "source", "message")) - w.Close() - runtime.Gosched() - - if contents, err := ioutil.ReadFile(testLogFile); err != nil { - t.Errorf("read(%q): %s", testLogFile, err) - } else if len(contents) != 50 { - t.Errorf("malformed filelog: %q (%d bytes)", string(contents), len(contents)) - } -} - -func TestXMLLogWriter(t *testing.T) { - defer func(buflen int) { - LogBufferLength = buflen - }(LogBufferLength) - LogBufferLength = 0 - - w := NewXMLLogWriter(testLogFile, false) - if w == nil { - t.Fatalf("Invalid return: w should not be nil") - } - defer os.Remove(testLogFile) - - w.LogWrite(newLogRecord(CRITICAL, "source", "message")) - w.Close() - runtime.Gosched() - - if contents, err := ioutil.ReadFile(testLogFile); err != nil { - t.Errorf("read(%q): 
%s", testLogFile, err) - } else if len(contents) != 185 { - t.Errorf("malformed xmllog: %q (%d bytes)", string(contents), len(contents)) - } -} - -func TestLogger(t *testing.T) { - sl := NewDefaultLogger(WARNING) - if sl == nil { - t.Fatalf("NewDefaultLogger should never return nil") - } - if lw, exist := sl["stdout"]; lw == nil || exist != true { - t.Fatalf("NewDefaultLogger produced invalid logger (DNE or nil)") - } - if sl["stdout"].Level != WARNING { - t.Fatalf("NewDefaultLogger produced invalid logger (incorrect level)") - } - if len(sl) != 1 { - t.Fatalf("NewDefaultLogger produced invalid logger (incorrect map count)") - } - - //func (l *Logger) AddFilter(name string, level int, writer LogWriter) {} - l := make(Logger) - l.AddFilter("stdout", DEBUG, NewConsoleLogWriter()) - if lw, exist := l["stdout"]; lw == nil || exist != true { - t.Fatalf("AddFilter produced invalid logger (DNE or nil)") - } - if l["stdout"].Level != DEBUG { - t.Fatalf("AddFilter produced invalid logger (incorrect level)") - } - if len(l) != 1 { - t.Fatalf("AddFilter produced invalid logger (incorrect map count)") - } - - //func (l *Logger) Warn(format string, args ...interface{}) error {} - if err := l.Warn("%s %d %#v", "Warning:", 1, []int{}); err.Error() != "Warning: 1 []int{}" { - t.Errorf("Warn returned invalid error: %s", err) - } - - //func (l *Logger) Error(format string, args ...interface{}) error {} - if err := l.Error("%s %d %#v", "Error:", 10, []string{}); err.Error() != "Error: 10 []string{}" { - t.Errorf("Error returned invalid error: %s", err) - } - - //func (l *Logger) Critical(format string, args ...interface{}) error {} - if err := l.Critical("%s %d %#v", "Critical:", 100, []int64{}); err.Error() != "Critical: 100 []int64{}" { - t.Errorf("Critical returned invalid error: %s", err) - } - - // Already tested or basically untestable - //func (l *Logger) Log(level int, source, message string) {} - //func (l *Logger) Logf(level int, format string, args ...interface{}) {} - //func (l *Logger) intLogf(level int, format string, args ...interface{}) string {} - //func (l *Logger) Finest(format string, args ...interface{}) {} - //func (l *Logger) Fine(format string, args ...interface{}) {} - //func (l *Logger) Debug(format string, args ...interface{}) {} - //func (l *Logger) Trace(format string, args ...interface{}) {} - //func (l *Logger) Info(format string, args ...interface{}) {} -} - -func TestLogOutput(t *testing.T) { - const ( - expected = "fdf3e51e444da56b4cb400f30bc47424" - ) - - // Unbuffered output - defer func(buflen int) { - LogBufferLength = buflen - }(LogBufferLength) - LogBufferLength = 0 - - l := make(Logger) - - // Delete and open the output log without a timestamp (for a constant md5sum) - l.AddFilter("file", FINEST, NewFileLogWriter(testLogFile, false).SetFormat("[%L] %M")) - defer os.Remove(testLogFile) - - // Send some log messages - l.Log(CRITICAL, "testsrc1", fmt.Sprintf("This message is level %d", int(CRITICAL))) - l.Logf(ERROR, "This message is level %v", ERROR) - l.Logf(WARNING, "This message is level %s", WARNING) - l.Logc(INFO, func() string { return "This message is level INFO" }) - l.Trace("This message is level %d", int(TRACE)) - l.Debug("This message is level %s", DEBUG) - l.Fine(func() string { return fmt.Sprintf("This message is level %v", FINE) }) - l.Finest("This message is level %v", FINEST) - l.Finest(FINEST, "is also this message's level") - - l.Close() - - contents, err := ioutil.ReadFile(testLogFile) - if err != nil { - t.Fatalf("Could not read output log: %s", err) 
- } - - sum := md5.New() - sum.Write(contents) - if sumstr := hex.EncodeToString(sum.Sum(nil)); sumstr != expected { - t.Errorf("--- Log Contents:\n%s---", string(contents)) - t.Fatalf("Checksum does not match: %s (expecting %s)", sumstr, expected) - } -} - -func TestCountMallocs(t *testing.T) { - const N = 1 - var m runtime.MemStats - getMallocs := func() uint64 { - runtime.ReadMemStats(&m) - return m.Mallocs - } - - // Console logger - sl := NewDefaultLogger(INFO) - mallocs := 0 - getMallocs() - for i := 0; i < N; i++ { - sl.Log(WARNING, "here", "This is a WARNING message") - } - mallocs += getMallocs() - fmt.Printf("mallocs per sl.Log((WARNING, \"here\", \"This is a log message\"): %d\n", mallocs/N) - - // Console logger formatted - mallocs = 0 - getMallocs() - for i := 0; i < N; i++ { - sl.Logf(WARNING, "%s is a log message with level %d", "This", WARNING) - } - mallocs += getMallocs() - fmt.Printf("mallocs per sl.Logf(WARNING, \"%%s is a log message with level %%d\", \"This\", WARNING): %d\n", mallocs/N) - - // Console logger (not logged) - sl = NewDefaultLogger(INFO) - mallocs = 0 - getMallocs() - for i := 0; i < N; i++ { - sl.Log(DEBUG, "here", "This is a DEBUG log message") - } - mallocs += getMallocs() - fmt.Printf("mallocs per unlogged sl.Log((WARNING, \"here\", \"This is a log message\"): %d\n", mallocs/N) - - // Console logger formatted (not logged) - mallocs = 0 - getMallocs() - for i := 0; i < N; i++ { - sl.Logf(DEBUG, "%s is a log message with level %d", "This", DEBUG) - } - mallocs += getMallocs() - fmt.Printf("mallocs per unlogged sl.Logf(WARNING, \"%%s is a log message with level %%d\", \"This\", WARNING): %d\n", mallocs/N) -} - -func TestXMLConfig(t *testing.T) { - const ( - configfile = "example.xml" - ) - - fd, err := os.Create(configfile) - if err != nil { - t.Fatalf("Could not open %s for writing: %s", configfile, err) - } - - fmt.Fprintln(fd, "") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " stdout") - fmt.Fprintln(fd, " console") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " DEBUG") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " file") - fmt.Fprintln(fd, " file") - fmt.Fprintln(fd, " FINEST") - fmt.Fprintln(fd, " test.log") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " [%D %T] [%L] (%S) %M") - fmt.Fprintln(fd, " false ") - fmt.Fprintln(fd, " 0M ") - fmt.Fprintln(fd, " 0K ") - fmt.Fprintln(fd, " true ") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " xmllog") - fmt.Fprintln(fd, " xml") - fmt.Fprintln(fd, " TRACE") - fmt.Fprintln(fd, " trace.xml") - fmt.Fprintln(fd, " true ") - fmt.Fprintln(fd, " 100M ") - fmt.Fprintln(fd, " 6K ") - fmt.Fprintln(fd, " false ") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, " donotopen") - fmt.Fprintln(fd, " socket") - fmt.Fprintln(fd, " FINEST") - fmt.Fprintln(fd, " 192.168.1.255:12124 ") - fmt.Fprintln(fd, " udp ") - fmt.Fprintln(fd, " ") - fmt.Fprintln(fd, "") - fd.Close() - - log := make(Logger) - log.LoadConfiguration(configfile) - defer os.Remove("trace.xml") - defer os.Remove("test.log") - defer log.Close() - - // Make sure we got all loggers - if len(log) != 3 { - t.Fatalf("XMLConfig: Expected 3 filters, found %d", len(log)) - } - - // Make sure they're the right keys - if _, ok := log["stdout"]; !ok { - t.Errorf("XMLConfig: Expected stdout logger") - } - if _, ok := log["file"]; !ok { - t.Fatalf("XMLConfig: Expected file logger") - } - if _, ok := log["xmllog"]; !ok { - t.Fatalf("XMLConfig: Expected xmllog logger") - } - - // Make sure they're the right type - if _, 
ok := log["stdout"].LogWriter.(ConsoleLogWriter); !ok { - t.Fatalf("XMLConfig: Expected stdout to be ConsoleLogWriter, found %T", log["stdout"].LogWriter) - } - if _, ok := log["file"].LogWriter.(*FileLogWriter); !ok { - t.Fatalf("XMLConfig: Expected file to be *FileLogWriter, found %T", log["file"].LogWriter) - } - if _, ok := log["xmllog"].LogWriter.(*FileLogWriter); !ok { - t.Fatalf("XMLConfig: Expected xmllog to be *FileLogWriter, found %T", log["xmllog"].LogWriter) - } - - // Make sure levels are set - if lvl := log["stdout"].Level; lvl != DEBUG { - t.Errorf("XMLConfig: Expected stdout to be set to level %d, found %d", DEBUG, lvl) - } - if lvl := log["file"].Level; lvl != FINEST { - t.Errorf("XMLConfig: Expected file to be set to level %d, found %d", FINEST, lvl) - } - if lvl := log["xmllog"].Level; lvl != TRACE { - t.Errorf("XMLConfig: Expected xmllog to be set to level %d, found %d", TRACE, lvl) - } - - // Make sure the w is open and points to the right file - if fname := log["file"].LogWriter.(*FileLogWriter).file.Name(); fname != "test.log" { - t.Errorf("XMLConfig: Expected file to have opened %s, found %s", "test.log", fname) - } - - // Make sure the XLW is open and points to the right file - if fname := log["xmllog"].LogWriter.(*FileLogWriter).file.Name(); fname != "trace.xml" { - t.Errorf("XMLConfig: Expected xmllog to have opened %s, found %s", "trace.xml", fname) - } - - // Move XML log file - os.Rename(configfile, "examples/"+configfile) // Keep this so that an example with the documentation is available -} - -func BenchmarkFormatLogRecord(b *testing.B) { - const updateEvery = 1 - rec := &LogRecord{ - Level: CRITICAL, - Created: now, - Source: "source", - Message: "message", - } - for i := 0; i < b.N; i++ { - rec.Created = rec.Created.Add(1 * time.Second / updateEvery) - if i%2 == 0 { - FormatLogRecord(FORMAT_DEFAULT, rec) - } else { - FormatLogRecord(FORMAT_SHORT, rec) - } - } -} - -func BenchmarkConsoleLog(b *testing.B) { - /* This doesn't seem to work on OS X - sink, err := os.Open(os.DevNull) - if err != nil { - panic(err) - } - if err := syscall.Dup2(int(sink.Fd()), syscall.Stdout); err != nil { - panic(err) - } - */ - - stdout = ioutil.Discard - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Log(WARNING, "here", "This is a log message") - } -} - -func BenchmarkConsoleNotLogged(b *testing.B) { - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Log(DEBUG, "here", "This is a log message") - } -} - -func BenchmarkConsoleUtilLog(b *testing.B) { - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Info("%s is a log message", "This") - } -} - -func BenchmarkConsoleUtilNotLog(b *testing.B) { - sl := NewDefaultLogger(INFO) - for i := 0; i < b.N; i++ { - sl.Debug("%s is a log message", "This") - } -} - -func BenchmarkFileLog(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Log(WARNING, "here", "This is a log message") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -func BenchmarkFileNotLogged(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Log(DEBUG, "here", "This is a log message") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -func BenchmarkFileUtilLog(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - 
b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Info("%s is a log message", "This") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -func BenchmarkFileUtilNotLog(b *testing.B) { - sl := make(Logger) - b.StopTimer() - sl.AddFilter("file", INFO, NewFileLogWriter("benchlog.log", false)) - b.StartTimer() - for i := 0; i < b.N; i++ { - sl.Debug("%s is a log message", "This") - } - b.StopTimer() - os.Remove("benchlog.log") -} - -// Benchmark results (darwin amd64 6g) -//elog.BenchmarkConsoleLog 100000 22819 ns/op -//elog.BenchmarkConsoleNotLogged 2000000 879 ns/op -//elog.BenchmarkConsoleUtilLog 50000 34380 ns/op -//elog.BenchmarkConsoleUtilNotLog 1000000 1339 ns/op -//elog.BenchmarkFileLog 100000 26497 ns/op -//elog.BenchmarkFileNotLogged 2000000 821 ns/op -//elog.BenchmarkFileUtilLog 50000 33945 ns/op -//elog.BenchmarkFileUtilNotLog 1000000 1258 ns/op diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/pattlog.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/pattlog.go deleted file mode 100644 index c655f93b..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/pattlog.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "bytes" - "fmt" - "io" - "os" - "strconv" - "sync/atomic" - "unsafe" -) - -const ( - FORMAT_DEFAULT = "[%D %T] [%L] (%S) %M" - FORMAT_SHORT = "[%t %d] [%L] %M" - FORMAT_ABBREV = "[%L] %M" -) - -type formatCacheType struct { - LastUpdateSeconds int64 - shortTime, shortDate string - longTime, longDate string -} - -var formatCache = unsafe.Pointer(&formatCacheType{}) -var pid = os.Getpid() - -// Known format codes: -// %T - Time (15:04:05 MST) -// %t - Time (15:04) -// %R - Real time (2006-01-02 15:04:05.999999) -// %D - Date (2006/01/02) -// %d - Date (01/02/06) -// %L - Level (FNST, FINE, DEBG, TRAC, WARN, EROR, CRIT) -// %S - Source -// %M - Message -// Ignores unknown formats -// Recommended: "[%D %T] [%L] (%S) %M" -func FormatLogRecord(format string, rec *LogRecord) string { - if rec == nil { - return "" - } - if len(format) == 0 { - return "" - } - - out := bytes.NewBuffer(make([]byte, 0, 64)) - secs := rec.Created.UnixNano() / 1e9 - - cache := *(*formatCacheType)(atomic.LoadPointer(&formatCache)) - if cache.LastUpdateSeconds != secs { - month, day, year := rec.Created.Month(), rec.Created.Day(), rec.Created.Year() - hour, minute, second := rec.Created.Hour(), rec.Created.Minute(), rec.Created.Second() - zone, _ := rec.Created.Zone() - cache = formatCacheType{ - LastUpdateSeconds: secs, - shortTime: fmt.Sprintf("%02d:%02d", hour, minute), - shortDate: fmt.Sprintf("%02d/%02d/%02d", month, day, year%100), - longTime: fmt.Sprintf("%02d:%02d:%02d %s", hour, minute, second, zone), - longDate: fmt.Sprintf("%04d/%02d/%02d", year, month, day), - } - atomic.StorePointer(&formatCache, unsafe.Pointer(&cache)) - } - - // Split the string into pieces by % signs - pieces := bytes.Split([]byte(format), []byte{'%'}) - - // Iterate over the pieces, replacing known formats - for i, piece := range pieces { - if i > 0 && len(piece) > 0 { - switch piece[0] { - case 'P': - out.WriteString(strconv.Itoa(pid)) - case 'T': - out.WriteString(cache.longTime) - case 't': - out.WriteString(cache.shortTime) - case 'D': - out.WriteString(cache.longDate) - case 'd': - out.WriteString(cache.shortDate) - case 'L': - out.WriteString(levelStrings[rec.Level]) - case 'S': - out.WriteString(rec.Source) - case 'M': - out.WriteString(rec.Message) - case 'R': - out.WriteString(rec.Created.Format("2006-01-02 
15:04:05.999999")) - } - if len(piece) > 1 { - out.Write(piece[1:]) - } - } else if len(piece) > 0 { - out.Write(piece) - } - } - out.WriteByte('\n') - - return out.String() -} - -// This is the standard writer that prints to standard output. -type FormatLogWriter chan *LogRecord - -// This creates a new FormatLogWriter -func NewFormatLogWriter(out io.Writer, format string) FormatLogWriter { - records := make(FormatLogWriter, LogBufferLength) - go records.run(out, format) - return records -} - -func (w FormatLogWriter) run(out io.Writer, format string) { - for rec := range w { - fmt.Fprint(out, FormatLogRecord(format, rec)) - } -} - -// This is the FormatLogWriter's output method. This will block if the output -// buffer is full. -func (w FormatLogWriter) LogWrite(rec *LogRecord) { - w <- rec -} - -// Close stops the logger from sending messages to standard output. Attempts to -// send log messages to this logger after a Close have undefined behavior. -func (w FormatLogWriter) Close() { - close(w) -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/socklog.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/socklog.go deleted file mode 100644 index 1d224a99..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/socklog.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "encoding/json" - "fmt" - "net" - "os" -) - -// This log writer sends output to a socket -type SocketLogWriter chan *LogRecord - -// This is the SocketLogWriter's output method -func (w SocketLogWriter) LogWrite(rec *LogRecord) { - w <- rec -} - -func (w SocketLogWriter) Close() { - close(w) -} - -func NewSocketLogWriter(proto, hostport string) SocketLogWriter { - sock, err := net.Dial(proto, hostport) - if err != nil { - fmt.Fprintf(os.Stderr, "NewSocketLogWriter(%q): %s\n", hostport, err) - return nil - } - - w := SocketLogWriter(make(chan *LogRecord, LogBufferLength)) - - go func() { - defer func() { - if sock != nil && proto == "tcp" { - sock.Close() - } - }() - - for rec := range w { - // Marshall into JSON - js, err := json.Marshal(rec) - if err != nil { - fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err) - return - } - - _, err = sock.Write(js) - if err != nil { - fmt.Fprint(os.Stderr, "SocketLogWriter(%q): %s", hostport, err) - return - } - } - }() - - return w -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/termlog.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/termlog.go deleted file mode 100644 index 39f60c0e..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/termlog.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "fmt" - "io" - "os" - "time" -) - -var stdout io.Writer = os.Stdout - -// This is the standard writer that prints to standard output. -type ConsoleLogWriter struct{} - -// This creates a new ConsoleLogWriter -func NewConsoleLogWriter() ConsoleLogWriter { - return ConsoleLogWriter{} -} - -// This is the ConsoleLogWriter's output method. -func (w ConsoleLogWriter) LogWrite(rec *LogRecord) { - timestr := rec.Created.Format(time.StampMicro) - fmt.Fprint(stdout, "[", timestr, "] [", levelStrings[rec.Level], "] ", rec.Message, "\n") -} - -// Close flushes the log. Probably don't need this any more. 
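
The format codes documented in the deleted pattlog.go (%D, %T, %L, %S, %M, and so on) are resolved by FormatLogRecord, with the per-second date/time pieces cached behind an atomic pointer. A minimal sketch of rendering one record, using only names visible in this diff (LogRecord, FORMAT_DEFAULT, CRITICAL) and the upstream import path as an assumption:

```go
package main

import (
	"fmt"
	"time"

	log4go "github.com/ThomsonReutersEikon/log4go" // upstream path; removed from vendor by this diff
)

func main() {
	rec := &log4go.LogRecord{
		Level:   log4go.CRITICAL,
		Created: time.Now(),
		Source:  "source",
		Message: "message",
	}
	// FORMAT_DEFAULT is "[%D %T] [%L] (%S) %M"; the formatter appends the
	// trailing newline itself, so plain Print is enough here.
	fmt.Print(log4go.FormatLogRecord(log4go.FORMAT_DEFAULT, rec))
}
```
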
-func (w ConsoleLogWriter) Close() { - os.Stdout.Sync() -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/wrapper.go b/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/wrapper.go deleted file mode 100644 index 10ecd88e..00000000 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/log4go/wrapper.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (C) 2010, Kyle Lemons . All rights reserved. - -package log4go - -import ( - "errors" - "os" - "fmt" - "strings" -) - -var ( - Global Logger -) - -func init() { - Global = NewDefaultLogger(DEBUG) -} - -// Wrapper for (*Logger).LoadConfiguration -func LoadConfiguration(filename string) { - Global.LoadConfiguration(filename) -} - -// Wrapper for (*Logger).AddFilter -func AddFilter(name string, lvl level, writer LogWriter) { - Global.AddFilter(name, lvl, writer) -} - -// Wrapper for (*Logger).Close (closes and removes all logwriters) -func Close() { - Global.Close() -} - -func Crash(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(CRITICAL, strings.Repeat(" %v", len(args))[1:], args...) - } - panic(args) -} - -// Logs the given message and crashes the program -func Crashf(format string, args ...interface{}) { - Global.intLogf(CRITICAL, format, args...) - Global.Close() // so that hopefully the messages get logged - panic(fmt.Sprintf(format, args...)) -} - -// Compatibility with `log` -func Exit(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) - } - Global.Close() // so that hopefully the messages get logged - os.Exit(0) -} - -// Compatibility with `log` -func Exitf(format string, args ...interface{}) { - Global.intLogf(ERROR, format, args...) - Global.Close() // so that hopefully the messages get logged - os.Exit(0) -} - -// Compatibility with `log` -func Stderr(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(ERROR, strings.Repeat(" %v", len(args))[1:], args...) - } -} - -// Compatibility with `log` -func Stderrf(format string, args ...interface{}) { - Global.intLogf(ERROR, format, args...) -} - -// Compatibility with `log` -func Stdout(args ...interface{}) { - if len(args) > 0 { - Global.intLogf(INFO, strings.Repeat(" %v", len(args))[1:], args...) - } -} - -// Compatibility with `log` -func Stdoutf(format string, args ...interface{}) { - Global.intLogf(INFO, format, args...) -} - -// Send a log message manually -// Wrapper for (*Logger).Log -func Log(lvl level, source, message string) { - Global.Log(lvl, source, message) -} - -// Send a formatted log message easily -// Wrapper for (*Logger).Logf -func Logf(lvl level, format string, args ...interface{}) { - Global.intLogf(lvl, format, args...) -} - -// Send a closure log message -// Wrapper for (*Logger).Logc -func Logc(lvl level, closure func() string) { - Global.intLogc(lvl, closure) -} - -// Utility for finest log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Finest -func Finest(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINEST - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) 
- } -} - -// Utility for fine log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Fine -func Fine(arg0 interface{}, args ...interface{}) { - const ( - lvl = FINE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for debug log messages -// When given a string as the first argument, this behaves like Logf but with the DEBUG log level (e.g. the first argument is interpreted as a format for the latter arguments) -// When given a closure of type func()string, this logs the string returned by the closure iff it will be logged. The closure runs at most one time. -// When given anything else, the log message will be each of the arguments formatted with %v and separated by spaces (ala Sprint). -// Wrapper for (*Logger).Debug -func Debug(arg0 interface{}, args ...interface{}) { - const ( - lvl = DEBUG - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for trace log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Trace -func Trace(arg0 interface{}, args ...interface{}) { - const ( - lvl = TRACE - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for info log messages (see Debug() for parameter explanation) -// Wrapper for (*Logger).Info -func Info(arg0 interface{}, args ...interface{}) { - const ( - lvl = INFO - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - case func() string: - // Log the closure (no other arguments used) - Global.intLogc(lvl, first) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(arg0)+strings.Repeat(" %v", len(args)), args...) - } -} - -// Utility for warn log messages (returns an error for easy function returns) (see Debug() for parameter explanation) -// These functions will execute a closure exactly once, to build the error message for the return -// Wrapper for (*Logger).Warn -func Warn(arg0 interface{}, args ...interface{}) error { - const ( - lvl = WARNING - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - return errors.New(fmt.Sprintf(first, args...)) - case func() string: - // Log the closure (no other arguments used) - str := first() - Global.intLogf(lvl, "%s", str) - return errors.New(str) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) 
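
The wrapper utilities above all dispatch on the first argument: a string is treated as a format, a `func() string` closure is evaluated only if the message will actually be logged, and anything else is rendered Sprint-style. A small sketch of the pattern those deleted comments describe, where `expensiveDump` is a hypothetical stand-in and the import path is the upstream one:

```go
package main

import (
	"fmt"

	log4go "github.com/ThomsonReutersEikon/log4go" // upstream path; removed from vendor by this diff
)

// expensiveDump is a hypothetical stand-in for costly diagnostics.
func expensiveDump() string { return "big state dump" }

func main() {
	// The closure form defers formatting: per the deleted wrapper.go, it runs
	// at most once, and only if some filter logs at DEBUG.
	log4go.Debug(func() string {
		return "state: " + expensiveDump()
	})

	// Warn both logs and returns an error built from the same message,
	// which the deleted comments call out as convenient for early returns.
	if err := log4go.Warn("%s failed after %d tries", "handshake", 3); err != nil {
		fmt.Println("returned:", err)
	}
}
```
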
- return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) - } - return nil -} - -// Utility for error log messages (returns an error for easy function returns) (see Debug() for parameter explanation) -// These functions will execute a closure exactly once, to build the error message for the return -// Wrapper for (*Logger).Error -func Error(arg0 interface{}, args ...interface{}) error { - const ( - lvl = ERROR - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - return errors.New(fmt.Sprintf(first, args...)) - case func() string: - // Log the closure (no other arguments used) - str := first() - Global.intLogf(lvl, "%s", str) - return errors.New(str) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) - } - return nil -} - -// Utility for critical log messages (returns an error for easy function returns) (see Debug() for parameter explanation) -// These functions will execute a closure exactly once, to build the error message for the return -// Wrapper for (*Logger).Critical -func Critical(arg0 interface{}, args ...interface{}) error { - const ( - lvl = CRITICAL - ) - switch first := arg0.(type) { - case string: - // Use the string as a format string - Global.intLogf(lvl, first, args...) - return errors.New(fmt.Sprintf(first, args...)) - case func() string: - // Log the closure (no other arguments used) - str := first() - Global.intLogf(lvl, "%s", str) - return errors.New(str) - default: - // Build a format string so that it will be similar to Sprint - Global.intLogf(lvl, fmt.Sprint(first)+strings.Repeat(" %v", len(args)), args...) - return errors.New(fmt.Sprint(first) + fmt.Sprintf(strings.Repeat(" %v", len(args)), args...)) - } - return nil -} diff --git a/vendor/_nuts/github.com/technoweenie/assert/.gitignore b/vendor/_nuts/github.com/technoweenie/assert/.gitignore deleted file mode 100644 index b6fadf4e..00000000 --- a/vendor/_nuts/github.com/technoweenie/assert/.gitignore +++ /dev/null @@ -1,7 +0,0 @@ -_go_.* -_gotest_.* -_obj -_test -_testmain.go -*.out -*.[568] diff --git a/vendor/_nuts/github.com/technoweenie/assert/README.md b/vendor/_nuts/github.com/technoweenie/assert/README.md deleted file mode 100644 index 8b6b6fc4..00000000 --- a/vendor/_nuts/github.com/technoweenie/assert/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# Assert (c) Blake Mizerany and Keith Rarick -- MIT LICENCE - -## Assertions for Go tests - -## Install - - $ go get github.com/bmizerany/assert - -## Use - -**point.go** - - package point - - type Point struct { - x, y int - } - -**point_test.go** - - - package point - - import ( - "testing" - "github.com/bmizerany/assert" - ) - - func TestAsserts(t *testing.T) { - p1 := Point{1, 1} - p2 := Point{2, 1} - - assert.Equal(t, p1, p2) - } - -**output** - $ go test - --- FAIL: TestAsserts (0.00 seconds) - assert.go:15: /Users/flavio.barbosa/dev/stewie/src/point_test.go:12 - assert.go:24: ! 
X: 1 != 2 - FAIL - -## Docs - - http://github.com/bmizerany/assert diff --git a/vendor/_nuts/github.com/technoweenie/assert/assert.go b/vendor/_nuts/github.com/technoweenie/assert/assert.go deleted file mode 100644 index 038164c7..00000000 --- a/vendor/_nuts/github.com/technoweenie/assert/assert.go +++ /dev/null @@ -1,77 +0,0 @@ -package assert - -// Testing helpers for doozer. - -import ( - "github.com/github/git-lfs/vendor/_nuts/github.com/kr/pretty" - "reflect" - "testing" - "runtime" - "fmt" -) - -func assert(t *testing.T, result bool, f func(), cd int) { - if !result { - _, file, line, _ := runtime.Caller(cd + 1) - t.Errorf("%s:%d", file, line) - f() - t.FailNow() - } -} - -func equal(t *testing.T, exp, got interface{}, cd int, args ...interface{}) { - fn := func() { - for _, desc := range pretty.Diff(exp, got) { - t.Error("!", desc) - } - if len(args) > 0 { - t.Error("!", " -", fmt.Sprint(args...)) - } - } - result := reflect.DeepEqual(exp, got) - assert(t, result, fn, cd+1) -} - -func tt(t *testing.T, result bool, cd int, args ...interface{}) { - fn := func() { - t.Errorf("! Failure") - if len(args) > 0 { - t.Error("!", " -", fmt.Sprint(args...)) - } - } - assert(t, result, fn, cd+1) -} - -func T(t *testing.T, result bool, args ...interface{}) { - tt(t, result, 1, args...) -} - -func Tf(t *testing.T, result bool, format string, args ...interface{}) { - tt(t, result, 1, fmt.Sprintf(format, args...)) -} - -func Equal(t *testing.T, exp, got interface{}, args ...interface{}) { - equal(t, exp, got, 1, args...) -} - -func Equalf(t *testing.T, exp, got interface{}, format string, args ...interface{}) { - equal(t, exp, got, 1, fmt.Sprintf(format, args...)) -} - -func NotEqual(t *testing.T, exp, got interface{}, args ...interface{}) { - fn := func() { - t.Errorf("! 
Unexpected: <%#v>", exp) - if len(args) > 0 { - t.Error("!", " -", fmt.Sprint(args...)) - } - } - result := !reflect.DeepEqual(exp, got) - assert(t, result, fn, 1) -} - -func Panic(t *testing.T, err interface{}, fn func()) { - defer func() { - equal(t, err, recover(), 3) - }() - fn() -} diff --git a/vendor/_nuts/github.com/technoweenie/assert/assert_test.go b/vendor/_nuts/github.com/technoweenie/assert/assert_test.go deleted file mode 100644 index 162a590c..00000000 --- a/vendor/_nuts/github.com/technoweenie/assert/assert_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package assert - -import ( - "testing" -) - -func TestLineNumbers(t *testing.T) { - Equal(t, "foo", "foo", "msg!") - //Equal(t, "foo", "bar", "this should blow up") -} - -func TestNotEqual(t *testing.T) { - NotEqual(t, "foo", "bar", "msg!") - //NotEqual(t, "foo", "foo", "this should blow up") -} diff --git a/vendor/_nuts/github.com/technoweenie/assert/example/point.go b/vendor/_nuts/github.com/technoweenie/assert/example/point.go deleted file mode 100644 index 15789fe1..00000000 --- a/vendor/_nuts/github.com/technoweenie/assert/example/point.go +++ /dev/null @@ -1,5 +0,0 @@ -package point - -type Point struct { - X, Y int -} diff --git a/vendor/_nuts/github.com/technoweenie/assert/example/point_test.go b/vendor/_nuts/github.com/technoweenie/assert/example/point_test.go deleted file mode 100644 index dcf74603..00000000 --- a/vendor/_nuts/github.com/technoweenie/assert/example/point_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package point - -import ( - "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/technoweenie/assert" -) - -func TestAsserts(t *testing.T) { - p1 := Point{1, 1} - p2 := Point{2, 1} - - assert.Equal(t, p1, p2) -} diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/.gitignore b/vendor/github.com/ThomsonReutersEikon/go-ntlm/.gitignore similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/.gitignore rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/.gitignore diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/License b/vendor/github.com/ThomsonReutersEikon/go-ntlm/License similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/License rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/License diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/README.md b/vendor/github.com/ThomsonReutersEikon/go-ntlm/README.md similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/README.md rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/README.md diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/av_pairs.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/av_pairs.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/av_pairs.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/av_pairs.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/challenge_responses.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/challenge_responses.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/challenge_responses.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/challenge_responses.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto.go similarity index 97% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto.go rename to 
vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto.go index 1f7c41ca..6e6774dd 100644 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto.go +++ b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto.go @@ -8,8 +8,9 @@ import ( md5P "crypto/md5" "crypto/rand" rc4P "crypto/rc4" - md4P "github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4" crc32P "hash/crc32" + + md4P "github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4" ) func md4(data []byte) []byte { diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/crypto_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers.go similarity index 75% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers.go index 1cc78d52..6b1a1ddb 100644 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers.go +++ b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers.go @@ -5,8 +5,8 @@ package ntlm import ( "bytes" "crypto/rand" - "unicode/utf16" "encoding/binary" + "unicode/utf16" ) // Concatenate two byte slices into a new slice @@ -67,23 +67,22 @@ func utf16FromString(s string) []byte { // Convert a UTF16 string to UTF8 string for Go usage func utf16ToString(bytes []byte) string { - var data []uint16 + var data []uint16 - // NOTE: This is definitely not the best way to do this, but when I tried using a buffer.Read I could not get it to work - for offset := 0; offset < len(bytes); offset = offset + 2 { - i := binary.LittleEndian.Uint16(bytes[offset : offset+2]) - data = append(data, i) - } + // NOTE: This is definitely not the best way to do this, but when I tried using a buffer.Read I could not get it to work + for offset := 0; offset < len(bytes); offset = offset + 2 { + i := binary.LittleEndian.Uint16(bytes[offset : offset+2]) + data = append(data, i) + } - return string(utf16.Decode(data)) + return string(utf16.Decode(data)) } func uint32ToBytes(v uint32) []byte { - bytes := make([]byte, 4) - bytes[0] = byte(v & 0xff) - bytes[1] = byte((v >> 8) & 0xff) - bytes[2] = byte((v >> 16) & 0xff) - bytes[3] = byte((v >> 24) & 0xff) - return bytes + bytes := make([]byte, 4) + bytes[0] = byte(v & 0xff) + bytes[1] = byte((v >> 8) & 0xff) + bytes[2] = byte((v >> 16) & 0xff) + bytes[3] = byte((v >> 24) & 0xff) + return bytes } - diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/helpers_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/keys.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/keys.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/keys.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/keys.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/LICENSE b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/LICENSE similarity index 100% rename from 
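
The reindented uint32ToBytes helper above packs a uint32 little-endian by hand; the standard library's encoding/binary, which the same file already imports for utf16ToString, produces identical bytes. A short sketch of the equivalence (not part of the diff, just an observation):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// uint32ToBytes reproduces the vendored helper: manual little-endian packing.
func uint32ToBytes(v uint32) []byte {
	b := make([]byte, 4)
	b[0] = byte(v & 0xff)
	b[1] = byte((v >> 8) & 0xff)
	b[2] = byte((v >> 16) & 0xff)
	b[3] = byte((v >> 24) & 0xff)
	return b
}

func main() {
	v := uint32(0xDEADBEEF)
	std := make([]byte, 4)
	binary.LittleEndian.PutUint32(std, v) // stdlib equivalent
	fmt.Println(bytes.Equal(uint32ToBytes(v), std)) // true
}
```
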
vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/LICENSE rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/LICENSE diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4block.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4block.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4block.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/md4/md4block.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate.go similarity index 99% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate.go index 5c2448ef..1792f532 100644 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate.go +++ b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate.go @@ -38,7 +38,7 @@ type AuthenticateMessage struct { /// MS-NLMP 2.2.1.3 - In connectionless mode, a NEGOTIATE structure that contains a set of bit flags (section 2.2.2.5) and represents the // conclusion of negotiation—the choices the client has made from the options the server offered in the CHALLENGE_MESSAGE. 
// In connection-oriented mode, a NEGOTIATE structure that contains the set of bit flags (section 2.2.2.5) negotiated in - // the previous + // the previous NegotiateFlags uint32 // 4 bytes // Version (8 bytes): A VERSION structure (section 2.2.2.10) that is present only when the NTLMSSP_NEGOTIATE_VERSION diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_authenticate_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_challenge_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_negotiate.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_negotiate.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_negotiate.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/message_negotiate.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/negotiate_flags_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlm.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlm.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlm.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlm.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1.go similarity index 98% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1.go index 4536f955..4ad2b84e 100644 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1.go +++ b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1.go @@ -6,7 +6,7 @@ import ( "bytes" rc4P "crypto/rc4" "errors" - l4g "github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/log4go" + "log" "strings" ) @@ -181,7 +181,7 @@ func (n *V1ServerSession) ProcessAuthenticateMessage(am 
*AuthenticateMessage) (e // They should always be correct (I hope) n.user = am.UserName.String() n.userDomain = am.DomainName.String() - l4g.Info("(ProcessAuthenticateMessage)NTLM v1 User %s Domain %s", n.user, n.userDomain) + log.Printf("(ProcessAuthenticateMessage)NTLM v1 User %s Domain %s", n.user, n.userDomain) err = n.fetchResponseKeys() if err != nil { @@ -225,7 +225,7 @@ func (n *V1ServerSession) ProcessAuthenticateMessage(am *AuthenticateMessage) (e //UGH not entirely sure how this could possibly happen, going to put this in for now //TODO investigate if this ever is really happening am.Version = &VersionStruct{ProductMajorVersion: uint8(5), ProductMinorVersion: uint8(1), ProductBuild: uint16(2600), NTLMRevisionCurrent: uint8(15)} - l4g.Error("Nil version in ntlmv1") + log.Printf("Nil version in ntlmv1") } err = n.calculateKeys(am.Version.NTLMRevisionCurrent) diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1_test.go similarity index 93% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1_test.go index 18e02cf4..e175557a 100644 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1_test.go +++ b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv1_test.go @@ -42,14 +42,14 @@ func checkV1Value(t *testing.T, name string, value []byte, expected string, err // would authenticate. This was due to a bug in the MS-NLMP docs. This tests for that issue func TestNtlmV1ExtendedSessionSecurity(t *testing.T) { // NTLMv1 with extended session security - challengeMessage := "TlRMTVNTUAACAAAAAAAAADgAAABVgphiRy3oSZvn1I4AAAAAAAAAAKIAogA4AAAABQEoCgAAAA8CAA4AUgBFAFUAVABFAFIAUwABABwAVQBLAEIAUAAtAEMAQgBUAFIATQBGAEUAMAA2AAQAFgBSAGUAdQB0AGUAcgBzAC4AbgBlAHQAAwA0AHUAawBiAHAALQBjAGIAdAByAG0AZgBlADAANgAuAFIAZQB1AHQAZQByAHMALgBuAGUAdAAFABYAUgBlAHUAdABlAHIAcwAuAG4AZQB0AAAAAAA=" - authenticateMessage := "TlRMTVNTUAADAAAAGAAYAJgAAAAYABgAsAAAAAAAAABIAAAAOgA6AEgAAAAWABYAggAAABAAEADIAAAAVYKYYgUCzg4AAAAPMQAwADAAMAAwADEALgB3AGMAcABAAHQAaABvAG0AcwBvAG4AcgBlAHUAdABlAHIAcwAuAGMAbwBtAE4AWQBDAFMATQBTAEcAOQA5ADAAOQBRWAK3h/TIywAAAAAAAAAAAAAAAAAAAAA3tp89kZU1hs1XZp7KTyGm3XsFAT9stEDW9YXDaeYVBmBcBb//2FOu" + challengeMessage := "TlRMTVNTUAACAAAAAAAAADgAAABVgphiRy3oSZvn1I4AAAAAAAAAAKIAogA4AAAABQEoCgAAAA8CAA4AUgBFAFUAVABFAFIAUwABABwAVQBLAEIAUAAtAEMAQgBUAFIATQBGAEUAMAA2AAQAFgBSAGUAdQB0AGUAcgBzAC4AbgBlAHQAAwA0AHUAawBiAHAALQBjAGIAdAByAG0AZgBlADAANgAuAFIAZQB1AHQAZQByAHMALgBuAGUAdAAFABYAUgBlAHUAdABlAHIAcwAuAG4AZQB0AAAAAAA=" + authenticateMessage := "TlRMTVNTUAADAAAAGAAYAJgAAAAYABgAsAAAAAAAAABIAAAAOgA6AEgAAAAWABYAggAAABAAEADIAAAAVYKYYgUCzg4AAAAPMQAwADAAMAAwADEALgB3AGMAcABAAHQAaABvAG0AcwBvAG4AcgBlAHUAdABlAHIAcwAuAGMAbwBtAE4AWQBDAFMATQBTAEcAOQA5ADAAOQBRWAK3h/TIywAAAAAAAAAAAAAAAAAAAAA3tp89kZU1hs1XZp7KTyGm3XsFAT9stEDW9YXDaeYVBmBcBb//2FOu" challengeData, _ := base64.StdEncoding.DecodeString(challengeMessage) c, _ := ParseChallengeMessage(challengeData) - authenticateData, _ := base64.StdEncoding.DecodeString(authenticateMessage) - msg, err := ParseAuthenticateMessage(authenticateData, 1) + authenticateData, _ := base64.StdEncoding.DecodeString(authenticateMessage) + msg, err := ParseAuthenticateMessage(authenticateData, 1) if err != nil { t.Errorf("Could not process authenticate message: %s", err) } diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2.go 
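
The test hunk above decodes base64 CHALLENGE and AUTHENTICATE messages and hands them to the parser; ParseChallengeMessage and ParseAuthenticateMessage (with an NTLM version argument of 1 for v1) are the calls visible in the diff. A minimal sketch of that flow, assuming the post-rename vendored import path; parsePair is a hypothetical helper and the empty inputs in main exist only so the sketch runs:

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/ThomsonReutersEikon/go-ntlm/ntlm" // post-rename vendored path
)

// parsePair mirrors the deleted test: decode both messages, then parse them.
// The 1 passed to ParseAuthenticateMessage selects NTLMv1, as in the test.
func parsePair(challengeB64, authenticateB64 string) error {
	challengeData, err := base64.StdEncoding.DecodeString(challengeB64)
	if err != nil {
		return err
	}
	if _, err := ntlm.ParseChallengeMessage(challengeData); err != nil {
		return err
	}
	authenticateData, err := base64.StdEncoding.DecodeString(authenticateB64)
	if err != nil {
		return err
	}
	if _, err := ntlm.ParseAuthenticateMessage(authenticateData, 1); err != nil {
		return fmt.Errorf("could not process authenticate message: %s", err)
	}
	return nil
}

func main() {
	// Empty inputs fail to parse, by design; real captures go here.
	if err := parsePair("", ""); err != nil {
		fmt.Println("parse failed (expected with empty input):", err)
	}
}
```
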
b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2.go similarity index 98% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2.go index cc319f3d..a511f89a 100644 --- a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2.go +++ b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2.go @@ -7,7 +7,7 @@ import ( rc4P "crypto/rc4" "encoding/binary" "errors" - l4g "github.com/github/git-lfs/vendor/_nuts/github.com/ThomsonReutersEikon/log4go" + "log" "strings" "time" ) @@ -204,7 +204,7 @@ func (n *V2ServerSession) ProcessAuthenticateMessage(am *AuthenticateMessage) (e // They should always be correct (I hope) n.user = am.UserName.String() n.userDomain = am.DomainName.String() - l4g.Info("(ProcessAuthenticateMessage)NTLM v2 User %s Domain %s", n.user, n.userDomain) + log.Printf("(ProcessAuthenticateMessage)NTLM v2 User %s Domain %s", n.user, n.userDomain) err = n.fetchResponseKeys() if err != nil { @@ -243,7 +243,7 @@ func (n *V2ServerSession) ProcessAuthenticateMessage(am *AuthenticateMessage) (e //TODO investigate if this ever is really happening am.Version = &VersionStruct{ProductMajorVersion: uint8(5), ProductMinorVersion: uint8(1), ProductBuild: uint16(2600), NTLMRevisionCurrent: uint8(15)} - l4g.Error("Nil version in ntlmv2") + log.Printf("Nil version in ntlmv2") } err = n.calculateKeys(am.Version.NTLMRevisionCurrent) diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/ntlmv2_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/payload.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/payload.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/payload.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/payload.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature_test.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature_test.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature_test.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/signature_test.go diff --git a/vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/version.go b/vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/version.go similarity index 100% rename from vendor/_nuts/github.com/ThomsonReutersEikon/go-ntlm/ntlm/version.go rename to vendor/github.com/ThomsonReutersEikon/go-ntlm/ntlm/version.go diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/.hgignore b/vendor/github.com/bgentry/go-netrc/.hgignore similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/.hgignore rename to vendor/github.com/bgentry/go-netrc/.hgignore diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/LICENSE b/vendor/github.com/bgentry/go-netrc/LICENSE similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/LICENSE rename 
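
Both ntlmv1.go and ntlmv2.go drop the vendored l4g logger for the standard library, and the hunks show the whole migration pattern: l4g.Info and l4g.Error call sites become log.Printf. In sketch form, under the assumption that none of these call sites relied on log4go's level filtering (the values below are illustrative):

```go
package main

import "log"

func main() {
	user, domain := "alice", "EXAMPLE" // illustrative values

	// Before (deleted):
	//   l4g.Info("(ProcessAuthenticateMessage)NTLM v2 User %s Domain %s", n.user, n.userDomain)
	//   l4g.Error("Nil version in ntlmv2")
	// After, exactly as in the hunks above:
	log.Printf("(ProcessAuthenticateMessage)NTLM v2 User %s Domain %s", user, domain)
	log.Printf("Nil version in ntlmv2")
}
```
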
to vendor/github.com/bgentry/go-netrc/LICENSE diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/README.md b/vendor/github.com/bgentry/go-netrc/README.md similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/README.md rename to vendor/github.com/bgentry/go-netrc/README.md diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/netrc/examples/bad_default_order.netrc b/vendor/github.com/bgentry/go-netrc/netrc/examples/bad_default_order.netrc similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/netrc/examples/bad_default_order.netrc rename to vendor/github.com/bgentry/go-netrc/netrc/examples/bad_default_order.netrc diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/netrc/examples/good.netrc b/vendor/github.com/bgentry/go-netrc/netrc/examples/good.netrc similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/netrc/examples/good.netrc rename to vendor/github.com/bgentry/go-netrc/netrc/examples/good.netrc diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/netrc/netrc.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc.go similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/netrc/netrc.go rename to vendor/github.com/bgentry/go-netrc/netrc/netrc.go diff --git a/vendor/_nuts/github.com/bgentry/go-netrc/netrc/netrc_test.go b/vendor/github.com/bgentry/go-netrc/netrc/netrc_test.go similarity index 100% rename from vendor/_nuts/github.com/bgentry/go-netrc/netrc/netrc_test.go rename to vendor/github.com/bgentry/go-netrc/netrc/netrc_test.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/LICENSE b/vendor/github.com/cheggaaa/pb/LICENSE similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/LICENSE rename to vendor/github.com/cheggaaa/pb/LICENSE diff --git a/vendor/_nuts/github.com/cheggaaa/pb/README.md b/vendor/github.com/cheggaaa/pb/README.md similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/README.md rename to vendor/github.com/cheggaaa/pb/README.md diff --git a/vendor/_nuts/github.com/cheggaaa/pb/example/copy/copy.go b/vendor/github.com/cheggaaa/pb/example/copy/copy.go similarity index 95% rename from vendor/_nuts/github.com/cheggaaa/pb/example/copy/copy.go rename to vendor/github.com/cheggaaa/pb/example/copy/copy.go index 0fffd728..82b5329a 100644 --- a/vendor/_nuts/github.com/cheggaaa/pb/example/copy/copy.go +++ b/vendor/github.com/cheggaaa/pb/example/copy/copy.go @@ -2,7 +2,7 @@ package main import ( "fmt" - "github.com/github/git-lfs/vendor/_nuts/github.com/cheggaaa/pb" + "github.com/cheggaaa/pb" "io" "net/http" "os" diff --git a/vendor/_nuts/github.com/cheggaaa/pb/example/pb.go b/vendor/github.com/cheggaaa/pb/example/pb.go similarity index 86% rename from vendor/_nuts/github.com/cheggaaa/pb/example/pb.go rename to vendor/github.com/cheggaaa/pb/example/pb.go index eee92ff0..140ba039 100644 --- a/vendor/_nuts/github.com/cheggaaa/pb/example/pb.go +++ b/vendor/github.com/cheggaaa/pb/example/pb.go @@ -1,7 +1,7 @@ package main import ( - "github.com/github/git-lfs/vendor/_nuts/github.com/cheggaaa/pb" + "github.com/cheggaaa/pb" "time" ) diff --git a/vendor/_nuts/github.com/cheggaaa/pb/format.go b/vendor/github.com/cheggaaa/pb/format.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/format.go rename to vendor/github.com/cheggaaa/pb/format.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/format_test.go b/vendor/github.com/cheggaaa/pb/format_test.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/format_test.go rename to 
vendor/github.com/cheggaaa/pb/format_test.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/pb.go b/vendor/github.com/cheggaaa/pb/pb.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/pb.go rename to vendor/github.com/cheggaaa/pb/pb.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/pb_nix.go b/vendor/github.com/cheggaaa/pb/pb_nix.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/pb_nix.go rename to vendor/github.com/cheggaaa/pb/pb_nix.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/pb_solaris.go b/vendor/github.com/cheggaaa/pb/pb_solaris.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/pb_solaris.go rename to vendor/github.com/cheggaaa/pb/pb_solaris.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/pb_test.go b/vendor/github.com/cheggaaa/pb/pb_test.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/pb_test.go rename to vendor/github.com/cheggaaa/pb/pb_test.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/pb_win.go b/vendor/github.com/cheggaaa/pb/pb_win.go similarity index 72% rename from vendor/_nuts/github.com/cheggaaa/pb/pb_win.go rename to vendor/github.com/cheggaaa/pb/pb_win.go index c700a5aa..eb36190c 100644 --- a/vendor/_nuts/github.com/cheggaaa/pb/pb_win.go +++ b/vendor/github.com/cheggaaa/pb/pb_win.go @@ -3,7 +3,7 @@ package pb import ( - "github.com/github/git-lfs/vendor/_nuts/github.com/olekukonko/ts" + "github.com/olekukonko/ts" ) func bold(str string) string { diff --git a/vendor/_nuts/github.com/cheggaaa/pb/pb_x.go b/vendor/github.com/cheggaaa/pb/pb_x.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/pb_x.go rename to vendor/github.com/cheggaaa/pb/pb_x.go diff --git a/vendor/_nuts/github.com/cheggaaa/pb/reader.go b/vendor/github.com/cheggaaa/pb/reader.go similarity index 100% rename from vendor/_nuts/github.com/cheggaaa/pb/reader.go rename to vendor/github.com/cheggaaa/pb/reader.go diff --git a/vendor/_nuts/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE similarity index 100% rename from vendor/_nuts/github.com/inconshreveable/mousetrap/LICENSE rename to vendor/github.com/inconshreveable/mousetrap/LICENSE diff --git a/vendor/_nuts/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md similarity index 100% rename from vendor/_nuts/github.com/inconshreveable/mousetrap/README.md rename to vendor/github.com/inconshreveable/mousetrap/README.md diff --git a/vendor/_nuts/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go similarity index 100% rename from vendor/_nuts/github.com/inconshreveable/mousetrap/trap_others.go rename to vendor/github.com/inconshreveable/mousetrap/trap_others.go diff --git a/vendor/_nuts/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go similarity index 100% rename from vendor/_nuts/github.com/inconshreveable/mousetrap/trap_windows.go rename to vendor/github.com/inconshreveable/mousetrap/trap_windows.go diff --git a/vendor/_nuts/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go similarity index 100% rename from vendor/_nuts/github.com/inconshreveable/mousetrap/trap_windows_1.4.go rename to vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go diff --git a/vendor/_nuts/github.com/kr/pretty/.gitignore 
b/vendor/github.com/kr/pretty/.gitignore similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/.gitignore rename to vendor/github.com/kr/pretty/.gitignore diff --git a/vendor/_nuts/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/License rename to vendor/github.com/kr/pretty/License diff --git a/vendor/_nuts/github.com/kr/pretty/Readme b/vendor/github.com/kr/pretty/Readme similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/Readme rename to vendor/github.com/kr/pretty/Readme diff --git a/vendor/_nuts/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/diff.go rename to vendor/github.com/kr/pretty/diff.go diff --git a/vendor/_nuts/github.com/kr/pretty/diff_test.go b/vendor/github.com/kr/pretty/diff_test.go similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/diff_test.go rename to vendor/github.com/kr/pretty/diff_test.go diff --git a/vendor/_nuts/github.com/kr/pretty/example_test.go b/vendor/github.com/kr/pretty/example_test.go similarity index 81% rename from vendor/_nuts/github.com/kr/pretty/example_test.go rename to vendor/github.com/kr/pretty/example_test.go index f192163c..ecf40f3f 100644 --- a/vendor/_nuts/github.com/kr/pretty/example_test.go +++ b/vendor/github.com/kr/pretty/example_test.go @@ -2,7 +2,7 @@ package pretty_test import ( "fmt" - "github.com/github/git-lfs/vendor/_nuts/github.com/kr/pretty" + "github.com/kr/pretty" ) func Example() { diff --git a/vendor/_nuts/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go similarity index 99% rename from vendor/_nuts/github.com/kr/pretty/formatter.go rename to vendor/github.com/kr/pretty/formatter.go index 07652978..c834d464 100644 --- a/vendor/_nuts/github.com/kr/pretty/formatter.go +++ b/vendor/github.com/kr/pretty/formatter.go @@ -7,7 +7,7 @@ import ( "strconv" "text/tabwriter" - "github.com/github/git-lfs/vendor/_nuts/github.com/kr/text" + "github.com/kr/text" ) const ( diff --git a/vendor/_nuts/github.com/kr/pretty/formatter_test.go b/vendor/github.com/kr/pretty/formatter_test.go similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/formatter_test.go rename to vendor/github.com/kr/pretty/formatter_test.go diff --git a/vendor/_nuts/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/pretty.go rename to vendor/github.com/kr/pretty/pretty.go diff --git a/vendor/_nuts/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go similarity index 100% rename from vendor/_nuts/github.com/kr/pretty/zero.go rename to vendor/github.com/kr/pretty/zero.go diff --git a/vendor/_nuts/github.com/kr/pty/.gitignore b/vendor/github.com/kr/pty/.gitignore similarity index 100% rename from vendor/_nuts/github.com/kr/pty/.gitignore rename to vendor/github.com/kr/pty/.gitignore diff --git a/vendor/_nuts/github.com/kr/pty/License b/vendor/github.com/kr/pty/License similarity index 100% rename from vendor/_nuts/github.com/kr/pty/License rename to vendor/github.com/kr/pty/License diff --git a/vendor/_nuts/github.com/kr/pty/README.md b/vendor/github.com/kr/pty/README.md similarity index 100% rename from vendor/_nuts/github.com/kr/pty/README.md rename to vendor/github.com/kr/pty/README.md diff --git a/vendor/_nuts/github.com/kr/pty/doc.go b/vendor/github.com/kr/pty/doc.go similarity index 100% rename from 
vendor/_nuts/github.com/kr/pty/doc.go rename to vendor/github.com/kr/pty/doc.go diff --git a/vendor/_nuts/github.com/kr/pty/ioctl.go b/vendor/github.com/kr/pty/ioctl.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ioctl.go rename to vendor/github.com/kr/pty/ioctl.go diff --git a/vendor/_nuts/github.com/kr/pty/ioctl_bsd.go b/vendor/github.com/kr/pty/ioctl_bsd.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ioctl_bsd.go rename to vendor/github.com/kr/pty/ioctl_bsd.go diff --git a/vendor/_nuts/github.com/kr/pty/mktypes.bash b/vendor/github.com/kr/pty/mktypes.bash old mode 100644 new mode 100755 similarity index 100% rename from vendor/_nuts/github.com/kr/pty/mktypes.bash rename to vendor/github.com/kr/pty/mktypes.bash diff --git a/vendor/_nuts/github.com/kr/pty/pty_darwin.go b/vendor/github.com/kr/pty/pty_darwin.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/pty_darwin.go rename to vendor/github.com/kr/pty/pty_darwin.go diff --git a/vendor/_nuts/github.com/kr/pty/pty_freebsd.go b/vendor/github.com/kr/pty/pty_freebsd.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/pty_freebsd.go rename to vendor/github.com/kr/pty/pty_freebsd.go diff --git a/vendor/_nuts/github.com/kr/pty/pty_linux.go b/vendor/github.com/kr/pty/pty_linux.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/pty_linux.go rename to vendor/github.com/kr/pty/pty_linux.go diff --git a/vendor/_nuts/github.com/kr/pty/pty_unsupported.go b/vendor/github.com/kr/pty/pty_unsupported.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/pty_unsupported.go rename to vendor/github.com/kr/pty/pty_unsupported.go diff --git a/vendor/_nuts/github.com/kr/pty/run.go b/vendor/github.com/kr/pty/run.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/run.go rename to vendor/github.com/kr/pty/run.go diff --git a/vendor/_nuts/github.com/kr/pty/types.go b/vendor/github.com/kr/pty/types.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/types.go rename to vendor/github.com/kr/pty/types.go diff --git a/vendor/_nuts/github.com/kr/pty/types_freebsd.go b/vendor/github.com/kr/pty/types_freebsd.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/types_freebsd.go rename to vendor/github.com/kr/pty/types_freebsd.go diff --git a/vendor/_nuts/github.com/kr/pty/util.go b/vendor/github.com/kr/pty/util.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/util.go rename to vendor/github.com/kr/pty/util.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_386.go b/vendor/github.com/kr/pty/ztypes_386.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_386.go rename to vendor/github.com/kr/pty/ztypes_386.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_amd64.go b/vendor/github.com/kr/pty/ztypes_amd64.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_amd64.go rename to vendor/github.com/kr/pty/ztypes_amd64.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_arm.go b/vendor/github.com/kr/pty/ztypes_arm.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_arm.go rename to vendor/github.com/kr/pty/ztypes_arm.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_arm64.go b/vendor/github.com/kr/pty/ztypes_arm64.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_arm64.go rename to vendor/github.com/kr/pty/ztypes_arm64.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_freebsd_386.go 
b/vendor/github.com/kr/pty/ztypes_freebsd_386.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_freebsd_386.go rename to vendor/github.com/kr/pty/ztypes_freebsd_386.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_freebsd_amd64.go b/vendor/github.com/kr/pty/ztypes_freebsd_amd64.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_freebsd_amd64.go rename to vendor/github.com/kr/pty/ztypes_freebsd_amd64.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_freebsd_arm.go b/vendor/github.com/kr/pty/ztypes_freebsd_arm.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_freebsd_arm.go rename to vendor/github.com/kr/pty/ztypes_freebsd_arm.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_ppc64.go b/vendor/github.com/kr/pty/ztypes_ppc64.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_ppc64.go rename to vendor/github.com/kr/pty/ztypes_ppc64.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_ppc64le.go b/vendor/github.com/kr/pty/ztypes_ppc64le.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_ppc64le.go rename to vendor/github.com/kr/pty/ztypes_ppc64le.go diff --git a/vendor/_nuts/github.com/kr/pty/ztypes_s390x.go b/vendor/github.com/kr/pty/ztypes_s390x.go similarity index 100% rename from vendor/_nuts/github.com/kr/pty/ztypes_s390x.go rename to vendor/github.com/kr/pty/ztypes_s390x.go diff --git a/vendor/_nuts/github.com/kr/text/License b/vendor/github.com/kr/text/License similarity index 100% rename from vendor/_nuts/github.com/kr/text/License rename to vendor/github.com/kr/text/License diff --git a/vendor/_nuts/github.com/kr/text/Readme b/vendor/github.com/kr/text/Readme similarity index 100% rename from vendor/_nuts/github.com/kr/text/Readme rename to vendor/github.com/kr/text/Readme diff --git a/vendor/_nuts/github.com/kr/text/colwriter/Readme b/vendor/github.com/kr/text/colwriter/Readme similarity index 100% rename from vendor/_nuts/github.com/kr/text/colwriter/Readme rename to vendor/github.com/kr/text/colwriter/Readme diff --git a/vendor/_nuts/github.com/kr/text/colwriter/column.go b/vendor/github.com/kr/text/colwriter/column.go similarity index 100% rename from vendor/_nuts/github.com/kr/text/colwriter/column.go rename to vendor/github.com/kr/text/colwriter/column.go diff --git a/vendor/_nuts/github.com/kr/text/colwriter/column_test.go b/vendor/github.com/kr/text/colwriter/column_test.go similarity index 100% rename from vendor/_nuts/github.com/kr/text/colwriter/column_test.go rename to vendor/github.com/kr/text/colwriter/column_test.go diff --git a/vendor/_nuts/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go similarity index 100% rename from vendor/_nuts/github.com/kr/text/doc.go rename to vendor/github.com/kr/text/doc.go diff --git a/vendor/_nuts/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go similarity index 100% rename from vendor/_nuts/github.com/kr/text/indent.go rename to vendor/github.com/kr/text/indent.go diff --git a/vendor/_nuts/github.com/kr/text/indent_test.go b/vendor/github.com/kr/text/indent_test.go similarity index 100% rename from vendor/_nuts/github.com/kr/text/indent_test.go rename to vendor/github.com/kr/text/indent_test.go diff --git a/vendor/_nuts/github.com/kr/text/mc/Readme b/vendor/github.com/kr/text/mc/Readme similarity index 100% rename from vendor/_nuts/github.com/kr/text/mc/Readme rename to vendor/github.com/kr/text/mc/Readme diff --git a/vendor/_nuts/github.com/kr/text/mc/mc.go 
b/vendor/github.com/kr/text/mc/mc.go similarity index 90% rename from vendor/_nuts/github.com/kr/text/mc/mc.go rename to vendor/github.com/kr/text/mc/mc.go index 4c591aac..00169a30 100644 --- a/vendor/_nuts/github.com/kr/text/mc/mc.go +++ b/vendor/github.com/kr/text/mc/mc.go @@ -10,8 +10,8 @@ package main import ( - "github.com/github/git-lfs/vendor/_nuts/github.com/kr/pty" - "github.com/github/git-lfs/vendor/_nuts/github.com/kr/text/colwriter" + "github.com/kr/pty" + "github.com/kr/text/colwriter" "io" "log" "os" diff --git a/vendor/_nuts/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go old mode 100644 new mode 100755 similarity index 100% rename from vendor/_nuts/github.com/kr/text/wrap.go rename to vendor/github.com/kr/text/wrap.go diff --git a/vendor/_nuts/github.com/kr/text/wrap_test.go b/vendor/github.com/kr/text/wrap_test.go similarity index 100% rename from vendor/_nuts/github.com/kr/text/wrap_test.go rename to vendor/github.com/kr/text/wrap_test.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/.travis.yml b/vendor/github.com/olekukonko/ts/.travis.yml similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/.travis.yml rename to vendor/github.com/olekukonko/ts/.travis.yml diff --git a/vendor/_nuts/github.com/olekukonko/ts/LICENCE b/vendor/github.com/olekukonko/ts/LICENCE similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/LICENCE rename to vendor/github.com/olekukonko/ts/LICENCE diff --git a/vendor/_nuts/github.com/olekukonko/ts/README.md b/vendor/github.com/olekukonko/ts/README.md similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/README.md rename to vendor/github.com/olekukonko/ts/README.md diff --git a/vendor/_nuts/github.com/olekukonko/ts/doc.go b/vendor/github.com/olekukonko/ts/doc.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/doc.go rename to vendor/github.com/olekukonko/ts/doc.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts.go b/vendor/github.com/olekukonko/ts/ts.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts.go rename to vendor/github.com/olekukonko/ts/ts.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_darwin.go b/vendor/github.com/olekukonko/ts/ts_darwin.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts_darwin.go rename to vendor/github.com/olekukonko/ts/ts_darwin.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_linux.go b/vendor/github.com/olekukonko/ts/ts_linux.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts_linux.go rename to vendor/github.com/olekukonko/ts/ts_linux.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_other.go b/vendor/github.com/olekukonko/ts/ts_other.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts_other.go rename to vendor/github.com/olekukonko/ts/ts_other.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_test.go b/vendor/github.com/olekukonko/ts/ts_test.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts_test.go rename to vendor/github.com/olekukonko/ts/ts_test.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_unix.go b/vendor/github.com/olekukonko/ts/ts_unix.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts_unix.go rename to vendor/github.com/olekukonko/ts/ts_unix.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_windows.go b/vendor/github.com/olekukonko/ts/ts_windows.go similarity index 100% rename from 
vendor/_nuts/github.com/olekukonko/ts/ts_windows.go rename to vendor/github.com/olekukonko/ts/ts_windows.go diff --git a/vendor/_nuts/github.com/olekukonko/ts/ts_x.go b/vendor/github.com/olekukonko/ts/ts_x.go similarity index 100% rename from vendor/_nuts/github.com/olekukonko/ts/ts_x.go rename to vendor/github.com/olekukonko/ts/ts_x.go diff --git a/vendor/_nuts/github.com/rubyist/tracerx/LICENSE b/vendor/github.com/rubyist/tracerx/LICENSE similarity index 100% rename from vendor/_nuts/github.com/rubyist/tracerx/LICENSE rename to vendor/github.com/rubyist/tracerx/LICENSE diff --git a/vendor/_nuts/github.com/rubyist/tracerx/README.md b/vendor/github.com/rubyist/tracerx/README.md similarity index 100% rename from vendor/_nuts/github.com/rubyist/tracerx/README.md rename to vendor/github.com/rubyist/tracerx/README.md diff --git a/vendor/_nuts/github.com/rubyist/tracerx/tracerx.go b/vendor/github.com/rubyist/tracerx/tracerx.go similarity index 100% rename from vendor/_nuts/github.com/rubyist/tracerx/tracerx.go rename to vendor/github.com/rubyist/tracerx/tracerx.go diff --git a/vendor/_nuts/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/.gitignore rename to vendor/github.com/spf13/cobra/.gitignore diff --git a/vendor/_nuts/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/.travis.yml rename to vendor/github.com/spf13/cobra/.travis.yml diff --git a/vendor/_nuts/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/LICENSE.txt rename to vendor/github.com/spf13/cobra/LICENSE.txt diff --git a/vendor/_nuts/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/README.md rename to vendor/github.com/spf13/cobra/README.md diff --git a/vendor/_nuts/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go similarity index 99% rename from vendor/_nuts/github.com/spf13/cobra/bash_completions.go rename to vendor/github.com/spf13/cobra/bash_completions.go index 29ad2f91..82c4274a 100644 --- a/vendor/_nuts/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -7,7 +7,7 @@ import ( "sort" "strings" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/pflag" + "github.com/spf13/pflag" ) const ( diff --git a/vendor/_nuts/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/bash_completions.md rename to vendor/github.com/spf13/cobra/bash_completions.md diff --git a/vendor/_nuts/github.com/spf13/cobra/bash_completions_test.go b/vendor/github.com/spf13/cobra/bash_completions_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/bash_completions_test.go rename to vendor/github.com/spf13/cobra/bash_completions_test.go diff --git a/vendor/_nuts/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/cobra.go rename to vendor/github.com/spf13/cobra/cobra.go diff --git a/vendor/_nuts/github.com/spf13/cobra/cobra_test.go b/vendor/github.com/spf13/cobra/cobra_test.go similarity index 99% rename from 
vendor/_nuts/github.com/spf13/cobra/cobra_test.go rename to vendor/github.com/spf13/cobra/cobra_test.go index c636692e..7cb4917d 100644 --- a/vendor/_nuts/github.com/spf13/cobra/cobra_test.go +++ b/vendor/github.com/spf13/cobra/cobra_test.go @@ -9,7 +9,7 @@ import ( "strings" "testing" - "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/pflag" + "github.com/spf13/pflag" ) var _ = fmt.Println diff --git a/vendor/_nuts/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go similarity index 99% rename from vendor/_nuts/github.com/spf13/cobra/command.go rename to vendor/github.com/spf13/cobra/command.go index aa1dcd46..74565c2b 100644 --- a/vendor/_nuts/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -24,8 +24,8 @@ import ( "strings" "time" - "github.com/github/git-lfs/vendor/_nuts/github.com/inconshreveable/mousetrap" - flag "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/pflag" + "github.com/inconshreveable/mousetrap" + flag "github.com/spf13/pflag" ) // Command is just that, a command for your application. diff --git a/vendor/_nuts/github.com/spf13/cobra/command_test.go b/vendor/github.com/spf13/cobra/command_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/command_test.go rename to vendor/github.com/spf13/cobra/command_test.go diff --git a/vendor/_nuts/github.com/spf13/cobra/md_docs.go b/vendor/github.com/spf13/cobra/md_docs.go similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/md_docs.go rename to vendor/github.com/spf13/cobra/md_docs.go diff --git a/vendor/_nuts/github.com/spf13/cobra/md_docs.md b/vendor/github.com/spf13/cobra/md_docs.md similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/md_docs.md rename to vendor/github.com/spf13/cobra/md_docs.md diff --git a/vendor/_nuts/github.com/spf13/cobra/md_docs_test.go b/vendor/github.com/spf13/cobra/md_docs_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/cobra/md_docs_test.go rename to vendor/github.com/spf13/cobra/md_docs_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/.travis.yml rename to vendor/github.com/spf13/pflag/.travis.yml diff --git a/vendor/_nuts/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/LICENSE rename to vendor/github.com/spf13/pflag/LICENSE diff --git a/vendor/_nuts/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/README.md rename to vendor/github.com/spf13/pflag/README.md diff --git a/vendor/_nuts/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/bool.go rename to vendor/github.com/spf13/pflag/bool.go diff --git a/vendor/_nuts/github.com/spf13/pflag/bool_test.go b/vendor/github.com/spf13/pflag/bool_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/bool_test.go rename to vendor/github.com/spf13/pflag/bool_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/count.go rename to vendor/github.com/spf13/pflag/count.go diff --git a/vendor/_nuts/github.com/spf13/pflag/count_test.go 
b/vendor/github.com/spf13/pflag/count_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/count_test.go rename to vendor/github.com/spf13/pflag/count_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/duration.go rename to vendor/github.com/spf13/pflag/duration.go diff --git a/vendor/_nuts/github.com/spf13/pflag/example_test.go b/vendor/github.com/spf13/pflag/example_test.go similarity index 97% rename from vendor/_nuts/github.com/spf13/pflag/example_test.go rename to vendor/github.com/spf13/pflag/example_test.go index c5940394..9be7a49f 100644 --- a/vendor/_nuts/github.com/spf13/pflag/example_test.go +++ b/vendor/github.com/spf13/pflag/example_test.go @@ -11,7 +11,7 @@ import ( "strings" "time" - flag "github.com/github/git-lfs/vendor/_nuts/github.com/spf13/pflag" + flag "github.com/spf13/pflag" ) // Example 1: A single string flag called "species" with default value "gopher". diff --git a/vendor/_nuts/github.com/spf13/pflag/export_test.go b/vendor/github.com/spf13/pflag/export_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/export_test.go rename to vendor/github.com/spf13/pflag/export_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/flag.go rename to vendor/github.com/spf13/pflag/flag.go diff --git a/vendor/_nuts/github.com/spf13/pflag/flag_test.go b/vendor/github.com/spf13/pflag/flag_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/flag_test.go rename to vendor/github.com/spf13/pflag/flag_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/float32.go rename to vendor/github.com/spf13/pflag/float32.go diff --git a/vendor/_nuts/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/float64.go rename to vendor/github.com/spf13/pflag/float64.go diff --git a/vendor/_nuts/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/int.go rename to vendor/github.com/spf13/pflag/int.go diff --git a/vendor/_nuts/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/int32.go rename to vendor/github.com/spf13/pflag/int32.go diff --git a/vendor/_nuts/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/int64.go rename to vendor/github.com/spf13/pflag/int64.go diff --git a/vendor/_nuts/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/int8.go rename to vendor/github.com/spf13/pflag/int8.go diff --git a/vendor/_nuts/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/int_slice.go rename to vendor/github.com/spf13/pflag/int_slice.go diff --git a/vendor/_nuts/github.com/spf13/pflag/int_slice_test.go b/vendor/github.com/spf13/pflag/int_slice_test.go similarity index 100% rename from 
vendor/_nuts/github.com/spf13/pflag/int_slice_test.go rename to vendor/github.com/spf13/pflag/int_slice_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/ip.go rename to vendor/github.com/spf13/pflag/ip.go diff --git a/vendor/_nuts/github.com/spf13/pflag/ip_test.go b/vendor/github.com/spf13/pflag/ip_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/ip_test.go rename to vendor/github.com/spf13/pflag/ip_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/ipmask.go rename to vendor/github.com/spf13/pflag/ipmask.go diff --git a/vendor/_nuts/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/ipnet.go rename to vendor/github.com/spf13/pflag/ipnet.go diff --git a/vendor/_nuts/github.com/spf13/pflag/ipnet_test.go b/vendor/github.com/spf13/pflag/ipnet_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/ipnet_test.go rename to vendor/github.com/spf13/pflag/ipnet_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/string.go rename to vendor/github.com/spf13/pflag/string.go diff --git a/vendor/_nuts/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/string_slice.go rename to vendor/github.com/spf13/pflag/string_slice.go diff --git a/vendor/_nuts/github.com/spf13/pflag/string_slice_test.go b/vendor/github.com/spf13/pflag/string_slice_test.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/string_slice_test.go rename to vendor/github.com/spf13/pflag/string_slice_test.go diff --git a/vendor/_nuts/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/uint.go rename to vendor/github.com/spf13/pflag/uint.go diff --git a/vendor/_nuts/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/uint16.go rename to vendor/github.com/spf13/pflag/uint16.go diff --git a/vendor/_nuts/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/uint32.go rename to vendor/github.com/spf13/pflag/uint32.go diff --git a/vendor/_nuts/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/uint64.go rename to vendor/github.com/spf13/pflag/uint64.go diff --git a/vendor/_nuts/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go similarity index 100% rename from vendor/_nuts/github.com/spf13/pflag/uint8.go rename to vendor/github.com/spf13/pflag/uint8.go diff --git a/vendor/github.com/stretchr/testify/.gitignore b/vendor/github.com/stretchr/testify/.gitignore new file mode 100644 index 00000000..5aacdb7c --- /dev/null +++ b/vendor/github.com/stretchr/testify/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific 
extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +.DS_Store diff --git a/vendor/github.com/stretchr/testify/.travis.yml b/vendor/github.com/stretchr/testify/.travis.yml new file mode 100644 index 00000000..455923ec --- /dev/null +++ b/vendor/github.com/stretchr/testify/.travis.yml @@ -0,0 +1,15 @@ +language: go + +sudo: false + +go: + - 1.1 + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/stretchr/testify/Godeps/Godeps.json b/vendor/github.com/stretchr/testify/Godeps/Godeps.json new file mode 100644 index 00000000..5069ce03 --- /dev/null +++ b/vendor/github.com/stretchr/testify/Godeps/Godeps.json @@ -0,0 +1,21 @@ +{ + "ImportPath": "github.com/stretchr/testify", + "GoVersion": "go1.5", + "Packages": [ + "./..." + ], + "Deps": [ + { + "ImportPath": "github.com/davecgh/go-spew/spew", + "Rev": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d" + }, + { + "ImportPath": "github.com/pmezard/go-difflib/difflib", + "Rev": "d8ed2627bdf02c080bf22230dbb337003b7aba2d" + }, + { + "ImportPath": "github.com/stretchr/objx", + "Rev": "cbeaeb16a013161a98496fad62933b1d21786672" + } + ] +} diff --git a/vendor/github.com/stretchr/testify/Godeps/Readme b/vendor/github.com/stretchr/testify/Godeps/Readme new file mode 100644 index 00000000..4cdaa53d --- /dev/null +++ b/vendor/github.com/stretchr/testify/Godeps/Readme @@ -0,0 +1,5 @@ +This directory tree is generated automatically by godep. + +Please do not edit. + +See https://github.com/tools/godep for more information. diff --git a/vendor/github.com/stretchr/testify/LICENCE.txt b/vendor/github.com/stretchr/testify/LICENCE.txt new file mode 100644 index 00000000..473b670a --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENCE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE new file mode 100644 index 00000000..473b670a --- /dev/null +++ b/vendor/github.com/stretchr/testify/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. 
+ +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, +and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT +OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/README.md b/vendor/github.com/stretchr/testify/README.md new file mode 100644 index 00000000..aaf2aa0a --- /dev/null +++ b/vendor/github.com/stretchr/testify/README.md @@ -0,0 +1,332 @@ +Testify - Thou Shalt Write Tests +================================ + +[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) + +A set of Go (golang) packages that provide many tools for testifying that your code will behave as you intend. + +Features include: + + * [Easy assertions](#assert-package) + * [Mocking](#mock-package) + * [HTTP response trapping](#http-package) + * [Testing suite interfaces and functions](#suite-package) + +Get started: + + * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date) + * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing + * Check out the API Documentation http://godoc.org/github.com/stretchr/testify + * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc) + * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development) + + + +[`assert`](http://godoc.org/github.com/stretchr/testify/assert "API documentation") package +------------------------------------------------------------------------------------------- + +The `assert` package provides some helpful methods that allow you to write better test code in Go. + + * Prints friendly, easy-to-read failure descriptions + * Allows for very readable code + * Optionally annotate each assertion with a message + +See it in action: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + + // assert equality + assert.Equal(t, 123, 123, "they should be equal") + + // assert inequality + assert.NotEqual(t, 123, 456, "they should not be equal") + + // assert for nil (good for errors) + assert.Nil(t, object) + + // assert for not nil (good when you expect something) + if assert.NotNil(t, object) { + + // now we know that object isn't nil, we are safe to make + // further assertions without causing any errors + assert.Equal(t, "Something", object.Value) + + } + +} +``` + + * Every assert func takes the `testing.T` object as the first argument. This is how it writes the errors out through the normal `go test` capabilities.
+ * Every assert func returns a bool indicating whether the assertion was successful or not; this is useful when you want to go on making further assertions only under certain conditions. + +If you assert many times, use the form below: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + assert := assert.New(t) + + // assert equality + assert.Equal(123, 123, "they should be equal") + + // assert inequality + assert.NotEqual(123, 456, "they should not be equal") + + // assert for nil (good for errors) + assert.Nil(object) + + // assert for not nil (good when you expect something) + if assert.NotNil(object) { + + // now we know that object isn't nil, we are safe to make + // further assertions without causing any errors + assert.Equal("Something", object.Value) + } +} +``` + +[`require`](http://godoc.org/github.com/stretchr/testify/require "API documentation") package +--------------------------------------------------------------------------------------------- + +The `require` package provides the same global functions as the `assert` package, but instead of returning a boolean result they terminate the current test (a brief usage sketch follows this vendored README). + +See [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details. + + +[`http`](http://godoc.org/github.com/stretchr/testify/http "API documentation") package +--------------------------------------------------------------------------------------- + +The `http` package contains test objects useful for testing code that relies on the `net/http` package. Check out the [(deprecated) API documentation for the `http` package](http://godoc.org/github.com/stretchr/testify/http). + +We recommend you use [httptest](http://golang.org/pkg/net/http/httptest) instead. + +[`mock`](http://godoc.org/github.com/stretchr/testify/mock "API documentation") package +---------------------------------------------------------------------------------------- + +The `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code. + +An example test function that tests a piece of code relying on an external object `testObj` can set up expectations (with testify) and assert that they indeed happened: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/mock" +) + +/* + Test objects +*/ + +// MyMockedObject is a mocked object that implements an interface +// that describes an object that the code I am testing relies on. +type MyMockedObject struct{ + mock.Mock +} + +// DoSomething is a method on MyMockedObject that implements some interface +// and just records the activity, and returns what the Mock object tells it to. +// +// In the real object, this method would do something useful, but since this +// is a mocked object - we're just going to stub it out. +// +// NOTE: This method is not being tested here, code that uses this object is. +func (m *MyMockedObject) DoSomething(number int) (bool, error) { + + args := m.Called(number) + return args.Bool(0), args.Error(1) + +} + +/* + Actual test functions +*/ + +// TestSomething is an example of how to use our test object to +// make assertions about some target code we are testing.
+func TestSomething(t *testing.T) { + + // create an instance of our test object + testObj := new(MyMockedObject) + + // setup expectations + testObj.On("DoSomething", 123).Return(true, nil) + + // call the code we are testing + targetFuncThatDoesSomethingWithObj(testObj) + + // assert that the expectations were met + testObj.AssertExpectations(t) + +} +``` + +For more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock). + +You can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, which makes using mocks much quicker. + +[`suite`](http://godoc.org/github.com/stretchr/testify/suite "API documentation") package +----------------------------------------------------------------------------------------- + +The `suite` package provides functionality that you might be used to from more common object-oriented languages. With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as normal. + +An example suite is shown below: + +```go +// Basic imports +import ( + "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including a T() method which +// returns the current testing context +type ExampleTestSuite struct { + suite.Suite + VariableThatShouldStartAtFive int +} + +// Make sure that VariableThatShouldStartAtFive is set to five +// before each test +func (suite *ExampleTestSuite) SetupTest() { + suite.VariableThatShouldStartAtFive = 5 +} + +// All methods that begin with "Test" are run as tests within a +// suite. +func (suite *ExampleTestSuite) TestExample() { + assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestExampleTestSuite(t *testing.T) { + suite.Run(t, new(ExampleTestSuite)) +} +``` + +For a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go). + +For more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite). + +The `Suite` object also has assertion methods: + +```go +// Basic imports +import ( + "testing" + "github.com/stretchr/testify/suite" +) + +// Define the suite, and absorb the built-in basic suite +// functionality from testify - including assertion methods. +type ExampleTestSuite struct { + suite.Suite + VariableThatShouldStartAtFive int +} + +// Make sure that VariableThatShouldStartAtFive is set to five +// before each test +func (suite *ExampleTestSuite) SetupTest() { + suite.VariableThatShouldStartAtFive = 5 +} + +// All methods that begin with "Test" are run as tests within a +// suite.
+func (suite *ExampleTestSuite) TestExample() { + suite.Equal(suite.VariableThatShouldStartAtFive, 5) +} + +// In order for 'go test' to run this suite, we need to create +// a normal test function and pass our suite to suite.Run +func TestExampleTestSuite(t *testing.T) { + suite.Run(t, new(ExampleTestSuite)) +} +``` + +------ + +Installation +============ + +To install Testify, use `go get`: + + * Latest version: go get github.com/stretchr/testify + * Specific version: go get gopkg.in/stretchr/testify.v1 + +This will then make the following packages available to you: + + github.com/stretchr/testify/assert + github.com/stretchr/testify/mock + github.com/stretchr/testify/http + +Import the `testify/assert` package into your code using this template: + +```go +package yours + +import ( + "testing" + "github.com/stretchr/testify/assert" +) + +func TestSomething(t *testing.T) { + + assert.True(t, true, "True is true!") + +} +``` + +------ + +Staying up to date +================== + +To update Testify to the latest version, use `go get -u github.com/stretchr/testify`. + +------ + +Version History +=============== + + * 1.0 - New package versioning strategy adopted. + +------ + +Contributing +============ + +Please feel free to submit issues, fork the repository and send pull requests! + +When submitting an issue, we ask that you please include a complete test function that demonstrates the issue. Extra credit for those using Testify to write the test code that demonstrates it. + +------ + +Licence +======= +Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell + +Please consider promoting this project if you find it useful. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
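The vendored README above shows `assert` in action but gives no example for its `require` counterpart. As a rough illustration only (not part of the vendored files; `SomeFunction` is a hypothetical function under test), a `require`-based test might look like the sketch below, relying on the package's documented behavior of terminating the test via `t.FailNow` on the first failed assertion:

```go
package yours

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// SomeFunction is a hypothetical function under test.
func SomeFunction() (string, error) {
	return "gopher", nil
}

func TestSomethingStrict(t *testing.T) {
	result, err := SomeFunction()

	// Unlike assert.NoError, require.NoError stops the test on failure,
	// so the assertion below only runs when err is nil.
	require.NoError(t, err)
	require.Equal(t, "gopher", result)
}
```

Note the import path: with this change's move from `vendor/_nuts` to the standard `vendor/` layout, vendored packages are imported by their canonical paths (here `github.com/stretchr/testify/require`) rather than the rewritten `github.com/github/git-lfs/vendor/_nuts/...` paths seen in the removed files.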
diff --git a/vendor/github.com/stretchr/testify/_codegen/main.go b/vendor/github.com/stretchr/testify/_codegen/main.go new file mode 100644 index 00000000..328009f8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/_codegen/main.go @@ -0,0 +1,287 @@ +// This program reads all assertion functions from the assert package and +// automatically generates the corresponding requires and forwarded assertions + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/importer" + "go/parser" + "go/token" + "go/types" + "io" + "io/ioutil" + "log" + "os" + "path" + "strings" + "text/template" + + "github.com/ernesto-jimenez/gogen/imports" +) + +var ( + pkg = flag.String("assert-path", "github.com/stretchr/testify/assert", "Path to the assert package") + outputPkg = flag.String("output-package", "", "package for the resulting code") + tmplFile = flag.String("template", "", "What file to load the function template from") + out = flag.String("out", "", "What file to write the source code to") +) + +func main() { + flag.Parse() + + scope, docs, err := parsePackageSource(*pkg) + if err != nil { + log.Fatal(err) + } + + importer, funcs, err := analyzeCode(scope, docs) + if err != nil { + log.Fatal(err) + } + + if err := generateCode(importer, funcs); err != nil { + log.Fatal(err) + } +} + +func generateCode(importer imports.Importer, funcs []testFunc) error { + buff := bytes.NewBuffer(nil) + + tmplHead, tmplFunc, err := parseTemplates() + if err != nil { + return err + } + + // Generate header + if err := tmplHead.Execute(buff, struct { + Name string + Imports map[string]string + }{ + *outputPkg, + importer.Imports(), + }); err != nil { + return err + } + + // Generate funcs + for _, fn := range funcs { + buff.Write([]byte("\n\n")) + if err := tmplFunc.Execute(buff, &fn); err != nil { + return err + } + } + + // Write file + output, err := outputFile() + if err != nil { + return err + } + defer output.Close() + _, err = io.Copy(output, buff) + return err +} + +func parseTemplates() (*template.Template, *template.Template, error) { + tmplHead, err := template.New("header").Parse(headerTemplate) + if err != nil { + return nil, nil, err + } + if *tmplFile != "" { + f, err := ioutil.ReadFile(*tmplFile) + if err != nil { + return nil, nil, err + } + funcTemplate = string(f) + } + tmpl, err := template.New("function").Parse(funcTemplate) + if err != nil { + return nil, nil, err + } + return tmplHead, tmpl, nil +} + +func outputFile() (*os.File, error) { + filename := *out + if filename == "-" || (filename == "" && *tmplFile == "") { + return os.Stdout, nil + } + if filename == "" { + filename = strings.TrimSuffix(strings.TrimSuffix(*tmplFile, ".tmpl"), ".go") + ".go" + } + return os.Create(filename) +} + +// analyzeCode takes the types scope and the docs and returns the import +// information and information about all the assertion functions.
+func analyzeCode(scope *types.Scope, docs *doc.Package) (imports.Importer, []testFunc, error) { + testingT := scope.Lookup("TestingT").Type().Underlying().(*types.Interface) + + importer := imports.New(*outputPkg) + var funcs []testFunc + // Go through all the top level functions + for _, fdocs := range docs.Funcs { + // Find the function + obj := scope.Lookup(fdocs.Name) + + fn, ok := obj.(*types.Func) + if !ok { + continue + } + // Check function signature has at least two arguments + sig := fn.Type().(*types.Signature) + if sig.Params().Len() < 2 { + continue + } + // Check first argument is of type testingT + first, ok := sig.Params().At(0).Type().(*types.Named) + if !ok { + continue + } + firstType, ok := first.Underlying().(*types.Interface) + if !ok { + continue + } + if !types.Implements(firstType, testingT) { + continue + } + + funcs = append(funcs, testFunc{*outputPkg, fdocs, fn}) + importer.AddImportsFrom(sig.Params()) + } + return importer, funcs, nil +} + +// parsePackageSource returns the types scope and the package documentation from the package source +func parsePackageSource(pkg string) (*types.Scope, *doc.Package, error) { + pd, err := build.Import(pkg, ".", 0) + if err != nil { + return nil, nil, err + } + + fset := token.NewFileSet() + files := make(map[string]*ast.File) + fileList := make([]*ast.File, len(pd.GoFiles)) + for i, fname := range pd.GoFiles { + src, err := ioutil.ReadFile(path.Join(pd.SrcRoot, pd.ImportPath, fname)) + if err != nil { + return nil, nil, err + } + f, err := parser.ParseFile(fset, fname, src, parser.ParseComments|parser.AllErrors) + if err != nil { + return nil, nil, err + } + files[fname] = f + fileList[i] = f + } + + cfg := types.Config{ + Importer: importer.Default(), + } + info := types.Info{ + Defs: make(map[*ast.Ident]types.Object), + } + tp, err := cfg.Check(pkg, fset, fileList, &info) + if err != nil { + return nil, nil, err + } + + scope := tp.Scope() + + ap, _ := ast.NewPackage(fset, files, nil, nil) + docs := doc.New(ap, pkg, 0) + + return scope, docs, nil +} + +type testFunc struct { + CurrentPkg string + DocInfo *doc.Func + TypeInfo *types.Func +} + +func (f *testFunc) Qualifier(p *types.Package) string { + if p == nil || p.Name() == f.CurrentPkg { + return "" + } + return p.Name() +} + +func (f *testFunc) Params() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s %s", comma, param.Name(), types.TypeString(param.Type(), f.Qualifier)) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s ...%s", comma, param.Name(), types.TypeString(param.Type().(*types.Slice).Elem(), f.Qualifier)) + } + return p +} + +func (f *testFunc) ForwardedParams() string { + sig := f.TypeInfo.Type().(*types.Signature) + params := sig.Params() + p := "" + comma := "" + to := params.Len() + var i int + + if sig.Variadic() { + to-- + } + for i = 1; i < to; i++ { + param := params.At(i) + p += fmt.Sprintf("%s%s", comma, param.Name()) + comma = ", " + } + if sig.Variadic() { + param := params.At(params.Len() - 1) + p += fmt.Sprintf("%s%s...", comma, param.Name()) + } + return p +} + +func (f *testFunc) Comment() string { + return "// " + strings.Replace(strings.TrimSpace(f.DocInfo.Doc), "\n", "\n// ", -1) +} + +func (f *testFunc) CommentWithoutT(receiver string) string { + search := fmt.Sprintf("assert.%s(t, ", f.DocInfo.Name) + 
replace := fmt.Sprintf("%s.%s(", receiver, f.DocInfo.Name) + return strings.Replace(f.Comment(), search, replace, -1) +} + +var headerTemplate = `/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package {{.Name}} + +import ( +{{range $path, $name := .Imports}} + {{$name}} "{{$path}}"{{end}} +) +` + +var funcTemplate = `{{.Comment}} +func (fwd *AssertionsForwarder) {{.DocInfo.Name}}({{.Params}}) bool { + return assert.{{.DocInfo.Name}}({{.ForwardedParams}}) +}` diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 00000000..e6a79604 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,387 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package assert + +import ( + + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + + +// EqualValues asserts that two objects are equal or convertible to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return EqualValues(a.t, expected, actual, msgAndArgs...)
+} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + return Error(a.t, err, msgAndArgs...) +} + + +// Exactly asserts that two objects are equal in value and type. +// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + + +// Fail reports a failure through the TestingT interface. +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + + +// FailNow fails the test. +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + return FailNow(a.t, failureMessage, msgAndArgs...) +} + + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str) +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPError(a.t, handler, method, url, values) +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPRedirect(a.t, handler, method, url, values) +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPSuccess(a.t, handler, method, url, values) +} + + +// Implements asserts that an object implements the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon. +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + + +// Len asserts that the specified object has a specific length. +// Len also fails if the object has a type that len() does not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...)
+} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + return NotZero(a.t, i, msgAndArgs...) +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) 
+} + + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 00000000..99f9acfb --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go new file mode 100644 index 00000000..d7c16c59 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -0,0 +1,1004 @@ +package assert + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "math" + "reflect" + "regexp" + "runtime" + "strings" + "time" + "unicode" + "unicode/utf8" + + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) +} + +// Comparison is a custom function that returns true on success and false on failure +type Comparison func() (success bool) + +/* + Helper functions +*/ + +// ObjectsAreEqual determines if two objects are considered equal. +// +// This function does no assertion of any kind. +func ObjectsAreEqual(expected, actual interface{}) bool { + + if expected == nil || actual == nil { + return expected == actual + } + + return reflect.DeepEqual(expected, actual) + +} + +// ObjectsAreEqualValues gets whether two objects are equal, or if their +// values are equal. +func ObjectsAreEqualValues(expected, actual interface{}) bool { + if ObjectsAreEqual(expected, actual) { + return true + } + + actualType := reflect.TypeOf(actual) + if actualType == nil { + return false + } + expectedValue := reflect.ValueOf(expected) + if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { + // Attempt comparison after type conversion + return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) + } + + return false +} + +/* CallerInfo is necessary because the assert functions use the testing object +internally, causing it to print the file:line of the assert method, rather than where +the problem actually occurred in calling code.*/ + +// CallerInfo returns an array of strings containing the file and line number +// of each stack frame leading from the current test to the assert call that +// failed.
+func CallerInfo() []string {
+
+ pc := uintptr(0)
+ file := ""
+ line := 0
+ ok := false
+ name := ""
+
+ callers := []string{}
+ for i := 0; ; i++ {
+ pc, file, line, ok = runtime.Caller(i)
+ if !ok {
+ return nil
+ }
+
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "<autogenerated>" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ dir := parts[len(parts)-2]
+ file = parts[len(parts)-1]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+ // Drop the package
+ segments := strings.Split(name, ".")
+ name = segments[len(segments)-1]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+ }
+
+ return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(rune)
+}
+
+// getWhitespaceString returns a string that is long enough to overwrite the default
+// output from the go testing framework.
+func getWhitespaceString() string {
+
+ _, file, line, ok := runtime.Caller(1)
+ if !ok {
+ return ""
+ }
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+
+ return strings.Repeat(" ", len(fmt.Sprintf("%s:%d: ", file, line)))
+
+}
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+ if len(msgAndArgs) == 0 || msgAndArgs == nil {
+ return ""
+ }
+ if len(msgAndArgs) == 1 {
+ return msgAndArgs[0].(string)
+ }
+ if len(msgAndArgs) > 1 {
+ return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ }
+ return ""
+}
+
+// Indents all lines of the message by prepending a number of tabs to each line, in an output format compatible with Go's
+// test printing (see inner comment for specifics)
+func indentMessageLines(message string, tabs int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ if i != 0 {
+ outBuf.WriteRune('\n')
+ }
+ for ii := 0; ii < tabs; ii++ {
+ outBuf.WriteRune('\t')
+ // Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter
+ // by 1 prematurely.
+ if ii == 0 && i > 0 {
+ ii++
+ }
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+type failNower interface {
+ FailNow()
+}
+
+// FailNow fails the test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ Fail(t, failureMessage, msgAndArgs...)
+
+ // We cannot extend TestingT with FailNow() and
+ // maintain backwards compatibility, so we fallback
+ // to panicking when FailNow is not available in
+ // TestingT.
+ // See issue #263
+
+ if t, ok := t.(failNower); ok {
+ t.FailNow()
+ } else {
+ panic("test failed and t is missing `FailNow()`")
+ }
+ return false
+}
+
+// Fail reports a failure through the TestingT's Errorf method.
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
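+ // Illustrative note (not part of upstream testify): msgAndArgs follows the
+ // convention handled by messageFromMsgAndArgs above -- empty, a single
+ // message string, or a format string followed by its arguments, e.g.
+ // Fail(t, "boom", "id %d", 7) renders the message "id 7".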
+
+ errorTrace := strings.Join(CallerInfo(), "\n\r\t\t\t")
+ if len(message) > 0 {
+ t.Errorf("\r%s\r\tError Trace:\t%s\n"+
+ "\r\tError:%s\n"+
+ "\r\tMessages:\t%s\n\r",
+ getWhitespaceString(),
+ errorTrace,
+ indentMessageLines(failureMessage, 2),
+ message)
+ } else {
+ t.Errorf("\r%s\r\tError Trace:\t%s\n"+
+ "\r\tError:%s\n\r",
+ getWhitespaceString(),
+ errorTrace,
+ indentMessageLines(failureMessage, 2))
+ }
+
+ return false
+}
+
+// Implements asserts that an object implements the specified interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123, "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqual(expected, actual) {
+ diff := diff(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
+ " != %#v (actual)%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Not equal: %#v (expected)\n"+
+ " != %#v (actual)", expected, actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\r\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err, "err should be something")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isNil(object) {
+ return true
+ }
+ return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ if kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {
+ return true
+ }
+
+ return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err, "err should be nothing")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+var numericZeros = []interface{}{
+ int(0),
+ int8(0),
+ int16(0),
+ int32(0),
+ int64(0),
+ uint(0),
+ uint8(0),
+ uint16(0),
+ uint32(0),
+ uint64(0),
+ float32(0),
+ float64(0),
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ if object == nil {
+ return true
+ } else if object == "" {
+ return true
+ } else if object == false {
+ return true
+ }
+
+ for _, v := range numericZeros {
+ if object == v {
+ return true
+ }
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ case reflect.Map:
+ fallthrough
+ case reflect.Slice, reflect.Chan:
+ {
+ return (objValue.Len() == 0)
+ }
+ case reflect.Struct:
+ switch object.(type) {
+ case time.Time:
+ return object.(time.Time).IsZero()
+ }
+ case reflect.Ptr:
+ {
+ if objValue.IsNil() {
+ return true
+ }
+ switch object.(type) {
+ case *time.Time:
+ return object.(*time.Time).IsZero()
+ default:
+ return false
+ }
+ }
+ }
+ return false
+}
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ pass := isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+
+ pass := !isEmpty(object)
+ if !pass {
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+ v := reflect.ValueOf(x)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+ return true, v.Len()
+}
+
+// Len asserts that the specified object has specific length.
+// Len also fails if the object has a type that the builtin len() does not accept.
+//
+// assert.Len(t, mySlice, 3, "The size of slice is not 3")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ ok, l := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool, "myBool should be true")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+ if value != true {
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool, "myBool should be false")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+
+ if value != false {
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// includeElement loops over the list (string, array, slice or map) to check whether it includes the element.
+// return (false, false) if impossible.
+// return (true, false) if element was not found.
+// return (true, true) if element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ elementValue := reflect.ValueOf(element)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if reflect.TypeOf(list).Kind() == reflect.String {
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ if reflect.TypeOf(list).Kind() == reflect.Map {
+ mapKeys := listValue.MapKeys()
+ for i := 0; i < len(mapKeys); i++ {
+ if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
+// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
+// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { + + ok, found := includeElement(s, contains) + if !ok { + return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) + } + if found { + return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) + } + + return true + +} + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { + result := comp() + if !result { + Fail(t, "Condition failed!", msgAndArgs...) + } + return result +} + +// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics +// methods, and represents a simple func that takes no arguments, and returns nothing. +type PanicTestFunc func() + +// didPanic returns true if the function passed to it panics. Otherwise, it returns false. +func didPanic(f PanicTestFunc) (bool, interface{}) { + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + // call the target function + f() + + }() + + return didPanic, message + +} + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); !funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { + + if funcDidPanic, panicValue := didPanic(f); funcDidPanic { + return Fail(t, fmt.Sprintf("func %#v should not panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...) + } + + return true +} + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + + dt := expected.Sub(actual) + if dt < -delta || dt > delta { + return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) 
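+ // Illustrative note (not part of upstream testify): dt is expected minus
+ // actual, so the bounds check above is symmetric; any |dt| <= delta passes
+ // regardless of argument order.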
+ }
+
+ return true
+}
+
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ }
+
+ if math.IsNaN(af) {
+ return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...)
+ }
+
+ if math.IsNaN(bf) {
+ return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+ af, aok := toFloat(expected)
+ if !aok {
+ return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ }
+ if af == 0 {
+ return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+ }
+ bf, bok := toFloat(actual)
+ if !bok {
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+ }
+
+ return math.Abs(af-bf) / math.Abs(af), nil
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ actualEpsilon, err := calcRelativeError(expected, actual)
+ if err != nil {
+ return Fail(t, err.Error(), msgAndArgs...)
+ }
+ if actualEpsilon > epsilon {
+ return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+ " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, actualObj, expectedObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if isNil(err) {
+ return true
+ }
+
+ return Fail(t, fmt.Sprintf("Received unexpected error %q", err), msgAndArgs...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err, "An error was expected") {
+// assert.Equal(t, err, expectedError)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ return NotNil(t, err, "An error is expected but got nil. %s", message)
+
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString, "An error was expected")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if !NotNil(t, theError, "An error is expected but got nil. %s", message) {
+ return false
+ }
+ s := "An error with value \"%s\" is expected but got \"%s\". %s"
+ return Equal(t, errString, theError.Error(),
+ s, errString, theError.Error(), message)
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+ return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+
+ match := matchRegexp(rx, str)
+
+ if !match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + match := matchRegexp(rx, str) + + if match { + Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) + } + + return !match + +} + +// Zero asserts that i is the zero value for its type and returns the truth. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { + if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { + return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) + } + return true +} + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { + var expectedJSONAsInterface, actualJSONAsInterface interface{} + + if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) + } + + if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { + return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) + } + + return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) +} + +func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { + t := reflect.TypeOf(v) + k := t.Kind() + + if k == reflect.Ptr { + t = t.Elem() + k = t.Kind() + } + return t, k +} + +// diff returns a diff of both values as long as both are of the same type and +// are a struct, map, slice or array. Otherwise it returns an empty string. 
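+//
+// For example (illustrative, not part of upstream testify), two structs of
+// the same type yield a unified diff rendered via go-spew and go-difflib:
+//
+// diff(struct{ foo string }{"hello"}, struct{ foo string }{"bar"})
+// // -> "\n\nDiff:\n--- Expected\n+++ Actual\n@@ -1,3 +1,3 @@ ..."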
+func diff(expected interface{}, actual interface{}) string { + if expected == nil || actual == nil { + return "" + } + + et, ek := typeAndKind(expected) + at, _ := typeAndKind(actual) + + if et != at { + return "" + } + + if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array { + return "" + } + + spew.Config.SortKeys = true + e := spew.Sdump(expected) + a := spew.Sdump(actual) + + diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(e), + B: difflib.SplitLines(a), + FromFile: "Expected", + FromDate: "", + ToFile: "Actual", + ToDate: "", + Context: 1, + }) + + return "\n\nDiff:\n" + diff +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions_test.go b/vendor/github.com/stretchr/testify/assert/assertions_test.go new file mode 100644 index 00000000..15a04b8f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertions_test.go @@ -0,0 +1,1122 @@ +package assert + +import ( + "errors" + "io" + "math" + "os" + "reflect" + "regexp" + "testing" + "time" +) + +var ( + i interface{} + zeros = []interface{}{ + false, + byte(0), + complex64(0), + complex128(0), + float32(0), + float64(0), + int(0), + int8(0), + int16(0), + int32(0), + int64(0), + rune(0), + uint(0), + uint8(0), + uint16(0), + uint32(0), + uint64(0), + uintptr(0), + "", + [0]interface{}{}, + []interface{}(nil), + struct{ x int }{}, + (*interface{})(nil), + (func())(nil), + nil, + interface{}(nil), + map[interface{}]interface{}(nil), + (chan interface{})(nil), + (<-chan interface{})(nil), + (chan<- interface{})(nil), + } + nonZeros = []interface{}{ + true, + byte(1), + complex64(1), + complex128(1), + float32(1), + float64(1), + int(1), + int8(1), + int16(1), + int32(1), + int64(1), + rune(1), + uint(1), + uint8(1), + uint16(1), + uint32(1), + uint64(1), + uintptr(1), + "s", + [1]interface{}{1}, + []interface{}{}, + struct{ x int }{1}, + (*interface{})(&i), + (func())(func() {}), + interface{}(1), + map[interface{}]interface{}{}, + (chan interface{})(make(chan interface{})), + (<-chan interface{})(make(chan interface{})), + (chan<- interface{})(make(chan interface{})), + } +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +func TestObjectsAreEqual(t *testing.T) { + + if !ObjectsAreEqual("Hello World", "Hello World") { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123, 123) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(123.5, 123.5) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual([]byte("Hello World"), []byte("Hello World")) { + t.Error("objectsAreEqual should return true") + } + if !ObjectsAreEqual(nil, nil) { + t.Error("objectsAreEqual should return true") + } + if ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual('x', "x") { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual("x", 'x') { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0, 
0.1) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(0.1, 0) { + t.Error("objectsAreEqual should return false") + } + if ObjectsAreEqual(uint32(10), int32(10)) { + t.Error("objectsAreEqual should return false") + } + if !ObjectsAreEqualValues(uint32(10), int32(10)) { + t.Error("ObjectsAreEqualValues should return true") + } + if ObjectsAreEqualValues(0, nil) { + t.Fail() + } + if ObjectsAreEqualValues(nil, 0) { + t.Fail() + } + +} + +func TestImplements(t *testing.T) { + + mockT := new(testing.T) + + if !Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) { + t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface") + } + if Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) { + t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface") + } + +} + +func TestIsType(t *testing.T) { + + mockT := new(testing.T) + + if !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject") + } + if IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) { + t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject") + } + +} + +func TestEqual(t *testing.T) { + + mockT := new(testing.T) + + if !Equal(mockT, "Hello World", "Hello World") { + t.Error("Equal should return true") + } + if !Equal(mockT, 123, 123) { + t.Error("Equal should return true") + } + if !Equal(mockT, 123.5, 123.5) { + t.Error("Equal should return true") + } + if !Equal(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("Equal should return true") + } + if !Equal(mockT, nil, nil) { + t.Error("Equal should return true") + } + if !Equal(mockT, int32(123), int32(123)) { + t.Error("Equal should return true") + } + if !Equal(mockT, uint64(123), uint64(123)) { + t.Error("Equal should return true") + } + +} + +func TestNotNil(t *testing.T) { + + mockT := new(testing.T) + + if !NotNil(mockT, new(AssertionTesterConformingObject)) { + t.Error("NotNil should return true: object is not nil") + } + if NotNil(mockT, nil) { + t.Error("NotNil should return false: object is nil") + } + if NotNil(mockT, (*struct{})(nil)) { + t.Error("NotNil should return false: object is (*struct{})(nil)") + } + +} + +func TestNil(t *testing.T) { + + mockT := new(testing.T) + + if !Nil(mockT, nil) { + t.Error("Nil should return true: object is nil") + } + if !Nil(mockT, (*struct{})(nil)) { + t.Error("Nil should return true: object is (*struct{})(nil)") + } + if Nil(mockT, new(AssertionTesterConformingObject)) { + t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrue(t *testing.T) { + + mockT := new(testing.T) + + if !True(mockT, true) { + t.Error("True should return true") + } + if True(mockT, false) { + t.Error("True should return false") + } + +} + +func TestFalse(t *testing.T) { + + mockT := new(testing.T) + + if !False(mockT, false) { + t.Error("False should return true") + } + if False(mockT, true) { + t.Error("False should return false") + } + +} + +func TestExactly(t *testing.T) { + + mockT := new(testing.T) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if Exactly(mockT, a, b) { + t.Error("Exactly 
should return false") + } + if Exactly(mockT, a, d) { + t.Error("Exactly should return false") + } + if !Exactly(mockT, a, c) { + t.Error("Exactly should return true") + } + + if Exactly(mockT, nil, a) { + t.Error("Exactly should return false") + } + if Exactly(mockT, a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqual(t *testing.T) { + + mockT := new(testing.T) + + if !NotEqual(mockT, "Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123, 1234) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, 123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, []byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } + funcA := func() int { return 23 } + funcB := func() int { return 42 } + if !NotEqual(mockT, funcA, funcB) { + t.Error("NotEqual should return true") + } + + if NotEqual(mockT, "Hello World", "Hello World") { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123, 123) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, 123.5, 123.5) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, []byte("Hello World"), []byte("Hello World")) { + t.Error("NotEqual should return false") + } + if NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return false") + } +} + +type A struct { + Name, Value string +} + +func TestContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + complexList := []*A{ + {"b", "c"}, + {"d", "e"}, + {"g", "h"}, + {"j", "k"}, + } + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + if !Contains(mockT, "Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if Contains(mockT, "Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !Contains(mockT, list, "Bar") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Bar\"") + } + if Contains(mockT, list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + if !Contains(mockT, complexList, &A{"g", "h"}) { + t.Error("Contains should return true: complexList contains {\"g\", \"h\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } + if Contains(mockT, complexList, &A{"g", "e"}) { + t.Error("Contains should return false: complexList contains {\"g\", \"e\"}") + } + if !Contains(mockT, simpleMap, "Foo") { + t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"") + } + if Contains(mockT, simpleMap, "Bar") { + t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contains \"Bar\"") + } +} + +func TestNotContains(t *testing.T) { + + mockT := new(testing.T) + list := []string{"Foo", "Bar"} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + if !NotContains(mockT, "Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if NotContains(mockT, "Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !NotContains(mockT, list, "Foo!") { + t.Error("NotContains 
should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if NotContains(mockT, list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if NotContains(mockT, simpleMap, "Foo") { + t.Error("Contains should return true: \"{\"Foo\": \"Bar\"}\" contains \"Foo\"") + } + if !NotContains(mockT, simpleMap, "Bar") { + t.Error("Contains should return false: \"{\"Foo\": \"Bar\"}\" does not contains \"Bar\"") + } +} + +func Test_includeElement(t *testing.T) { + + list1 := []string{"Foo", "Bar"} + list2 := []int{1, 2} + simpleMap := map[interface{}]interface{}{"Foo": "Bar"} + + ok, found := includeElement("Hello World", "World") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Bar") + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 1) + True(t, ok) + True(t, found) + + ok, found = includeElement(list2, 2) + True(t, ok) + True(t, found) + + ok, found = includeElement(list1, "Foo!") + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, 3) + True(t, ok) + False(t, found) + + ok, found = includeElement(list2, "1") + True(t, ok) + False(t, found) + + ok, found = includeElement(simpleMap, "Foo") + True(t, ok) + True(t, found) + + ok, found = includeElement(simpleMap, "Bar") + True(t, ok) + False(t, found) + + ok, found = includeElement(1433, "1") + False(t, ok) + False(t, found) +} + +func TestCondition(t *testing.T) { + mockT := new(testing.T) + + if !Condition(mockT, func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if Condition(mockT, func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanic(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanics(t *testing.T) { + + mockT := new(testing.T) + + if !Panics(mockT, func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if Panics(mockT, func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanics(t *testing.T) { + + mockT := new(testing.T) + + if !NotPanics(mockT, func() { + }) { + t.Error("NotPanics should return true") + } + + if NotPanics(mockT, func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + True(t, NoError(mockT, err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("some error") + + False(t, NoError(mockT, err), "NoError with error should return False") + +} + +func TestError(t *testing.T) { + + mockT := new(testing.T) + + // start with a nil error + var err error + + False(t, Error(mockT, err), "Error should return False for nil arg") + + // now set an error + err = errors.New("some error") + + True(t, Error(mockT, err), "Error with error should return True") + +} + +func TestEqualError(t *testing.T) { + mockT := new(testing.T) + + // start with a nil error + var err error + False(t, EqualError(mockT, err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + False(t, EqualError(mockT, err, "Not some error"), + "EqualError should return false for 
different error string") + True(t, EqualError(mockT, err, "some error"), + "EqualError should return true") +} + +func Test_isEmpty(t *testing.T) { + + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + True(t, isEmpty("")) + True(t, isEmpty(nil)) + True(t, isEmpty([]string{})) + True(t, isEmpty(0)) + True(t, isEmpty(int32(0))) + True(t, isEmpty(int64(0))) + True(t, isEmpty(false)) + True(t, isEmpty(map[string]string{})) + True(t, isEmpty(new(time.Time))) + True(t, isEmpty(time.Time{})) + True(t, isEmpty(make(chan struct{}))) + False(t, isEmpty("something")) + False(t, isEmpty(errors.New("something"))) + False(t, isEmpty([]string{"something"})) + False(t, isEmpty(1)) + False(t, isEmpty(true)) + False(t, isEmpty(map[string]string{"Hello": "World"})) + False(t, isEmpty(chWithValue)) + +} + +func TestEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + var tiP *time.Time + var tiNP time.Time + var s *string + var f *os.File + + True(t, Empty(mockT, ""), "Empty string is empty") + True(t, Empty(mockT, nil), "Nil is empty") + True(t, Empty(mockT, []string{}), "Empty string array is empty") + True(t, Empty(mockT, 0), "Zero int value is empty") + True(t, Empty(mockT, false), "False value is empty") + True(t, Empty(mockT, make(chan struct{})), "Channel without values is empty") + True(t, Empty(mockT, s), "Nil string pointer is empty") + True(t, Empty(mockT, f), "Nil os.File pointer is empty") + True(t, Empty(mockT, tiP), "Nil time.Time pointer is empty") + True(t, Empty(mockT, tiNP), "time.Time is empty") + + False(t, Empty(mockT, "something"), "Non Empty string is not empty") + False(t, Empty(mockT, errors.New("something")), "Non nil object is not empty") + False(t, Empty(mockT, []string{"something"}), "Non empty string array is not empty") + False(t, Empty(mockT, 1), "Non-zero int value is not empty") + False(t, Empty(mockT, true), "True value is not empty") + False(t, Empty(mockT, chWithValue), "Channel with values is not empty") +} + +func TestNotEmpty(t *testing.T) { + + mockT := new(testing.T) + chWithValue := make(chan struct{}, 1) + chWithValue <- struct{}{} + + False(t, NotEmpty(mockT, ""), "Empty string is empty") + False(t, NotEmpty(mockT, nil), "Nil is empty") + False(t, NotEmpty(mockT, []string{}), "Empty string array is empty") + False(t, NotEmpty(mockT, 0), "Zero int value is empty") + False(t, NotEmpty(mockT, false), "False value is empty") + False(t, NotEmpty(mockT, make(chan struct{})), "Channel without values is empty") + + True(t, NotEmpty(mockT, "something"), "Non Empty string is not empty") + True(t, NotEmpty(mockT, errors.New("something")), "Non nil object is not empty") + True(t, NotEmpty(mockT, []string{"something"}), "Non empty string array is not empty") + True(t, NotEmpty(mockT, 1), "Non-zero int value is not empty") + True(t, NotEmpty(mockT, true), "True value is not empty") + True(t, NotEmpty(mockT, chWithValue), "Channel with values is not empty") +} + +func Test_getLen(t *testing.T) { + falseCases := []interface{}{ + nil, + 0, + true, + false, + 'A', + struct{}{}, + } + for _, v := range falseCases { + ok, l := getLen(v) + False(t, ok, "Expected getLen fail to get length of %#v", v) + Equal(t, 0, l, "getLen should return 0 for %#v", v) + } + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + trueCases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + 
{map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range trueCases { + ok, l := getLen(c.v) + True(t, ok, "Expected getLen success to get length of %#v", c.v) + Equal(t, c.l, l) + } +} + +func TestLen(t *testing.T) { + mockT := new(testing.T) + + False(t, Len(mockT, nil, 0), "nil does not have length") + False(t, Len(mockT, 0, 0), "int does not have length") + False(t, Len(mockT, true, 0), "true does not have length") + False(t, Len(mockT, false, 0), "false does not have length") + False(t, Len(mockT, 'A', 0), "Rune does not have length") + False(t, Len(mockT, struct{}{}, 0), "Struct does not have length") + + ch := make(chan int, 5) + ch <- 1 + ch <- 2 + ch <- 3 + + cases := []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 3}, + {[...]int{1, 2, 3}, 3}, + {"ABC", 3}, + {map[int]int{1: 2, 2: 4, 3: 6}, 3}, + {ch, 3}, + + {[]int{}, 0}, + {map[int]int{}, 0}, + {make(chan int), 0}, + + {[]int(nil), 0}, + {map[int]int(nil), 0}, + {(chan int)(nil), 0}, + } + + for _, c := range cases { + True(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } + + cases = []struct { + v interface{} + l int + }{ + {[]int{1, 2, 3}, 4}, + {[...]int{1, 2, 3}, 2}, + {"ABC", 2}, + {map[int]int{1: 2, 2: 4, 3: 6}, 4}, + {ch, 2}, + + {[]int{}, 1}, + {map[int]int{}, 1}, + {make(chan int), 1}, + + {[]int(nil), 1}, + {map[int]int(nil), 1}, + {(chan int)(nil), 1}, + } + + for _, c := range cases { + False(t, Len(mockT, c.v, c.l), "%#v have %d items", c.v, c.l) + } +} + +func TestWithinDuration(t *testing.T) { + + mockT := new(testing.T) + a := time.Now() + b := a.Add(10 * time.Second) + + True(t, WithinDuration(mockT, a, b, 10*time.Second), "A 10s difference is within a 10s time difference") + True(t, WithinDuration(mockT, b, a, 10*time.Second), "A 10s difference is within a 10s time difference") + + False(t, WithinDuration(mockT, a, b, 9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, 9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -9*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -9*time.Second), "A 10s difference is not within a 9s time difference") + + False(t, WithinDuration(mockT, a, b, -11*time.Second), "A 10s difference is not within a 9s time difference") + False(t, WithinDuration(mockT, b, a, -11*time.Second), "A 10s difference is not within a 9s time difference") +} + +func TestInDelta(t *testing.T) { + mockT := new(testing.T) + + True(t, InDelta(mockT, 1.001, 1, 0.01), "|1.001 - 1| <= 0.01") + True(t, InDelta(mockT, 1, 1.001, 0.01), "|1 - 1.001| <= 0.01") + True(t, InDelta(mockT, 1, 2, 1), "|1 - 2| <= 1") + False(t, InDelta(mockT, 1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail") + False(t, InDelta(mockT, 2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail") + False(t, InDelta(mockT, "", nil, 1), "Expected non numerals to fail") + False(t, InDelta(mockT, 42, math.NaN(), 0.01), "Expected NaN for actual to fail") + False(t, InDelta(mockT, math.NaN(), 42, 0.01), "Expected NaN for expected to fail") + + cases := []struct { + a, b interface{} + delta float64 + }{ + {uint8(2), uint8(1), 1}, + {uint16(2), uint16(1), 1}, + {uint32(2), uint32(1), 1}, + {uint64(2), uint64(1), 1}, + + {int(2), int(1), 1}, + {int8(2), int8(1), 1}, + {int16(2), int16(1), 1}, + {int32(2), int32(1), 1}, + {int64(2), int64(1), 1}, + + {float32(2), float32(1), 1}, + 
{float64(2), float64(1), 1}, + } + + for _, tc := range cases { + True(t, InDelta(mockT, tc.a, tc.b, tc.delta), "Expected |%V - %V| <= %v", tc.a, tc.b, tc.delta) + } +} + +func TestInDeltaSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InDeltaSlice(mockT, + []float64{1.001, 0.999}, + []float64{1, 1}, + 0.1), "{1.001, 0.009} is element-wise close to {1, 1} in delta=0.1") + + True(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 1), "{1, 2} is element-wise close to {0, 3} in delta=1") + + False(t, InDeltaSlice(mockT, + []float64{1, 2}, + []float64{0, 3}, + 0.1), "{1, 2} is not element-wise close to {0, 3} in delta=0.1") + + False(t, InDeltaSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestInEpsilon(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), uint16(2), .001}, + {2.1, 2.2, 0.1}, + {2.2, 2.1, 0.1}, + {-2.1, -2.2, 0.1}, + {-2.2, -2.1, 0.1}, + {uint64(100), uint8(101), 0.01}, + {0.1, -0.1, 2}, + {0.1, 0, 2}, + } + + for _, tc := range cases { + True(t, InEpsilon(t, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon), "test: %q", tc) + } + + cases = []struct { + a, b interface{} + epsilon float64 + }{ + {uint8(2), int16(-2), .001}, + {uint64(100), uint8(102), 0.01}, + {2.1, 2.2, 0.001}, + {2.2, 2.1, 0.001}, + {2.1, -2.2, 1}, + {2.1, "bla-bla", 0}, + {0.1, -0.1, 1.99}, + {0, 0.1, 2}, // expected must be different to zero + } + + for _, tc := range cases { + False(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, "Expected %V and %V to have a relative difference of %v", tc.a, tc.b, tc.epsilon)) + } + +} + +func TestInEpsilonSlice(t *testing.T) { + mockT := new(testing.T) + + True(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.06), "{2.2, 2.0} is element-wise close to {2.1, 2.1} in espilon=0.06") + + False(t, InEpsilonSlice(mockT, + []float64{2.2, 2.0}, + []float64{2.1, 2.1}, + 0.04), "{2.2, 2.0} is not element-wise close to {2.1, 2.1} in espilon=0.04") + + False(t, InEpsilonSlice(mockT, "", nil, 1), "Expected non numeral slices to fail") +} + +func TestRegexp(t *testing.T) { + mockT := new(testing.T) + + cases := []struct { + rx, str string + }{ + {"^start", "start of the line"}, + {"end$", "in the end"}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"}, + } + + for _, tc := range cases { + True(t, Regexp(mockT, tc.rx, tc.str)) + True(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + False(t, NotRegexp(mockT, tc.rx, tc.str)) + False(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } + + cases = []struct { + rx, str string + }{ + {"^asdfastart", "Not the start of the line"}, + {"end$", "in the end."}, + {"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"}, + } + + for _, tc := range cases { + False(t, Regexp(mockT, tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str) + False(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + True(t, NotRegexp(mockT, tc.rx, tc.str)) + True(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str)) + } +} + +func testAutogeneratedFunction() { + defer func() { + if err := recover(); err == nil { + panic("did not panic") + } + CallerInfo() + }() + t := struct { + io.Closer + }{} + var c io.Closer + c = t + c.Close() +} + +func TestCallerInfoWithAutogeneratedFunctions(t *testing.T) { + NotPanics(t, func() { + testAutogeneratedFunction() + }) +} + +func TestZero(t *testing.T) { + mockT := 
new(testing.T) + + for _, test := range zeros { + True(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } + + for _, test := range nonZeros { + False(t, Zero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } +} + +func TestNotZero(t *testing.T) { + mockT := new(testing.T) + + for _, test := range zeros { + False(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } + + for _, test := range nonZeros { + True(t, NotZero(mockT, test, "%#v is not the %v zero value", test, reflect.TypeOf(test))) + } +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`)) +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}")) +} + +func TestJSONEq_Array(t *testing.T) { + mockT := new(testing.T) + True(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`)) +} + +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`)) +} + +func TestJSONEq_HashesNotEquivalent(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_ActualIsNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `{"foo": "bar"}`, "Not JSON")) +} + +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`)) +} + +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, "Not JSON", "Not JSON")) +} + +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(testing.T) + False(t, JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`)) +} + +func TestDiff(t *testing.T) { + expected := ` + +Diff: +--- Expected ++++ Actual +@@ -1,3 +1,3 @@ + (struct { foo string }) { +- foo: (string) (len=5) "hello" ++ foo: (string) (len=3) "bar" + } +` + actual := diff( + struct{ foo string }{"hello"}, + struct{ foo string }{"bar"}, + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -2,5 +2,5 @@ + (int) 1, +- (int) 2, + (int) 3, +- (int) 4 ++ (int) 5, ++ (int) 7 + } +` + actual = diff( + []int{1, 2, 3, 4}, + []int{1, 3, 5, 7}, + ) + Equal(t, expected, actual) + + expected = ` + +Diff: +--- Expected ++++ Actual +@@ -2,4 +2,4 @@ + (int) 1, +- (int) 2, +- (int) 3 ++ (int) 3, ++ (int) 5 + } +` + actual = diff( + []int{1, 2, 3, 4}[0:3], + []int{1, 3, 5, 7}[0:3], + ) + Equal(t, expected, actual) + + expected = 
` + +Diff: +--- Expected ++++ Actual +@@ -1,6 +1,6 @@ + (map[string]int) (len=4) { +- (string) (len=4) "four": (int) 4, ++ (string) (len=4) "five": (int) 5, + (string) (len=3) "one": (int) 1, +- (string) (len=5) "three": (int) 3, +- (string) (len=3) "two": (int) 2 ++ (string) (len=5) "seven": (int) 7, ++ (string) (len=5) "three": (int) 3 + } +` + + actual = diff( + map[string]int{"one": 1, "two": 2, "three": 3, "four": 4}, + map[string]int{"one": 1, "three": 3, "five": 5, "seven": 7}, + ) + Equal(t, expected, actual) +} + +func TestDiffEmptyCases(t *testing.T) { + Equal(t, "", diff(nil, nil)) + Equal(t, "", diff(struct{ foo string }{}, nil)) + Equal(t, "", diff(nil, struct{ foo string }{})) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff(1, 2)) + Equal(t, "", diff([]int{1}, []bool{true})) +} + +type mockTestingT struct { +} + +func (m *mockTestingT) Errorf(format string, args ...interface{}) {} + +func TestFailNowWithPlainTestingT(t *testing.T) { + mockT := &mockTestingT{} + + Panics(t, func() { + FailNow(mockT, "failed") + }, "should panic since mockT is missing FailNow()") +} + +type mockFailNowTestingT struct { +} + +func (m *mockFailNowTestingT) Errorf(format string, args ...interface{}) {} + +func (m *mockFailNowTestingT) FailNow() {} + +func TestFailNowWithFullTestingT(t *testing.T) { + mockT := &mockFailNowTestingT{} + + NotPanics(t, func() { + FailNow(mockT, "failed") + }, "should call mockT.FailNow() rather than panicking") +} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go new file mode 100644 index 00000000..c9dccc4d --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/doc.go @@ -0,0 +1,45 @@ +// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. +// +// Example Usage +// +// The following is a complete example using assert in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// if you assert many times, use the format below: +// +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// ) +// +// func TestSomething(t *testing.T) { +// assert := assert.New(t) +// +// var a string = "Hello" +// var b string = "Hello" +// +// assert.Equal(a, b, "The two words should be the same.") +// } +// +// Assertions +// +// Assertions allow you to easily write test code, and are global funcs in the `assert` package. +// All assertion functions take, as the first argument, the `*testing.T` object provided by the +// testing framework. This allows the assertion funcs to write the failings and other details to +// the correct place. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go new file mode 100644 index 00000000..ac9dc9d1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/errors.go @@ -0,0 +1,10 @@ +package assert + +import ( + "errors" +) + +// AnError is an error instance useful for testing. 
If the code does not care
+// about error specifics and only needs to return an error, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 00000000..b867e95e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+	t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+	return &Assertions{
+		t: t,
+	}
+}
+
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go b/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go
new file mode 100644
index 00000000..22e1df1d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions_test.go
@@ -0,0 +1,611 @@
+package assert
+
+import (
+	"errors"
+	"regexp"
+	"testing"
+	"time"
+)
+
+func TestImplementsWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) {
+		t.Error("Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface")
+	}
+	if assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {
+		t.Error("Implements method should return false: AssertionTesterNonConformingObject does not implement AssertionTesterInterface")
+	}
+}
+
+func TestIsTypeWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {
+		t.Error("IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject")
+	}
+	if assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {
+		t.Error("IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject")
+	}
+
+}
+
+func TestEqualWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.Equal("Hello World", "Hello World") {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal(123, 123) {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal(123.5, 123.5) {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal([]byte("Hello World"), []byte("Hello World")) {
+		t.Error("Equal should return true")
+	}
+	if !assert.Equal(nil, nil) {
+		t.Error("Equal should return true")
+	}
+}
+
+func TestEqualValuesWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.EqualValues(uint32(10), int32(10)) {
+		t.Error("EqualValues should return true")
+	}
+}
+
+func TestNotNilWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.NotNil(new(AssertionTesterConformingObject)) {
+		t.Error("NotNil should return true: object is not nil")
+	}
+	if assert.NotNil(nil) {
+		t.Error("NotNil should return false: object is nil")
+	}
+
+}
+
+func TestNilWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	if !assert.Nil(nil) {
+		t.Error("Nil should return true: object is nil")
+	}
+	if assert.Nil(new(AssertionTesterConformingObject)) {
+
t.Error("Nil should return false: object is not nil") + } + +} + +func TestTrueWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.True(true) { + t.Error("True should return true") + } + if assert.True(false) { + t.Error("True should return false") + } + +} + +func TestFalseWrapper(t *testing.T) { + assert := New(new(testing.T)) + + if !assert.False(false) { + t.Error("False should return true") + } + if assert.False(true) { + t.Error("False should return false") + } + +} + +func TestExactlyWrapper(t *testing.T) { + assert := New(new(testing.T)) + + a := float32(1) + b := float64(1) + c := float32(1) + d := float32(2) + + if assert.Exactly(a, b) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, d) { + t.Error("Exactly should return false") + } + if !assert.Exactly(a, c) { + t.Error("Exactly should return true") + } + + if assert.Exactly(nil, a) { + t.Error("Exactly should return false") + } + if assert.Exactly(a, nil) { + t.Error("Exactly should return false") + } + +} + +func TestNotEqualWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotEqual("Hello World", "Hello World!") { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123, 1234) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(123.5, 123.55) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual([]byte("Hello World"), []byte("Hello World!")) { + t.Error("NotEqual should return true") + } + if !assert.NotEqual(nil, new(AssertionTesterConformingObject)) { + t.Error("NotEqual should return true") + } +} + +func TestContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.Contains("Hello World", "Hello") { + t.Error("Contains should return true: \"Hello World\" contains \"Hello\"") + } + if assert.Contains("Hello World", "Salut") { + t.Error("Contains should return false: \"Hello World\" does not contain \"Salut\"") + } + + if !assert.Contains(list, "Foo") { + t.Error("Contains should return true: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + if assert.Contains(list, "Salut") { + t.Error("Contains should return false: \"[\"Foo\", \"Bar\"]\" does not contain \"Salut\"") + } + +} + +func TestNotContainsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + list := []string{"Foo", "Bar"} + + if !assert.NotContains("Hello World", "Hello!") { + t.Error("NotContains should return true: \"Hello World\" does not contain \"Hello!\"") + } + if assert.NotContains("Hello World", "Hello") { + t.Error("NotContains should return false: \"Hello World\" contains \"Hello\"") + } + + if !assert.NotContains(list, "Foo!") { + t.Error("NotContains should return true: \"[\"Foo\", \"Bar\"]\" does not contain \"Foo!\"") + } + if assert.NotContains(list, "Foo") { + t.Error("NotContains should return false: \"[\"Foo\", \"Bar\"]\" contains \"Foo\"") + } + +} + +func TestConditionWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.Condition(func() bool { return true }, "Truth") { + t.Error("Condition should return true") + } + + if assert.Condition(func() bool { return false }, "Lie") { + t.Error("Condition should return false") + } + +} + +func TestDidPanicWrapper(t *testing.T) { + + if funcDidPanic, _ := didPanic(func() { + panic("Panic!") + }); !funcDidPanic { + t.Error("didPanic should return true") + } + + if funcDidPanic, _ := didPanic(func() { + }); funcDidPanic { + t.Error("didPanic should return false") + } + +} + +func TestPanicsWrapper(t *testing.T) { + + 
assert := New(new(testing.T)) + + if !assert.Panics(func() { + panic("Panic!") + }) { + t.Error("Panics should return true") + } + + if assert.Panics(func() { + }) { + t.Error("Panics should return false") + } + +} + +func TestNotPanicsWrapper(t *testing.T) { + + assert := New(new(testing.T)) + + if !assert.NotPanics(func() { + }) { + t.Error("NotPanics should return true") + } + + if assert.NotPanics(func() { + panic("Panic!") + }) { + t.Error("NotPanics should return false") + } + +} + +func TestNoErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.True(mockAssert.NoError(err), "NoError should return True for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.False(mockAssert.NoError(err), "NoError with error should return False") + +} + +func TestErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + + assert.False(mockAssert.Error(err), "Error should return False for nil arg") + + // now set an error + err = errors.New("Some error") + + assert.True(mockAssert.Error(err), "Error with error should return True") + +} + +func TestEqualErrorWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + // start with a nil error + var err error + assert.False(mockAssert.EqualError(err, ""), + "EqualError should return false for nil arg") + + // now set an error + err = errors.New("some error") + assert.False(mockAssert.EqualError(err, "Not some error"), + "EqualError should return false for different error string") + assert.True(mockAssert.EqualError(err, "some error"), + "EqualError should return true") +} + +func TestEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.Empty(""), "Empty string is empty") + assert.True(mockAssert.Empty(nil), "Nil is empty") + assert.True(mockAssert.Empty([]string{}), "Empty string array is empty") + assert.True(mockAssert.Empty(0), "Zero int value is empty") + assert.True(mockAssert.Empty(false), "False value is empty") + + assert.False(mockAssert.Empty("something"), "Non Empty string is not empty") + assert.False(mockAssert.Empty(errors.New("something")), "Non nil object is not empty") + assert.False(mockAssert.Empty([]string{"something"}), "Non empty string array is not empty") + assert.False(mockAssert.Empty(1), "Non-zero int value is not empty") + assert.False(mockAssert.Empty(true), "True value is not empty") + +} + +func TestNotEmptyWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.NotEmpty(""), "Empty string is empty") + assert.False(mockAssert.NotEmpty(nil), "Nil is empty") + assert.False(mockAssert.NotEmpty([]string{}), "Empty string array is empty") + assert.False(mockAssert.NotEmpty(0), "Zero int value is empty") + assert.False(mockAssert.NotEmpty(false), "False value is empty") + + assert.True(mockAssert.NotEmpty("something"), "Non Empty string is not empty") + assert.True(mockAssert.NotEmpty(errors.New("something")), "Non nil object is not empty") + assert.True(mockAssert.NotEmpty([]string{"something"}), "Non empty string array is not empty") + assert.True(mockAssert.NotEmpty(1), "Non-zero int value is not empty") + assert.True(mockAssert.NotEmpty(true), "True value is not empty") + +} + +func TestLenWrapper(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.False(mockAssert.Len(nil, 0), "nil 
does not have length")
+	assert.False(mockAssert.Len(0, 0), "int does not have length")
+	assert.False(mockAssert.Len(true, 0), "true does not have length")
+	assert.False(mockAssert.Len(false, 0), "false does not have length")
+	assert.False(mockAssert.Len('A', 0), "Rune does not have length")
+	assert.False(mockAssert.Len(struct{}{}, 0), "Struct does not have length")
+
+	ch := make(chan int, 5)
+	ch <- 1
+	ch <- 2
+	ch <- 3
+
+	cases := []struct {
+		v interface{}
+		l int
+	}{
+		{[]int{1, 2, 3}, 3},
+		{[...]int{1, 2, 3}, 3},
+		{"ABC", 3},
+		{map[int]int{1: 2, 2: 4, 3: 6}, 3},
+		{ch, 3},
+
+		{[]int{}, 0},
+		{map[int]int{}, 0},
+		{make(chan int), 0},
+
+		{[]int(nil), 0},
+		{map[int]int(nil), 0},
+		{(chan int)(nil), 0},
+	}
+
+	for _, c := range cases {
+		assert.True(mockAssert.Len(c.v, c.l), "%#v has %d items", c.v, c.l)
+	}
+}
+
+func TestWithinDurationWrapper(t *testing.T) {
+	assert := New(t)
+	mockAssert := New(new(testing.T))
+	a := time.Now()
+	b := a.Add(10 * time.Second)
+
+	assert.True(mockAssert.WithinDuration(a, b, 10*time.Second), "A 10s difference is within a 10s time difference")
+	assert.True(mockAssert.WithinDuration(b, a, 10*time.Second), "A 10s difference is within a 10s time difference")
+
+	assert.False(mockAssert.WithinDuration(a, b, 9*time.Second), "A 10s difference is not within a 9s time difference")
+	assert.False(mockAssert.WithinDuration(b, a, 9*time.Second), "A 10s difference is not within a 9s time difference")
+
+	assert.False(mockAssert.WithinDuration(a, b, -9*time.Second), "A 10s difference is not within a -9s time difference")
+	assert.False(mockAssert.WithinDuration(b, a, -9*time.Second), "A 10s difference is not within a -9s time difference")
+
+	assert.False(mockAssert.WithinDuration(a, b, -11*time.Second), "A 10s difference is not within a -11s time difference")
+	assert.False(mockAssert.WithinDuration(b, a, -11*time.Second), "A 10s difference is not within a -11s time difference")
+}
+
+func TestInDeltaWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	True(t, assert.InDelta(1.001, 1, 0.01), "|1.001 - 1| <= 0.01")
+	True(t, assert.InDelta(1, 1.001, 0.01), "|1 - 1.001| <= 0.01")
+	True(t, assert.InDelta(1, 2, 1), "|1 - 2| <= 1")
+	False(t, assert.InDelta(1, 2, 0.5), "Expected |1 - 2| <= 0.5 to fail")
+	False(t, assert.InDelta(2, 1, 0.5), "Expected |2 - 1| <= 0.5 to fail")
+	False(t, assert.InDelta("", nil, 1), "Expected non numerals to fail")
+
+	cases := []struct {
+		a, b  interface{}
+		delta float64
+	}{
+		{uint8(2), uint8(1), 1},
+		{uint16(2), uint16(1), 1},
+		{uint32(2), uint32(1), 1},
+		{uint64(2), uint64(1), 1},
+
+		{int(2), int(1), 1},
+		{int8(2), int8(1), 1},
+		{int16(2), int16(1), 1},
+		{int32(2), int32(1), 1},
+		{int64(2), int64(1), 1},
+
+		{float32(2), float32(1), 1},
+		{float64(2), float64(1), 1},
+	}
+
+	for _, tc := range cases {
+		True(t, assert.InDelta(tc.a, tc.b, tc.delta), "Expected |%v - %v| <= %v", tc.a, tc.b, tc.delta)
+	}
+}
+
+func TestInEpsilonWrapper(t *testing.T) {
+	assert := New(new(testing.T))
+
+	cases := []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), uint16(2), .001},
+		{2.1, 2.2, 0.1},
+		{2.2, 2.1, 0.1},
+		{-2.1, -2.2, 0.1},
+		{-2.2, -2.1, 0.1},
+		{uint64(100), uint8(101), 0.01},
+		{0.1, -0.1, 2},
+	}
+
+	for _, tc := range cases {
+		True(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+
+	cases = []struct {
+		a, b    interface{}
+		epsilon float64
+	}{
+		{uint8(2), int16(-2), .001},
+		{uint64(100), uint8(102), 0.01},
+		{2.1, 2.2, 0.001},
+		{2.2, 2.1, 0.001},
+		{2.1, -2.2, 1},
+		{2.1, "bla-bla", 0},
+		{0.1, -0.1, 1.99},
+	}
+
+	for _, tc := range cases {
+		False(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, "Expected %v and %v to have a relative difference of %v", tc.a, tc.b, tc.epsilon))
+	}
+}
+
+func TestRegexpWrapper(t *testing.T) {
+
+	assert := New(new(testing.T))
+
+	cases := []struct {
+		rx, str string
+	}{
+		{"^start", "start of the line"},
+		{"end$", "in the end"},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12.34"},
+	}
+
+	for _, tc := range cases {
+		True(t, assert.Regexp(tc.rx, tc.str))
+		True(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str))
+		False(t, assert.NotRegexp(tc.rx, tc.str))
+		False(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str))
+	}
+
+	cases = []struct {
+		rx, str string
+	}{
+		{"^asdfastart", "Not the start of the line"},
+		{"end$", "in the end."},
+		{"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}", "My phone number is 650.12a.34"},
+	}
+
+	for _, tc := range cases {
+		False(t, assert.Regexp(tc.rx, tc.str), "Expected \"%s\" to not match \"%s\"", tc.rx, tc.str)
+		False(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str))
+		True(t, assert.NotRegexp(tc.rx, tc.str))
+		True(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str))
+	}
+}
+
+func TestZeroWrapper(t *testing.T) {
+	assert := New(t)
+	mockAssert := New(new(testing.T))
+
+	for _, test := range zeros {
+		assert.True(mockAssert.Zero(test), "Zero should return true for %v", test)
+	}
+
+	for _, test := range nonZeros {
+		assert.False(mockAssert.Zero(test), "Zero should return false for %v", test)
+	}
+}
+
+func TestNotZeroWrapper(t *testing.T) {
+	assert := New(t)
+	mockAssert := New(new(testing.T))
+
+	for _, test := range zeros {
+		assert.False(mockAssert.NotZero(test), "NotZero should return false for %v", test)
+	}
+
+	for _, test := range nonZeros {
+		assert.True(mockAssert.NotZero(test), "NotZero should return true for %v", test)
+	}
+}
+
+func TestJSONEqWrapper_EqualJSONString(t *testing.T) {
+	assert := New(new(testing.T))
+	if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) {
+		t.Error("JSONEq should return true")
+	}
+
+}
+
+func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) {
+	assert := New(new(testing.T))
+	if !assert.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) {
+		t.Error("JSONEq should return true")
+	}
+
+}
+
+func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) {
+	assert := New(new(testing.T))
+	if !assert.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}",
+		"{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") {
+		t.Error("JSONEq should return true")
+	}
+}
+
+func TestJSONEqWrapper_Array(t *testing.T) {
+	assert := New(new(testing.T))
+	if !assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) {
+		t.Error("JSONEq should return true")
+	}
+
+}
+
+func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) {
+	assert := New(new(testing.T))
+	if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) {
+		t.Error("JSONEq should return false")
false") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`{"foo": "bar"}`, "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq("Not JSON", "Not JSON") { + t.Error("JSONEq should return false") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + assert := New(new(testing.T)) + if assert.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) { + t.Error("JSONEq should return false") + } +} diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go new file mode 100644 index 00000000..e1b9442b --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -0,0 +1,106 @@ +package assert + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/url" + "strings" +) + +// httpCode is a helper that returns HTTP code of the response. It returns -1 +// if building a new request fails. +func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int { + w := httptest.NewRecorder() + req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) + if err != nil { + return -1 + } + handler(w, req) + return w.Code +} + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusOK && code <= http.StatusPartialContent +} + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect +} + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool { + code := httpCode(handler, method, url, values) + if code == -1 { + return false + } + return code >= http.StatusBadRequest +} + +// HTTPBody is a helper that returns HTTP body of the response. It returns +// empty string if building a new request fails. 
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+	w := httptest.NewRecorder()
+	req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+	if err != nil {
+		return ""
+	}
+	handler(w, req)
+	return w.Body.String()
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if !contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {
+	body := HTTPBody(handler, method, url, values)
+
+	contains := strings.Contains(body, fmt.Sprint(str))
+	if contains {
+		Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+	}
+
+	return !contains
+}
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions_test.go b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go
new file mode 100644
index 00000000..684c2d5d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions_test.go
@@ -0,0 +1,86 @@
+package assert
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"testing"
+)
+
+func httpOK(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusOK)
+}
+
+func httpRedirect(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusTemporaryRedirect)
+}
+
+func httpError(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusInternalServerError)
+}
+
+func TestHTTPStatuses(t *testing.T) {
+	assert := New(t)
+	mockT := new(testing.T)
+
+	assert.Equal(HTTPSuccess(mockT, httpOK, "GET", "/", nil), true)
+	assert.Equal(HTTPSuccess(mockT, httpRedirect, "GET", "/", nil), false)
+	assert.Equal(HTTPSuccess(mockT, httpError, "GET", "/", nil), false)
+
+	assert.Equal(HTTPRedirect(mockT, httpOK, "GET", "/", nil), false)
+	assert.Equal(HTTPRedirect(mockT, httpRedirect, "GET", "/", nil), true)
+	assert.Equal(HTTPRedirect(mockT, httpError, "GET", "/", nil), false)
+
+	assert.Equal(HTTPError(mockT, httpOK, "GET", "/", nil), false)
+	assert.Equal(HTTPError(mockT, httpRedirect, "GET", "/", nil), false)
+	assert.Equal(HTTPError(mockT, httpError, "GET", "/", nil), true)
+}
+
+func TestHTTPStatusesWrapper(t *testing.T) {
+	assert := New(t)
+	mockAssert := New(new(testing.T))
+
+	assert.Equal(mockAssert.HTTPSuccess(httpOK, "GET", "/", nil), true)
+	assert.Equal(mockAssert.HTTPSuccess(httpRedirect, "GET", "/", nil), false)
+	assert.Equal(mockAssert.HTTPSuccess(httpError, "GET", "/", nil), false)
+
+	assert.Equal(mockAssert.HTTPRedirect(httpOK, "GET", "/", nil), false)
+	assert.Equal(mockAssert.HTTPRedirect(httpRedirect, "GET", "/", nil), true)
+	assert.Equal(mockAssert.HTTPRedirect(httpError, "GET", "/",
nil), false) + + assert.Equal(mockAssert.HTTPError(httpOK, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpRedirect, "GET", "/", nil), false) + assert.Equal(mockAssert.HTTPError(httpError, "GET", "/", nil), true) +} + +func httpHelloName(w http.ResponseWriter, r *http.Request) { + name := r.FormValue("name") + w.Write([]byte(fmt.Sprintf("Hello, %s!", name))) +} + +func TestHttpBody(t *testing.T) { + assert := New(t) + mockT := new(testing.T) + + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(HTTPBodyContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(HTTPBodyNotContains(mockT, httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) +} + +func TestHttpBodyWrappers(t *testing.T) { + assert := New(t) + mockAssert := New(new(testing.T)) + + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.True(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.False(mockAssert.HTTPBodyContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "Hello, World!")) + assert.False(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "World")) + assert.True(mockAssert.HTTPBodyNotContains(httpHelloName, "GET", "/", url.Values{"name": []string{"World"}}, "world")) + +} diff --git a/vendor/github.com/stretchr/testify/doc.go b/vendor/github.com/stretchr/testify/doc.go new file mode 100644 index 00000000..377d5cc5 --- /dev/null +++ b/vendor/github.com/stretchr/testify/doc.go @@ -0,0 +1,22 @@ +// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend. +// +// testify contains the following packages: +// +// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system. +// +// The http package contains tools to make it easier to test http activity using the Go testing system. +// +// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected. +// +// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests. It includes setup/teardown functionality in the way of interfaces. +package testify + +// blank imports help docs. 
+import ( + // assert package + _ "github.com/stretchr/testify/assert" + // http package + _ "github.com/stretchr/testify/http" + // mock package + _ "github.com/stretchr/testify/mock" +) diff --git a/vendor/github.com/stretchr/testify/http/doc.go b/vendor/github.com/stretchr/testify/http/doc.go new file mode 100644 index 00000000..695167c6 --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/doc.go @@ -0,0 +1,2 @@ +// Package http DEPRECATED USE net/http/httptest +package http diff --git a/vendor/github.com/stretchr/testify/http/test_response_writer.go b/vendor/github.com/stretchr/testify/http/test_response_writer.go new file mode 100644 index 00000000..5c3f813f --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/test_response_writer.go @@ -0,0 +1,49 @@ +package http + +import ( + "net/http" +) + +// TestResponseWriter DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +type TestResponseWriter struct { + + // StatusCode is the last int written by the call to WriteHeader(int) + StatusCode int + + // Output is a string containing the written bytes using the Write([]byte) func. + Output string + + // header is the internal storage of the http.Header object + header http.Header +} + +// Header DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Header() http.Header { + + if rw.header == nil { + rw.header = make(http.Header) + } + + return rw.header +} + +// Write DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) Write(bytes []byte) (int, error) { + + // assume 200 success if no header has been set + if rw.StatusCode == 0 { + rw.WriteHeader(200) + } + + // add these bytes to the output string + rw.Output = rw.Output + string(bytes) + + // return normal values + return 0, nil + +} + +// WriteHeader DEPRECATED: We recommend you use http://golang.org/pkg/net/http/httptest instead. +func (rw *TestResponseWriter) WriteHeader(i int) { + rw.StatusCode = i +} diff --git a/vendor/github.com/stretchr/testify/http/test_round_tripper.go b/vendor/github.com/stretchr/testify/http/test_round_tripper.go new file mode 100644 index 00000000..b1e32f1d --- /dev/null +++ b/vendor/github.com/stretchr/testify/http/test_round_tripper.go @@ -0,0 +1,17 @@ +package http + +import ( + "github.com/stretchr/testify/mock" + "net/http" +) + +// TestRoundTripper DEPRECATED USE net/http/httptest +type TestRoundTripper struct { + mock.Mock +} + +// RoundTrip DEPRECATED USE net/http/httptest +func (t *TestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + args := t.Called(req) + return args.Get(0).(*http.Response), args.Error(1) +} diff --git a/vendor/github.com/stretchr/testify/mock/doc.go b/vendor/github.com/stretchr/testify/mock/doc.go new file mode 100644 index 00000000..7324128e --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/doc.go @@ -0,0 +1,44 @@ +// Package mock provides a system by which it is possible to mock your objects +// and verify calls are happening as expected. +// +// Example Usage +// +// The mock package provides an object, Mock, that tracks activity on another object. It is usually +// embedded into a test object as shown below: +// +// type MyTestObject struct { +// // add a Mock object instance +// mock.Mock +// +// // other fields go here as normal +// } +// +// When implementing the methods of an interface, you wire your functions up +// to call the Mock.Called(args...) 
method, and return the appropriate values.
+//
+// For example, to mock a method that saves the name and age of a person and returns
+// the year of their birth or an error, you might write this:
+//
+// func (o *MyTestObject) SavePersonDetails(firstname, lastname string, age int) (int, error) {
+//   args := o.Called(firstname, lastname, age)
+//   return args.Int(0), args.Error(1)
+// }
+//
+// The Int, Error and Bool methods are examples of strongly typed getters that take the argument
+// index position. Given this argument list:
+//
+// (12, true, "Something")
+//
+// You could read them out strongly typed like this:
+//
+// args.Int(0)
+// args.Bool(1)
+// args.String(2)
+//
+// For objects of your own type, use the generic Arguments.Get(index) method and make a type assertion:
+//
+// return args.Get(0).(*MyObject), args.Get(1).(*AnotherObjectOfMine)
+//
+// This may cause a panic if the object you are getting is nil (the type assertion will fail); in those
+// cases you should check for nil first.
+package mock
diff --git a/vendor/github.com/stretchr/testify/mock/mock.go b/vendor/github.com/stretchr/testify/mock/mock.go
new file mode 100644
index 00000000..cba9009a
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/mock/mock.go
@@ -0,0 +1,693 @@
+package mock
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/stretchr/objx"
+	"github.com/stretchr/testify/assert"
+)
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+	Logf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	FailNow()
+}
+
+/*
+	Call
+*/
+
+// Call represents a method call and is used for setting expectations,
+// as well as recording activity.
+type Call struct {
+	Parent *Mock
+
+	// The name of the method that was or will be called.
+	Method string
+
+	// Holds the arguments of the method.
+	Arguments Arguments
+
+	// Holds the arguments that should be returned when
+	// this method is called.
+	ReturnArguments Arguments
+
+	// The number of times to return the return arguments when setting
+	// expectations. 0 means to always return the value.
+	Repeatability int
+
+	// Amount of times this call has been called
+	totalCalls int
+
+	// Holds a channel that will be used to block the Return until it either
+	// receives a message or is closed. nil means it returns immediately.
+	WaitFor <-chan time.Time
+
+	// Holds a handler used to manipulate arguments content that are passed by
+	// reference. It's useful when mocking methods such as unmarshalers or
+	// decoders.
+	RunFn func(Arguments)
+}
+
+func newCall(parent *Mock, methodName string, methodArguments ...interface{}) *Call {
+	return &Call{
+		Parent:          parent,
+		Method:          methodName,
+		Arguments:       methodArguments,
+		ReturnArguments: make([]interface{}, 0),
+		Repeatability:   0,
+		WaitFor:         nil,
+		RunFn:           nil,
+	}
+}
+
+func (c *Call) lock() {
+	c.Parent.mutex.Lock()
+}
+
+func (c *Call) unlock() {
+	c.Parent.mutex.Unlock()
+}
+
+// Return specifies the return arguments for the expectation.
+//
+// Mock.On("DoSomething").Return(errors.New("failed"))
+func (c *Call) Return(returnArguments ...interface{}) *Call {
+	c.lock()
+	defer c.unlock()
+
+	c.ReturnArguments = returnArguments
+
+	return c
+}
+
+// Once indicates that the mock should only return the value once.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Once()
+func (c *Call) Once() *Call {
+	return c.Times(1)
+}
+
+// Twice indicates that the mock should only return the value twice.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Twice()
+func (c *Call) Twice() *Call {
+	return c.Times(2)
+}
+
+// Times indicates that the mock should only return the indicated number
+// of times.
+//
+// Mock.On("MyMethod", arg1, arg2).Return(returnArg1, returnArg2).Times(5)
+func (c *Call) Times(i int) *Call {
+	c.lock()
+	defer c.unlock()
+	c.Repeatability = i
+	return c
+}
+
+// WaitUntil sets the channel that will block the mock's return until it's closed
+// or a message is received.
+//
+// Mock.On("MyMethod", arg1, arg2).WaitUntil(time.After(time.Second))
+func (c *Call) WaitUntil(w <-chan time.Time) *Call {
+	c.lock()
+	defer c.unlock()
+	c.WaitFor = w
+	return c
+}
+
+// After sets how long to block until the call returns.
+//
+// Mock.On("MyMethod", arg1, arg2).After(time.Second)
+func (c *Call) After(d time.Duration) *Call {
+	return c.WaitUntil(time.After(d))
+}
+
+// Run sets a handler to be called before returning. It can be used when
+// mocking a method (such as an unmarshaler) that takes a pointer to a struct
+// and sets properties on that struct.
+//
+// Mock.On("Unmarshal", AnythingOfType("*map[string]interface{}")).Return().Run(func(args Arguments) {
+//   arg := args.Get(0).(*map[string]interface{})
+//   arg["foo"] = "bar"
+// })
+func (c *Call) Run(fn func(Arguments)) *Call {
+	c.lock()
+	defer c.unlock()
+	c.RunFn = fn
+	return c
+}
+
+// On chains a new expectation description onto the mocked interface. This
+// allows syntax like:
+//
+// Mock.
+//   On("MyMethod", 1).Return(nil).
+//   On("MyOtherMethod", 'a', 'b', 'c').Return(errors.New("Some Error"))
+func (c *Call) On(methodName string, arguments ...interface{}) *Call {
+	return c.Parent.On(methodName, arguments...)
+}
+
+// Mock is the workhorse used to track activity on another object.
+// For an example of its usage, refer to the "Example Usage" section at the top
+// of this document.
+type Mock struct {
+	// Represents the calls that are expected of
+	// an object.
+	ExpectedCalls []*Call
+
+	// Holds the calls that were made to this mocked object.
+	Calls []Call
+
+	// TestData holds any data that might be useful for testing. Testify ignores
+	// this data completely allowing you to do whatever you like with it.
+	testData objx.Map
+
+	mutex sync.Mutex
+}
+
+// TestData holds any data that might be useful for testing. Testify ignores
+// this data completely allowing you to do whatever you like with it.
+func (m *Mock) TestData() objx.Map {
+
+	if m.testData == nil {
+		m.testData = make(objx.Map)
+	}
+
+	return m.testData
+}
+
+/*
+	Setting expectations
+*/
+
+// On starts a description of an expectation of the specified method
+// being called.
+//
+// Mock.On("MyMethod", arg1, arg2)
+func (m *Mock) On(methodName string, arguments ...interface{}) *Call {
+	for _, arg := range arguments {
+		if v := reflect.ValueOf(arg); v.Kind() == reflect.Func {
+			panic(fmt.Sprintf("cannot use Func in expectations. Use mock.AnythingOfType(\"%T\")", arg))
+		}
+	}
+
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	c := newCall(m, methodName, arguments...)
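+	// register the new expectation so a later Called(...) can match against it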
+	m.ExpectedCalls = append(m.ExpectedCalls, c)
+	return c
+}
+
+// /*
+//	Recording and responding to activity
+// */
+
+func (m *Mock) findExpectedCall(method string, arguments ...interface{}) (int, *Call) {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
+	for i, call := range m.ExpectedCalls {
+		if call.Method == method && call.Repeatability > -1 {
+
+			_, diffCount := call.Arguments.Diff(arguments)
+			if diffCount == 0 {
+				return i, call
+			}
+
+		}
+	}
+	return -1, nil
+}
+
+func (m *Mock) findClosestCall(method string, arguments ...interface{}) (bool, *Call) {
+	diffCount := 0
+	var closestCall *Call
+
+	for _, call := range m.expectedCalls() {
+		if call.Method == method {
+
+			_, tempDiffCount := call.Arguments.Diff(arguments)
+			if tempDiffCount < diffCount || diffCount == 0 {
+				diffCount = tempDiffCount
+				closestCall = call
+			}
+
+		}
+	}
+
+	if closestCall == nil {
+		return false, nil
+	}
+
+	return true, closestCall
+}
+
+func callString(method string, arguments Arguments, includeArgumentValues bool) string {
+
+	var argValsString string
+	if includeArgumentValues {
+		var argVals []string
+		for argIndex, arg := range arguments {
+			argVals = append(argVals, fmt.Sprintf("%d: %#v", argIndex, arg))
+		}
+		argValsString = fmt.Sprintf("\n\t\t%s", strings.Join(argVals, "\n\t\t"))
+	}
+
+	return fmt.Sprintf("%s(%s)%s", method, arguments.String(), argValsString)
+}
+
+// Called tells the mock object that a method has been called, and gets an array
+// of arguments to return. Panics if the call is unexpected (i.e. not preceded by
+// appropriate .On .Return() calls).
+// If Call.WaitFor is set, blocks until the channel is closed or receives a message.
+func (m *Mock) Called(arguments ...interface{}) Arguments {
+	// get the calling function's name
+	pc, _, _, ok := runtime.Caller(1)
+	if !ok {
+		panic("Couldn't get the caller information")
+	}
+	functionPath := runtime.FuncForPC(pc).Name()
+	// The next four lines are required to handle GCCGO function naming conventions.
+	// For example: github_com_docker_libkv_store_mock.WatchTree.pN39_github_com_docker_libkv_store_mock.Mock
+	// uses interface information, unlike gc's github.com/docker/libkv/store/mock.(*Mock).WatchTree.
+	// With GCCGO we need to remove the interface information, starting from pN
. + re := regexp.MustCompile("\\.pN\\d+_") + if re.MatchString(functionPath) { + functionPath = re.Split(functionPath, -1)[0] + } + parts := strings.Split(functionPath, ".") + functionName := parts[len(parts)-1] + + found, call := m.findExpectedCall(functionName, arguments...) + + if found < 0 { + // we have to fail here - because we don't know what to do + // as the return arguments. This is because: + // + // a) this is a totally unexpected call to this method, + // b) the arguments are not what was expected, or + // c) the developer has forgotten to add an accompanying On...Return pair. + + closestFound, closestCall := m.findClosestCall(functionName, arguments...) + + if closestFound { + panic(fmt.Sprintf("\n\nmock: Unexpected Method Call\n-----------------------------\n\n%s\n\nThe closest call I have is: \n\n%s\n", callString(functionName, arguments, true), callString(functionName, closestCall.Arguments, true))) + } else { + panic(fmt.Sprintf("\nassert: mock: I don't know what to return because the method call was unexpected.\n\tEither do Mock.On(\"%s\").Return(...) first, or remove the %s() call.\n\tThis method was unexpected:\n\t\t%s\n\tat: %s", functionName, functionName, callString(functionName, arguments, true), assert.CallerInfo())) + } + } else { + m.mutex.Lock() + switch { + case call.Repeatability == 1: + call.Repeatability = -1 + call.totalCalls++ + + case call.Repeatability > 1: + call.Repeatability-- + call.totalCalls++ + + case call.Repeatability == 0: + call.totalCalls++ + } + m.mutex.Unlock() + } + + // add the call + m.mutex.Lock() + m.Calls = append(m.Calls, *newCall(m, functionName, arguments...)) + m.mutex.Unlock() + + // block if specified + if call.WaitFor != nil { + <-call.WaitFor + } + + if call.RunFn != nil { + call.RunFn(arguments) + } + + return call.ReturnArguments +} + +/* + Assertions +*/ + +// AssertExpectationsForObjects asserts that everything specified with On and Return +// of the specified objects was in fact called as expected. +// +// Calls may have occurred in any order. +func AssertExpectationsForObjects(t TestingT, testObjects ...interface{}) bool { + var success = true + for _, obj := range testObjects { + mockObj := obj.(Mock) + success = success && mockObj.AssertExpectations(t) + } + return success +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact called as expected. Calls may have occurred in any order. +func (m *Mock) AssertExpectations(t TestingT) bool { + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedCalls := m.expectedCalls() + for _, expectedCall := range expectedCalls { + if !m.methodWasCalled(expectedCall.Method, expectedCall.Arguments) && expectedCall.totalCalls == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("\u274C\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } else { + m.mutex.Lock() + if expectedCall.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + } else { + t.Logf("\u2705\t%s(%s)", expectedCall.Method, expectedCall.Arguments.String()) + } + m.mutex.Unlock() + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe code you are testing needs to make %d more call(s).\n\tat: %s", len(expectedCalls)-failedExpectations, len(expectedCalls), failedExpectations, assert.CallerInfo()) + } + + return !somethingMissing +} + +// AssertNumberOfCalls asserts that the method was called expectedCalls times. 
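+//
+// For example (a hypothetical sketch; mockObj being any struct that embeds Mock):
+//
+//   mockObj.AssertNumberOfCalls(t, "TheExampleMethod", 2)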
+func (m *Mock) AssertNumberOfCalls(t TestingT, methodName string, expectedCalls int) bool { + var actualCalls int + for _, call := range m.calls() { + if call.Method == methodName { + actualCalls++ + } + } + return assert.Equal(t, expectedCalls, actualCalls, fmt.Sprintf("Expected number of calls (%d) does not match the actual number of calls (%d).", expectedCalls, actualCalls)) +} + +// AssertCalled asserts that the method was called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.True(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method should have been called with %d argument(s), but was not.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +// AssertNotCalled asserts that the method was not called. +// It can produce a false result when an argument is a pointer type and the underlying value changed after calling the mocked method. +func (m *Mock) AssertNotCalled(t TestingT, methodName string, arguments ...interface{}) bool { + if !assert.False(t, m.methodWasCalled(methodName, arguments), fmt.Sprintf("The \"%s\" method was called with %d argument(s), but should NOT have been.", methodName, len(arguments))) { + t.Logf("%v", m.expectedCalls()) + return false + } + return true +} + +func (m *Mock) methodWasCalled(methodName string, expected []interface{}) bool { + for _, call := range m.calls() { + if call.Method == methodName { + + _, differences := Arguments(expected).Diff(call.Arguments) + + if differences == 0 { + // found the expected call + return true + } + + } + } + // we didn't find the expected call + return false +} + +func (m *Mock) expectedCalls() []*Call { + m.mutex.Lock() + defer m.mutex.Unlock() + return append([]*Call{}, m.ExpectedCalls...) +} + +func (m *Mock) calls() []Call { + m.mutex.Lock() + defer m.mutex.Unlock() + return append([]Call{}, m.Calls...) +} + +/* + Arguments +*/ + +// Arguments holds an array of method arguments or return values. +type Arguments []interface{} + +const ( + // Anything is used in Diff and Assert when the argument being tested + // shouldn't be taken into consideration. + Anything string = "mock.Anything" +) + +// AnythingOfTypeArgument is a string that contains the type of an argument +// for use when type checking. Used in Diff and Assert. +type AnythingOfTypeArgument string + +// AnythingOfType returns an AnythingOfTypeArgument object containing the +// name of the type to check for. Used in Diff and Assert. +// +// For example: +// Assert(t, AnythingOfType("string"), AnythingOfType("int")) +func AnythingOfType(t string) AnythingOfTypeArgument { + return AnythingOfTypeArgument(t) +} + +// argumentMatcher performs custom argument matching, returning whether or +// not the argument is matched by the expectation fixture function. +type argumentMatcher struct { + // fn is a function which accepts one argument, and returns a bool. 
+	fn reflect.Value
+}
+
+func (f argumentMatcher) Matches(argument interface{}) bool {
+	expectType := f.fn.Type().In(0)
+
+	if reflect.TypeOf(argument).AssignableTo(expectType) {
+		result := f.fn.Call([]reflect.Value{reflect.ValueOf(argument)})
+		return result[0].Bool()
+	}
+	return false
+}
+
+func (f argumentMatcher) String() string {
+	return fmt.Sprintf("func(%s) bool", f.fn.Type().In(0).Name())
+}
+
+// MatchedBy can be used to match a mock call based on only certain properties
+// from a complex struct or some calculation. It takes a function that will be
+// evaluated with the called argument and will return true when there's a match
+// and false otherwise.
+//
+// Example:
+// m.On("Do", MatchedBy(func(req *http.Request) bool { return req.Host == "example.com" }))
+//
+// |fn| must be a function accepting a single argument (of the expected type)
+// which returns a bool. If |fn| doesn't match the required signature,
+// MatchedBy() panics.
+func MatchedBy(fn interface{}) argumentMatcher {
+	fnType := reflect.TypeOf(fn)
+
+	if fnType.Kind() != reflect.Func {
+		panic(fmt.Sprintf("assert: arguments: %s is not a func", fn))
+	}
+	if fnType.NumIn() != 1 {
+		panic(fmt.Sprintf("assert: arguments: %s does not take exactly one argument", fn))
+	}
+	if fnType.NumOut() != 1 || fnType.Out(0).Kind() != reflect.Bool {
+		panic(fmt.Sprintf("assert: arguments: %s does not return a bool", fn))
+	}
+
+	return argumentMatcher{fn: reflect.ValueOf(fn)}
+}
+
+// Get returns the argument at the specified index.
+func (args Arguments) Get(index int) interface{} {
+	if index+1 > len(args) {
+		panic(fmt.Sprintf("assert: arguments: Cannot call Get(%d) because there are %d argument(s).", index, len(args)))
+	}
+	return args[index]
+}
+
+// Is gets whether the objects match the arguments specified.
+func (args Arguments) Is(objects ...interface{}) bool {
+	for i, obj := range args {
+		if obj != objects[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Diff gets a string describing the differences between the arguments
+// and the specified objects.
+//
+// Returns the diff string and number of differences found.
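+//
+// For example, diffing the expectation (1, Anything) against the actual
+// call (1, "hello") yields zero differences, while diffing (1, 2) against
+// (1, "hello") yields one.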
+func (args Arguments) Diff(objects []interface{}) (string, int) {
+
+	var output = "\n"
+	var differences int
+
+	var maxArgCount = len(args)
+	if len(objects) > maxArgCount {
+		maxArgCount = len(objects)
+	}
+
+	for i := 0; i < maxArgCount; i++ {
+		var actual, expected interface{}
+
+		if len(objects) <= i {
+			actual = "(Missing)"
+		} else {
+			actual = objects[i]
+		}
+
+		if len(args) <= i {
+			expected = "(Missing)"
+		} else {
+			expected = args[i]
+		}
+
+		if matcher, ok := expected.(argumentMatcher); ok {
+			if matcher.Matches(actual) {
+				output = fmt.Sprintf("%s\t%d: \u2705 %s matched by %s\n", output, i, actual, matcher)
+			} else {
+				differences++
+				output = fmt.Sprintf("%s\t%d: \u274C %s not matched by %s\n", output, i, actual, matcher)
+			}
+		} else if reflect.TypeOf(expected) == reflect.TypeOf((*AnythingOfTypeArgument)(nil)).Elem() {
+
+			// type checking
+			if reflect.TypeOf(actual).Name() != string(expected.(AnythingOfTypeArgument)) && reflect.TypeOf(actual).String() != string(expected.(AnythingOfTypeArgument)) {
+				// not match
+				differences++
+				output = fmt.Sprintf("%s\t%d: \u274C type %s != type %s - %s\n", output, i, expected, reflect.TypeOf(actual).Name(), actual)
+			}
+
+		} else {
+
+			// normal checking
+
+			if assert.ObjectsAreEqual(expected, Anything) || assert.ObjectsAreEqual(actual, Anything) || assert.ObjectsAreEqual(actual, expected) {
+				// match
+				output = fmt.Sprintf("%s\t%d: \u2705 %s == %s\n", output, i, actual, expected)
+			} else {
+				// not match
+				differences++
+				output = fmt.Sprintf("%s\t%d: \u274C %s != %s\n", output, i, actual, expected)
+			}
+		}
+
+	}
+
+	if differences == 0 {
+		return "No differences.", differences
+	}
+
+	return output, differences
+
+}
+
+// Assert compares the arguments with the specified objects and fails if
+// they do not exactly match.
+func (args Arguments) Assert(t TestingT, objects ...interface{}) bool {
+
+	// get the differences
+	diff, diffCount := args.Diff(objects)
+
+	if diffCount == 0 {
+		return true
+	}
+
+	// there are differences... report them...
+	t.Logf(diff)
+	t.Errorf("%sArguments do not match.", assert.CallerInfo())
+
+	return false
+
+}
+
+// String gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
+//
+// If no index is provided, String() returns a complete string representation
+// of the arguments.
+func (args Arguments) String(indexOrNil ...int) string {
+
+	if len(indexOrNil) == 0 {
+		// normal String() method - return a string representation of the args
+		var argsStr []string
+		for _, arg := range args {
+			argsStr = append(argsStr, fmt.Sprintf("%s", reflect.TypeOf(arg)))
+		}
+		return strings.Join(argsStr, ",")
+	} else if len(indexOrNil) == 1 {
+		// Index has been specified - get the argument at that index
+		var index = indexOrNil[0]
+		var s string
+		var ok bool
+		if s, ok = args.Get(index).(string); !ok {
+			panic(fmt.Sprintf("assert: arguments: String(%d) failed because object wasn't correct type: %s", index, args.Get(index)))
+		}
+		return s
+	}
+
+	panic(fmt.Sprintf("assert: arguments: Wrong number of arguments passed to String. Must be 0 or 1, not %d", len(indexOrNil)))
+
+}
+
+// Int gets the argument at the specified index. Panics if there is no argument, or
+// if the argument is of the wrong type.
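+//
+// For example:
+//
+//   count := args.Int(0)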
+func (args Arguments) Int(index int) int { + var s int + var ok bool + if s, ok = args.Get(index).(int); !ok { + panic(fmt.Sprintf("assert: arguments: Int(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Error gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Error(index int) error { + obj := args.Get(index) + var s error + var ok bool + if obj == nil { + return nil + } + if s, ok = obj.(error); !ok { + panic(fmt.Sprintf("assert: arguments: Error(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} + +// Bool gets the argument at the specified index. Panics if there is no argument, or +// if the argument is of the wrong type. +func (args Arguments) Bool(index int) bool { + var s bool + var ok bool + if s, ok = args.Get(index).(bool); !ok { + panic(fmt.Sprintf("assert: arguments: Bool(%d) failed because object wasn't correct type: %v", index, args.Get(index))) + } + return s +} diff --git a/vendor/github.com/stretchr/testify/mock/mock_test.go b/vendor/github.com/stretchr/testify/mock/mock_test.go new file mode 100644 index 00000000..166c3246 --- /dev/null +++ b/vendor/github.com/stretchr/testify/mock/mock_test.go @@ -0,0 +1,1130 @@ +package mock + +import ( + "errors" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "testing" + "time" +) + +/* + Test objects +*/ + +// ExampleInterface represents an example interface. +type ExampleInterface interface { + TheExampleMethod(a, b, c int) (int, error) +} + +// TestExampleImplementation is a test implementation of ExampleInterface +type TestExampleImplementation struct { + Mock +} + +func (i *TestExampleImplementation) TheExampleMethod(a, b, c int) (int, error) { + args := i.Called(a, b, c) + return args.Int(0), errors.New("Whoops") +} + +func (i *TestExampleImplementation) TheExampleMethod2(yesorno bool) { + i.Called(yesorno) +} + +type ExampleType struct { + ran bool +} + +func (i *TestExampleImplementation) TheExampleMethod3(et *ExampleType) error { + args := i.Called(et) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodFunc(fn func(string) error) error { + args := i.Called(fn) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadic(a ...int) error { + args := i.Called(a) + return args.Error(0) +} + +func (i *TestExampleImplementation) TheExampleMethodVariadicInterface(a ...interface{}) error { + args := i.Called(a) + return args.Error(0) +} + +type ExampleFuncType func(string) error + +func (i *TestExampleImplementation) TheExampleMethodFuncType(fn ExampleFuncType) error { + args := i.Called(fn) + return args.Error(0) +} + +/* + Mock +*/ + +func Test_Mock_TestData(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + if assert.NotNil(t, mockedService.TestData()) { + + mockedService.TestData().Set("something", 123) + assert.Equal(t, 123, mockedService.TestData().Get("something").Data()) + } +} + +func Test_Mock_On(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod") + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) +} + +func Test_Mock_Chained_On(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + mockedService. + On("TheExampleMethod", 1, 2, 3). 
+ Return(0). + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil) + + expectedCalls := []*Call{ + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod", + Arguments: []interface{}{1, 2, 3}, + ReturnArguments: []interface{}{0}, + }, + &Call{ + Parent: &mockedService.Mock, + Method: "TheExampleMethod3", + Arguments: []interface{}{AnythingOfType("*mock.ExampleType")}, + ReturnArguments: []interface{}{nil}, + }, + } + assert.Equal(t, expectedCalls, mockedService.ExpectedCalls) +} + +func Test_Mock_On_WithArgs(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", 1, 2, 3, 4) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethod", c.Method) + assert.Equal(t, Arguments{1, 2, 3, 4}, c.Arguments) +} + +func Test_Mock_On_WithFuncArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFunc", AnythingOfType("func(string) error")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, "TheExampleMethodFunc", c.Method) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("func(string) error"), c.Arguments[0]) + + fn := func(string) error { return nil } + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFunc(fn) + }) +} + +func Test_Mock_On_WithIntArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod", + MatchedBy(func(a int) bool { + return a == 1 + }), MatchedBy(func(b int) bool { + return b == 2 + }), MatchedBy(func(c int) bool { + return c == 3 + })).Return(0, nil) + + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 4) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethod(2, 2, 3) + }) + assert.NotPanics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) +} + +func Test_Mock_On_WithPtrArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a.ran == true }), + ).Return(nil) + + mockedService.On("TheExampleMethod3", + MatchedBy(func(a *ExampleType) bool { return a.ran == false }), + ).Return(errors.New("error")) + + assert.Equal(t, mockedService.TheExampleMethod3(&ExampleType{true}), nil) + assert.EqualError(t, mockedService.TheExampleMethod3(&ExampleType{false}), "error") +} + +func Test_Mock_On_WithFuncArgMatcher(t *testing.T) { + var mockedService TestExampleImplementation + + fixture1, fixture2 := errors.New("fixture1"), errors.New("fixture2") + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a("string") == fixture1 }), + ).Return(errors.New("fixture1")) + + mockedService.On("TheExampleMethodFunc", + MatchedBy(func(a func(string) error) bool { return a("string") == fixture2 }), + ).Return(errors.New("fixture2")) + + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error { return fixture1 }), "fixture1") + assert.EqualError(t, mockedService.TheExampleMethodFunc( + func(string) error { return fixture2 }), "fixture2") +} + +func Test_Mock_On_WithVariadicFunc(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodVariadic", []int{1, 2, 3}). 
+ Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []int{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadic(1, 2) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithInterface(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethodVariadicInterface", []interface{}{1, 2, 3}). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, []interface{}{1, 2, 3}, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2, 3) + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithVariadicFuncWithEmptyInterfaceArray(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + var expected []interface{} + c := mockedService. + On("TheExampleMethodVariadicInterface", expected). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, expected, c.Arguments[0]) + + assert.NotPanics(t, func() { + mockedService.TheExampleMethodVariadicInterface() + }) + assert.Panics(t, func() { + mockedService.TheExampleMethodVariadicInterface(1, 2) + }) + +} + +func Test_Mock_On_WithFuncPanics(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + + assert.Panics(t, func() { + mockedService.On("TheExampleMethodFunc", func(string) error { return nil }) + }) +} + +func Test_Mock_On_WithFuncTypeArg(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethodFuncType", AnythingOfType("mock.ExampleFuncType")). + Return(nil) + + assert.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + assert.Equal(t, 1, len(c.Arguments)) + assert.Equal(t, AnythingOfType("mock.ExampleFuncType"), c.Arguments[0]) + + fn := func(string) error { return nil } + assert.NotPanics(t, func() { + mockedService.TheExampleMethodFuncType(fn) + }) +} + +func Test_Mock_Return(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_WaitUntil(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + ch := time.After(time.Second) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + WaitUntil(ch). 
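+		// WaitUntil stores the channel in call.WaitFor (asserted below), so a later
+		// Called on this expectation should block until the channel fires, roughly
+		// one second here.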
+ Return(1, "two", true) + + // assert that the call was created + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.Equal(t, ch, call.WaitFor) +} + +func Test_Mock_Return_After(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.Mock. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + After(time.Second) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + +} + +func Test_Mock_Return_Run(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + fn := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Return(nil). + Run(fn) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) + + et := ExampleType{} + assert.Equal(t, false, et.ran) + mockedService.TheExampleMethod3(&et) + assert.Equal(t, true, et.ran) +} + +func Test_Mock_Return_Run_Out_Of_Order(t *testing.T) { + // make a test impl object + var mockedService = new(TestExampleImplementation) + f := func(args Arguments) { + arg := args.Get(0).(*ExampleType) + arg.ran = true + } + + c := mockedService.Mock. + On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")). + Run(f). + Return(nil) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.Mock.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod3", call.Method) + assert.Equal(t, AnythingOfType("*mock.ExampleType"), call.Arguments[0]) + assert.Equal(t, nil, call.ReturnArguments[0]) + assert.Equal(t, 0, call.Repeatability) + assert.NotEqual(t, nil, call.WaitFor) + assert.NotNil(t, call.Run) +} + +func Test_Mock_Return_Once(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService.On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). 
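+		// Once is shorthand for Times(1): it sets call.Repeatability to 1, which
+		// the assertion below checks, so the expectation is consumed by one call.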
+ Once() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 1, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Twice(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Twice() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 2, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Times(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). + Return(1, "two", true). + Times(5) + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 1, call.ReturnArguments[0]) + assert.Equal(t, "two", call.ReturnArguments[1]) + assert.Equal(t, true, call.ReturnArguments[2]) + assert.Equal(t, 5, call.Repeatability) + assert.Nil(t, call.WaitFor) +} + +func Test_Mock_Return_Nothing(t *testing.T) { + + // make a test impl object + var mockedService = new(TestExampleImplementation) + + c := mockedService. + On("TheExampleMethod", "A", "B", true). 
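+		// Return with no values still marks the expectation as configured; it just
+		// leaves ReturnArguments empty, which the length check below verifies.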
+ Return() + + require.Equal(t, []*Call{c}, mockedService.ExpectedCalls) + + call := mockedService.ExpectedCalls[0] + + assert.Equal(t, "TheExampleMethod", call.Method) + assert.Equal(t, "A", call.Arguments[0]) + assert.Equal(t, "B", call.Arguments[1]) + assert.Equal(t, true, call.Arguments[2]) + assert.Equal(t, 0, len(call.ReturnArguments)) +} + +func Test_Mock_findExpectedCall(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_Mock_findExpectedCall_For_Unknown_Method(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two") + m.On("Two", 3).Return("three") + + f, _ := m.findExpectedCall("Two") + + assert.Equal(t, -1, f) + +} + +func Test_Mock_findExpectedCall_Respects_Repeatability(t *testing.T) { + + m := new(Mock) + m.On("One", 1).Return("one") + m.On("Two", 2).Return("two").Once() + m.On("Two", 3).Return("three").Twice() + m.On("Two", 3).Return("three").Times(8) + + f, c := m.findExpectedCall("Two", 3) + + if assert.Equal(t, 2, f) { + if assert.NotNil(t, c) { + assert.Equal(t, "Two", c.Method) + assert.Equal(t, 3, c.Arguments[0]) + assert.Equal(t, "three", c.ReturnArguments[0]) + } + } + +} + +func Test_callString(t *testing.T) { + + assert.Equal(t, `Method(int,bool,string)`, callString("Method", []interface{}{1, true, "something"}, false)) + +} + +func Test_Mock_Called(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_Called", 1, 2, 3).Return(5, "6", true) + + returnArguments := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 1, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func asyncCall(m *Mock, ch chan Arguments) { + ch <- m.Called(1, 2, 3) +} + +func Test_Mock_Called_blocks(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.Mock.On("asyncCall", 1, 2, 3).Return(5, "6", true).After(2 * time.Millisecond) + + ch := make(chan Arguments) + + go asyncCall(&mockedService.Mock, ch) + + select { + case <-ch: + t.Fatal("should have waited") + case <-time.After(1 * time.Millisecond): + } + + returnArguments := <-ch + + if assert.Equal(t, 1, len(mockedService.Mock.Calls)) { + assert.Equal(t, "asyncCall", mockedService.Mock.Calls[0].Method) + assert.Equal(t, 1, mockedService.Mock.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Mock.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Mock.Calls[0].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments)) { + assert.Equal(t, 5, returnArguments[0]) + assert.Equal(t, "6", returnArguments[1]) + assert.Equal(t, true, returnArguments[2]) + } + +} + +func Test_Mock_Called_For_Bounded_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). 
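+		// This first expectation is capped with Once(); an identical, unbounded one
+		// is registered right after it, so the first Called(1, 2, 3) consumes this
+		// entry and the second call falls through to the (-1, "hi", false) returns.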
+ Return(5, "6", true). + Once() + mockedService. + On("Test_Mock_Called_For_Bounded_Repeatability", 1, 2, 3). + Return(-1, "hi", false) + + returnArguments1 := mockedService.Called(1, 2, 3) + returnArguments2 := mockedService.Called(1, 2, 3) + + if assert.Equal(t, 2, len(mockedService.Calls)) { + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[0].Method) + assert.Equal(t, 1, mockedService.Calls[0].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[0].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[0].Arguments[2]) + + assert.Equal(t, "Test_Mock_Called_For_Bounded_Repeatability", mockedService.Calls[1].Method) + assert.Equal(t, 1, mockedService.Calls[1].Arguments[0]) + assert.Equal(t, 2, mockedService.Calls[1].Arguments[1]) + assert.Equal(t, 3, mockedService.Calls[1].Arguments[2]) + } + + if assert.Equal(t, 3, len(returnArguments1)) { + assert.Equal(t, 5, returnArguments1[0]) + assert.Equal(t, "6", returnArguments1[1]) + assert.Equal(t, true, returnArguments1[2]) + } + + if assert.Equal(t, 3, len(returnArguments2)) { + assert.Equal(t, -1, returnArguments2[0]) + assert.Equal(t, "hi", returnArguments2[1]) + assert.Equal(t, false, returnArguments2[2]) + } + +} + +func Test_Mock_Called_For_SetTime_Expectation(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod", 1, 2, 3).Return(5, "6", true).Times(4) + + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + mockedService.TheExampleMethod(1, 2, 3) + assert.Panics(t, func() { + mockedService.TheExampleMethod(1, 2, 3) + }) + +} + +func Test_Mock_Called_Unexpected(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + // make sure it panics if no expectation was made + assert.Panics(t, func() { + mockedService.Called(1, 2, 3) + }, "Calling unexpected method should panic") + +} + +func Test_AssertExpectationsForObjects_Helper(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper", 3).Return() + + mockedService1.Called(1) + mockedService2.Called(2) + mockedService3.Called(3) + + assert.True(t, AssertExpectationsForObjects(t, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + +} + +func Test_AssertExpectationsForObjects_Helper_Failed(t *testing.T) { + + var mockedService1 = new(TestExampleImplementation) + var mockedService2 = new(TestExampleImplementation) + var mockedService3 = new(TestExampleImplementation) + + mockedService1.On("Test_AssertExpectationsForObjects_Helper_Failed", 1).Return() + mockedService2.On("Test_AssertExpectationsForObjects_Helper_Failed", 2).Return() + mockedService3.On("Test_AssertExpectationsForObjects_Helper_Failed", 3).Return() + + mockedService1.Called(1) + mockedService3.Called(3) + + tt := new(testing.T) + assert.False(t, AssertExpectationsForObjects(tt, mockedService1.Mock, mockedService2.Mock, mockedService3.Mock)) + +} + +func Test_Mock_AssertExpectations(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations", 1, 2, 3).Return(5, 6, 7) + + tt := new(testing.T) + assert.False(t, 
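+	// AssertExpectations is run against a throwaway testing.T: it reports
+	// failure while this expectation is still pending and passes once the
+	// mocked call has actually been made.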
mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder_NoArgs(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder_NoArgs").Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called() + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_Placeholder(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertExpectations_Placeholder", 3, 2, 1).Return(7, 6, 5) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.False(t, mockedService.AssertExpectations(tt)) + + // make call to the second expectation + mockedService.Called(3, 2, 1) + + // now assert expectations again + assert.True(t, mockedService.AssertExpectations(tt)) +} + +func Test_Mock_AssertExpectations_With_Pointers(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{1}).Return(1) + mockedService.On("Test_Mock_AssertExpectations_With_Pointers", &struct{ Foo int }{2}).Return(2) + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + s := struct{ Foo int }{1} + // make the calls now + mockedService.Called(&s) + s.Foo = 2 + mockedService.Called(&s) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectationsCustomType(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("TheExampleMethod3", AnythingOfType("*mock.ExampleType")).Return(nil).Once() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.TheExampleMethod3(&ExampleType{}) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_AssertExpectations_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertExpectations_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Twice() + + tt := new(testing.T) + assert.False(t, mockedService.AssertExpectations(tt)) + + // make the call now + mockedService.Called(1, 2, 3) + + assert.False(t, mockedService.AssertExpectations(tt)) + + mockedService.Called(1, 2, 3) + + // now assert expectations + assert.True(t, mockedService.AssertExpectations(tt)) + +} + +func Test_Mock_TwoCallsWithDifferentArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 1, 2, 3).Return(5, 6, 7) + mockedService.On("Test_Mock_TwoCallsWithDifferentArguments", 4, 5, 6).Return(5, 6, 7) + + args1 := mockedService.Called(1, 2, 3) + assert.Equal(t, 5, args1.Int(0)) + assert.Equal(t, 6, args1.Int(1)) + assert.Equal(t, 7, args1.Int(2)) + + args2 := mockedService.Called(4, 5, 6) + assert.Equal(t, 5, 
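+	// The two expectations share return values but are matched on their
+	// argument lists, so each Called above was routed to its own entry.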
args2.Int(0)) + assert.Equal(t, 6, args2.Int(1)) + assert.Equal(t, 7, args2.Int(2)) + +} + +func Test_Mock_AssertNumberOfCalls(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNumberOfCalls", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 1)) + + mockedService.Called(1, 2, 3) + assert.True(t, mockedService.AssertNumberOfCalls(t, "Test_Mock_AssertNumberOfCalls", 2)) + +} + +func Test_Mock_AssertCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled", 1, 2, 3)) + +} + +func Test_Mock_AssertCalled_WithAnythingOfTypeArgument(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService. + On("Test_Mock_AssertCalled_WithAnythingOfTypeArgument", Anything, Anything, Anything). + Return() + + mockedService.Called(1, "two", []uint8("three")) + + assert.True(t, mockedService.AssertCalled(t, "Test_Mock_AssertCalled_WithAnythingOfTypeArgument", AnythingOfType("int"), AnythingOfType("string"), AnythingOfType("[]uint8"))) + +} + +func Test_Mock_AssertCalled_WithArguments(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 1, 2, 3)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments", 2, 3, 4)) + +} + +func Test_Mock_AssertCalled_WithArguments_With_Repeatability(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3).Return(5, 6, 7).Once() + mockedService.On("Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4).Return(5, 6, 7).Once() + + mockedService.Called(1, 2, 3) + mockedService.Called(2, 3, 4) + + tt := new(testing.T) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 1, 2, 3)) + assert.True(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 2, 3, 4)) + assert.False(t, mockedService.AssertCalled(tt, "Test_Mock_AssertCalled_WithArguments_With_Repeatability", 3, 4, 5)) + +} + +func Test_Mock_AssertNotCalled(t *testing.T) { + + var mockedService = new(TestExampleImplementation) + + mockedService.On("Test_Mock_AssertNotCalled", 1, 2, 3).Return(5, 6, 7) + + mockedService.Called(1, 2, 3) + + assert.True(t, mockedService.AssertNotCalled(t, "Test_Mock_NotCalled")) + +} + +/* + Arguments helper methods +*/ +func Test_Arguments_Get(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.Equal(t, "string", args.Get(0).(string)) + assert.Equal(t, 123, args.Get(1).(int)) + assert.Equal(t, true, args.Get(2).(bool)) + +} + +func Test_Arguments_Is(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Is("string", 123, true)) + assert.False(t, args.Is("wrong", 456, false)) + +} + +func Test_Arguments_Diff(t *testing.T) { + + var args = Arguments([]interface{}{"Hello World", 123, true}) + var diff string + var count int + diff, count = 
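+	// Diff reports a printable description of the mismatches plus the number
+	// of arguments that differed: here 456 != 123 and the string "false" is
+	// compared against the bool true, so count comes back as 2.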
args.Diff([]interface{}{"Hello World", 456, "false"}) + + assert.Equal(t, 2, count) + assert.Contains(t, diff, `%!s(int=456) != %!s(int=123)`) + assert.Contains(t, diff, `false != %!s(bool=true)`) + +} + +func Test_Arguments_Diff_DifferentNumberOfArgs(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var diff string + var count int + diff, count = args.Diff([]interface{}{"string", 456, "false", "extra"}) + + assert.Equal(t, 3, count) + assert.Contains(t, diff, `extra != (Missing)`) + +} + +func Test_Arguments_Diff_WithAnythingArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + var count int + _, count = args.Diff([]interface{}{"string", Anything, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingArgument_InActualToo(t *testing.T) { + + var args = Arguments([]interface{}{"string", Anything, true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("int"), true}) + var count int + _, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 0, count) + +} + +func Test_Arguments_Diff_WithAnythingOfTypeArgument_Failing(t *testing.T) { + + var args = Arguments([]interface{}{"string", AnythingOfType("string"), true}) + var count int + var diff string + diff, count = args.Diff([]interface{}{"string", 123, true}) + + assert.Equal(t, 1, count) + assert.Contains(t, diff, `string != type int - %!s(int=123)`) + +} + +func Test_Arguments_Diff_WithArgMatcher(t *testing.T) { + matchFn := func(a int) bool { + return a == 123 + } + var args = Arguments([]interface{}{"string", MatchedBy(matchFn), true}) + + diff, count := args.Diff([]interface{}{"string", 124, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(int=124) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", false, true}) + assert.Equal(t, 1, count) + assert.Contains(t, diff, `%!s(bool=false) not matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, false}) + assert.Contains(t, diff, `%!s(int=123) matched by func(int) bool`) + + diff, count = args.Diff([]interface{}{"string", 123, true}) + assert.Equal(t, 0, count) + assert.Contains(t, diff, `No differences.`) +} + +func Test_Arguments_Assert(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + + assert.True(t, args.Assert(t, "string", 123, true)) + +} + +func Test_Arguments_String_Representation(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, `string,int,bool`, args.String()) + +} + +func Test_Arguments_String(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, "string", args.String(0)) + +} + +func Test_Arguments_Error(t *testing.T) { + + var err = errors.New("An Error") + var args = Arguments([]interface{}{"string", 123, true, err}) + assert.Equal(t, err, args.Error(3)) + +} + +func Test_Arguments_Error_Nil(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true, nil}) + assert.Equal(t, nil, args.Error(3)) + +} + +func Test_Arguments_Int(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, 123, args.Int(1)) + +} + +func Test_Arguments_Bool(t *testing.T) { + + var args = Arguments([]interface{}{"string", 123, true}) + assert.Equal(t, true, 
args.Bool(2)) + +} diff --git a/vendor/github.com/stretchr/testify/package_test.go b/vendor/github.com/stretchr/testify/package_test.go new file mode 100644 index 00000000..7ac5d6d8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/package_test.go @@ -0,0 +1,12 @@ +package testify + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestImports(t *testing.T) { + if assert.Equal(t, 1, 1) != true { + t.Error("Something is wrong.") + } +} diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go new file mode 100644 index 00000000..169de392 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -0,0 +1,28 @@ +// Package require implements the same assertions as the `assert` package but +// stops test execution when a test fails. +// +// Example Usage +// +// The following is a complete example using require in a standard test function: +// import ( +// "testing" +// "github.com/stretchr/testify/require" +// ) +// +// func TestSomething(t *testing.T) { +// +// var a string = "Hello" +// var b string = "Hello" +// +// require.Equal(t, a, b, "The two words should be the same.") +// +// } +// +// Assertions +// +// The `require` package have same global functions as in the `assert` package, +// but instead of returning a boolean result they call `t.FailNow()`. +// +// Every assertion function also takes an optional string message as the final argument, +// allowing custom error messages to be appended to the message the assertion method outputs. +package require diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go new file mode 100644 index 00000000..d3c2ab9b --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -0,0 +1,16 @@ +package require + +// Assertions provides assertion methods around the +// TestingT interface. +type Assertions struct { + t TestingT +} + +// New makes a new Assertions object for the specified TestingT. 
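+//
+// A purely illustrative sketch (not part of the vendored file) of how the
+// wrapper is meant to be used: bind a *testing.T once, then call the
+// assertions as methods.
+//
+//	func TestSomething(t *testing.T) {
+//		require := New(t)
+//		require.Equal(1, 1)  // passes
+//		require.NotNil(nil)  // fails and stops the test via t.FailNow()
+//	}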
+func New(t TestingT) *Assertions { + return &Assertions{ + t: t, + } +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements_test.go b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go new file mode 100644 index 00000000..b120ae3b --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/forward_requirements_test.go @@ -0,0 +1,385 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +func TestImplementsWrapper(t *testing.T) { + require := New(t) + + require.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsTypeWrapper(t *testing.T) { + require := New(t) + require.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualWrapper(t *testing.T) { + require := New(t) + require.Equal(1, 1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Equal(1, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEqualWrapper(t *testing.T) { + require := New(t) + require.NotEqual(1, 2) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEqual(2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactlyWrapper(t *testing.T) { + require := New(t) + + a := float32(1) + b := float32(1) + c := float64(1) + + require.Exactly(a, b) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Exactly(a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNilWrapper(t *testing.T) { + require := New(t) + require.NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotNil(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNilWrapper(t *testing.T) { + require := New(t) + require.Nil(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Nil(new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrueWrapper(t *testing.T) { + require := New(t) + require.True(true) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.True(false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalseWrapper(t *testing.T) { + require := New(t) + require.False(false) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.False(true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContainsWrapper(t *testing.T) { + require := New(t) + require.Contains("Hello World", "Hello") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Contains("Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContainsWrapper(t *testing.T) { + require := New(t) + require.NotContains("Hello World", "Hello!") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotContains("Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanicsWrapper(t *testing.T) { + 
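+	// Every wrapper test below follows the pattern established above: run the
+	// assertion against the real *testing.T for the passing case, then against
+	// a MockT and check that the failing case flipped its Failed flag.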
require := New(t) + require.Panics(func() { + panic("Panic!") + }) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Panics(func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanicsWrapper(t *testing.T) { + require := New(t) + require.NotPanics(func() {}) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotPanics(func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoErrorWrapper(t *testing.T) { + require := New(t) + require.NoError(nil) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NoError(errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestErrorWrapper(t *testing.T) { + require := New(t) + require.Error(errors.New("some error")) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Error(nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualErrorWrapper(t *testing.T) { + require := New(t) + require.EqualError(errors.New("some error"), "some error") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.EqualError(errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmptyWrapper(t *testing.T) { + require := New(t) + require.Empty("") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Empty("x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmptyWrapper(t *testing.T) { + require := New(t) + require.NotEmpty("x") + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotEmpty("") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDurationWrapper(t *testing.T) { + require := New(t) + a := time.Now() + b := a.Add(10 * time.Second) + + require.WithinDuration(a, b, 15*time.Second) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.WithinDuration(a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDeltaWrapper(t *testing.T) { + require := New(t) + require.InDelta(1.001, 1, 0.01) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.InDelta(1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZeroWrapper(t *testing.T) { + require := New(t) + require.Zero(0) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.Zero(1) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZeroWrapper(t *testing.T) { + require := New(t) + require.NotZero(1) + + mockT := new(MockT) + mockRequire := New(mockT) + mockRequire.NotZero(0) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_EqualSONString(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", 
\"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_Array(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq("Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + mockRequire := New(mockT) + + mockRequire.JSONEq(`["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go new file mode 100644 index 00000000..1bcfcb0d --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -0,0 +1,464 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package require + +import ( + + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if !assert.Condition(t, comp, msgAndArgs...) { + t.FailNow() + } +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.Contains(t, s, contains, msgAndArgs...) 
{ + t.FailNow() + } +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Empty(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// Equal asserts that two objects are equal. +// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Equal(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if !assert.EqualError(t, theError, errString, msgAndArgs...) { + t.FailNow() + } +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.EqualValues(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.Error(t, err, msgAndArgs...) { + t.FailNow() + } +} + + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Exactly(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.Fail(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.FailNow(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + + +// False asserts that the specified value is false. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.False(t, value, msgAndArgs...) { + t.FailNow() + } +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). 
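+// Note: the "Returns whether the assertion was successful" lines throughout
+// this generated file are carried over from the assert package docs; the
+// require variants actually return nothing and call t.FailNow() on failure.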
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + if !assert.HTTPBodyContains(t, handler, method, url, values, str) { + t.FailNow() + } +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) { + t.FailNow() + } +} + + +// HTTPError asserts that a specified handler returns an error status code. +// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPError(t, handler, method, url, values) { + t.FailNow() + } +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPRedirect(t, handler, method, url, values) { + t.FailNow() + } +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPSuccess(t, handler, method, url, values) { + t.FailNow() + } +} + + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { + t.FailNow() + } +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. 
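+// The third parameter is named delta here, but it is forwarded as the
+// epsilon of the relative-error comparison described above.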
+func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.IsType(t, expectedType, object, msgAndArgs...) { + t.FailNow() + } +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if !assert.JSONEq(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if !assert.Len(t, object, length, msgAndArgs...) { + t.FailNow() + } +} + + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Nil(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.NoError(t, err, msgAndArgs...) { + t.FailNow() + } +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.NotContains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotEmpty(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.NotEqual(t, expected, actual, msgAndArgs...) 
{ + t.FailNow() + } +} + + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotNil(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.NotPanics(t, f, msgAndArgs...) { + t.FailNow() + } +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.NotRegexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.NotZero(t, i, msgAndArgs...) { + t.FailNow() + } +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.Panics(t, f, msgAndArgs...) { + t.FailNow() + } +} + + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.Regexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.True(t, value, msgAndArgs...) { + t.FailNow() + } +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.Zero(t, i, msgAndArgs...) 
{ + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl new file mode 100644 index 00000000..ab1b1e9f --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -0,0 +1,6 @@ +{{.Comment}} +func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { + if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go new file mode 100644 index 00000000..58324f10 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -0,0 +1,388 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package require + +import ( + + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + Condition(a.t, comp, msgAndArgs...) +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + Contains(a.t, s, contains, msgAndArgs...) +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + Empty(a.t, object, msgAndArgs...) +} + + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Equal(a.t, expected, actual, msgAndArgs...) +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + EqualError(a.t, theError, errString, msgAndArgs...) +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + EqualValues(a.t, expected, actual, msgAndArgs...) +} + + +// Error asserts that a function returned an error (i.e. not `nil`). 
+// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + Error(a.t, err, msgAndArgs...) +} + + +// Exactly asserts that two objects are equal is value and type. +// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Exactly(a.t, expected, actual, msgAndArgs...) +} + + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + Fail(a.t, failureMessage, msgAndArgs...) +} + + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { + FailNow(a.t, failureMessage, msgAndArgs...) +} + + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + False(a.t, value, msgAndArgs...) +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + HTTPBodyContains(a.t, handler, method, url, values, str) +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPError(a.t, handler, method, url, values) +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPRedirect(a.t, handler, method, url, values) +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPSuccess(a.t, handler, method, url, values) +} + + +// Implements asserts that an object is implemented by the specified interface. 
+// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + Implements(a.t, interfaceObject, object, msgAndArgs...) +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + IsType(a.t, expectedType, object, msgAndArgs...) +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + JSONEq(a.t, expected, actual, msgAndArgs...) +} + + +// Len asserts that the specified object has a specific length. +// Len also fails if the object has a type that len() does not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + Len(a.t, object, length, msgAndArgs...) +} + + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + Nil(a.t, object, msgAndArgs...) +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + NoError(a.t, err, msgAndArgs...) +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element.
+// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + NotContains(a.t, s, contains, msgAndArgs...) +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + NotEmpty(a.t, object, msgAndArgs...) +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + NotEqual(a.t, expected, actual, msgAndArgs...) +} + + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + NotNil(a.t, object, msgAndArgs...) +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + NotPanics(a.t, f, msgAndArgs...) +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + NotRegexp(a.t, rx, str, msgAndArgs...) +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + NotZero(a.t, i, msgAndArgs...) +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + Panics(a.t, f, msgAndArgs...) +} + + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + Regexp(a.t, rx, str, msgAndArgs...) +} + + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). 
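To make the forwarding scheme concrete: the `require.go.tmpl` template shown earlier expands to one package-level function per assertion. The sketch below shows the expansion for `Equal` purely for orientation; it mirrors the generated code rather than adding anything to this diff.

```go
package require

import (
	assert "github.com/stretchr/testify/assert"
)

// Illustrative expansion of require.go.tmpl for Equal. The require variant
// calls t.FailNow() on failure, aborting the running test immediately,
// whereas the assert variant records the failure and lets the test continue.
func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) {
	if !assert.Equal(t, expected, actual, msgAndArgs...) {
		t.FailNow()
	}
}
```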
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + True(a.t, value, msgAndArgs...) +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + Zero(a.t, i, msgAndArgs...) +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl new file mode 100644 index 00000000..b93569e0 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go new file mode 100644 index 00000000..41147562 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -0,0 +1,9 @@ +package require + +// TestingT is an interface wrapper around *testing.T +type TestingT interface { + Errorf(format string, args ...interface{}) + FailNow() +} + +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl diff --git a/vendor/github.com/stretchr/testify/require/requirements_test.go b/vendor/github.com/stretchr/testify/require/requirements_test.go new file mode 100644 index 00000000..d2ccc99c --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/requirements_test.go @@ -0,0 +1,369 @@ +package require + +import ( + "errors" + "testing" + "time" +) + +// AssertionTesterInterface defines an interface to be used for testing assertion methods +type AssertionTesterInterface interface { + TestMethod() +} + +// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface +type AssertionTesterConformingObject struct { +} + +func (a *AssertionTesterConformingObject) TestMethod() { +} + +// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface +type AssertionTesterNonConformingObject struct { +} + +type MockT struct { + Failed bool +} + +func (t *MockT) FailNow() { + t.Failed = true +} + +func (t *MockT) Errorf(format string, args ...interface{}) { + _, _ = format, args +} + +func TestImplements(t *testing.T) { + + Implements(t, (*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestIsType(t *testing.T) { + + IsType(t, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) + + mockT := new(MockT) + IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqual(t *testing.T) { + + Equal(t, 1, 1) + + mockT := new(MockT) + Equal(mockT, 1, 2) + if !mockT.Failed { + t.Error("Check should 
fail") + } + +} + +func TestNotEqual(t *testing.T) { + + NotEqual(t, 1, 2) + mockT := new(MockT) + NotEqual(mockT, 2, 2) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestExactly(t *testing.T) { + + a := float32(1) + b := float32(1) + c := float64(1) + + Exactly(t, a, b) + + mockT := new(MockT) + Exactly(mockT, a, c) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotNil(t *testing.T) { + + NotNil(t, new(AssertionTesterConformingObject)) + + mockT := new(MockT) + NotNil(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNil(t *testing.T) { + + Nil(t, nil) + + mockT := new(MockT) + Nil(mockT, new(AssertionTesterConformingObject)) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestTrue(t *testing.T) { + + True(t, true) + + mockT := new(MockT) + True(mockT, false) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestFalse(t *testing.T) { + + False(t, false) + + mockT := new(MockT) + False(mockT, true) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestContains(t *testing.T) { + + Contains(t, "Hello World", "Hello") + + mockT := new(MockT) + Contains(mockT, "Hello World", "Salut") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotContains(t *testing.T) { + + NotContains(t, "Hello World", "Hello!") + + mockT := new(MockT) + NotContains(mockT, "Hello World", "Hello") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestPanics(t *testing.T) { + + Panics(t, func() { + panic("Panic!") + }) + + mockT := new(MockT) + Panics(mockT, func() {}) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotPanics(t *testing.T) { + + NotPanics(t, func() {}) + + mockT := new(MockT) + NotPanics(mockT, func() { + panic("Panic!") + }) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNoError(t *testing.T) { + + NoError(t, nil) + + mockT := new(MockT) + NoError(mockT, errors.New("some error")) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestError(t *testing.T) { + + Error(t, errors.New("some error")) + + mockT := new(MockT) + Error(mockT, nil) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEqualError(t *testing.T) { + + EqualError(t, errors.New("some error"), "some error") + + mockT := new(MockT) + EqualError(mockT, errors.New("some error"), "Not some error") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestEmpty(t *testing.T) { + + Empty(t, "") + + mockT := new(MockT) + Empty(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotEmpty(t *testing.T) { + + NotEmpty(t, "x") + + mockT := new(MockT) + NotEmpty(mockT, "") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestWithinDuration(t *testing.T) { + + a := time.Now() + b := a.Add(10 * time.Second) + + WithinDuration(t, a, b, 15*time.Second) + + mockT := new(MockT) + WithinDuration(mockT, a, b, 5*time.Second) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestInDelta(t *testing.T) { + + InDelta(t, 1.001, 1, 0.01) + + mockT := new(MockT) + InDelta(mockT, 1, 2, 0.5) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestZero(t *testing.T) { + + Zero(t, "") + + mockT := new(MockT) + Zero(mockT, "x") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestNotZero(t *testing.T) { + + NotZero(t, "x") + + mockT := new(MockT) + NotZero(mockT, "") + if !mockT.Failed { + t.Error("Check should 
fail") + } +} + +func TestJSONEq_EqualSONString(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"hello": "world", "foo": "bar"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_EquivalentButNotEqual(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashOfArraysAndHashes(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "{\r\n\t\"numeric\": 1.5,\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]],\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\"\r\n}", + "{\r\n\t\"numeric\": 1.5,\r\n\t\"hash\": {\"nested\": \"hash\", \"nested_slice\": [\"this\", \"is\", \"nested\"]},\r\n\t\"string\": \"foo\",\r\n\t\"array\": [{\"foo\": \"bar\"}, 1, \"string\", [\"nested\", \"array\", 5.5]]\r\n}") + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_Array(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `["foo", {"nested": "hash", "hello": "world"}]`) + if mockT.Failed { + t.Error("Check should pass") + } +} + +func TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `{"foo": "bar", {"nested": "hash", "hello": "world"}}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_HashesNotEquivalent(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ActualIsNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `{"foo": "bar"}`, "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ExpectedIsNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "Not JSON", `{"foo": "bar", "hello": "world"}`) + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, "Not JSON", "Not JSON") + if !mockT.Failed { + t.Error("Check should fail") + } +} + +func TestJSONEq_ArraysOfDifferentOrder(t *testing.T) { + mockT := new(MockT) + JSONEq(mockT, `["foo", {"hello": "world", "nested": "hash"}]`, `[{ "hello": "world", "nested": "hash"}, "foo"]`) + if !mockT.Failed { + t.Error("Check should fail") + } +} diff --git a/vendor/github.com/stretchr/testify/suite/doc.go b/vendor/github.com/stretchr/testify/suite/doc.go new file mode 100644 index 00000000..f91a245d --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/doc.go @@ -0,0 +1,65 @@ +// Package suite contains logic for creating testing suite structs +// and running the methods on those structs as tests. The most useful +// piece of this package is that you can create setup/teardown methods +// on your testing suites, which will run before/after the whole suite +// or individual tests (depending on which interface(s) you +// implement). +// +// A testing suite is usually built by first extending the built-in +// suite functionality from suite.Suite in testify. Alternatively, +// you could reproduce that logic on your own if you wanted (you +// just need to implement the TestingSuite interface from +// suite/interfaces.go). 
+// +// After that, you can implement any of the interfaces in +// suite/interfaces.go to add setup/teardown functionality to your +// suite, and add any methods that start with "Test" to add tests. +// Methods that do not match any suite interfaces and do not begin +// with "Test" will not be run by testify, and can safely be used as +// helper methods. +// +// Once you've built your testing suite, you need to run the suite +// (using suite.Run from testify) inside any function that matches the +// signature that "go test" is already looking for (i.e. +// func(*testing.T)). +// +// The regular expression provided via the command-line argument "-run" +// selects which test suites to run, and the one provided via "-m" selects +// which methods of those suites to run. The Suite object also provides +// assertion methods. +// +// A crude example: +// // Basic imports +// import ( +// "testing" +// "github.com/stretchr/testify/assert" +// "github.com/stretchr/testify/suite" +// ) +// +// // Define the suite, and absorb the built-in basic suite +// // functionality from testify - including a T() method which +// // returns the current testing context +// type ExampleTestSuite struct { +// suite.Suite +// VariableThatShouldStartAtFive int +// } +// +// // Make sure that VariableThatShouldStartAtFive is set to five +// // before each test +// func (suite *ExampleTestSuite) SetupTest() { +// suite.VariableThatShouldStartAtFive = 5 +// } +// +// // All methods that begin with "Test" are run as tests within a +// // suite. +// func (suite *ExampleTestSuite) TestExample() { +// assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive) +// suite.Equal(5, suite.VariableThatShouldStartAtFive) +// } +// +// // In order for 'go test' to run this suite, we need to create +// // a normal test function and pass our suite to suite.Run +// func TestExampleTestSuite(t *testing.T) { +// suite.Run(t, new(ExampleTestSuite)) +// } +package suite diff --git a/vendor/github.com/stretchr/testify/suite/interfaces.go b/vendor/github.com/stretchr/testify/suite/interfaces.go new file mode 100644 index 00000000..20969472 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/interfaces.go @@ -0,0 +1,34 @@ +package suite + +import "testing" + +// TestingSuite can store and return the current *testing.T context +// generated by 'go test'. +type TestingSuite interface { + T() *testing.T + SetT(*testing.T) +} + +// SetupAllSuite has a SetupSuite method, which will run before the +// tests in the suite are run. +type SetupAllSuite interface { + SetupSuite() +} + +// SetupTestSuite has a SetupTest method, which will run before each +// test in the suite. +type SetupTestSuite interface { + SetupTest() +} + +// TearDownAllSuite has a TearDownSuite method, which will run after +// all the tests in the suite have been run. +type TearDownAllSuite interface { + TearDownSuite() +} + +// TearDownTestSuite has a TearDownTest method, which will run after +// each test in the suite.
+type TearDownTestSuite interface { + TearDownTest() +} diff --git a/vendor/github.com/stretchr/testify/suite/suite.go b/vendor/github.com/stretchr/testify/suite/suite.go new file mode 100644 index 00000000..f831e251 --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite.go @@ -0,0 +1,115 @@ +package suite + +import ( + "flag" + "fmt" + "os" + "reflect" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var matchMethod = flag.String("m", "", "regular expression to select tests of the suite to run") + +// Suite is a basic testing suite with methods for storing and +// retrieving the current *testing.T context. +type Suite struct { + *assert.Assertions + require *require.Assertions + t *testing.T +} + +// T retrieves the current *testing.T context. +func (suite *Suite) T() *testing.T { + return suite.t +} + +// SetT sets the current *testing.T context. +func (suite *Suite) SetT(t *testing.T) { + suite.t = t + suite.Assertions = assert.New(t) + suite.require = require.New(t) +} + +// Require returns a require context for suite. +func (suite *Suite) Require() *require.Assertions { + if suite.require == nil { + suite.require = require.New(suite.T()) + } + return suite.require +} + +// Assert returns an assert context for suite. Normally, you can call +// `suite.NoError(expected, actual)`, but for situations where the embedded +// methods are overridden (for example, you might want to override +// assert.Assertions with require.Assertions), this method is provided so you +// can call `suite.Assert().NoError()`. +func (suite *Suite) Assert() *assert.Assertions { + if suite.Assertions == nil { + suite.Assertions = assert.New(suite.T()) + } + return suite.Assertions +} + +// Run takes a testing suite and runs all of the tests attached +// to it. 
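As a quick orientation before the `Run` implementation below, here is a compact suite that wires several of these lifecycle interfaces together; the type and field names are invented for illustration.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/suite"
)

// StoreSuite is a hypothetical suite exercising the lifecycle hooks.
type StoreSuite struct {
	suite.Suite
	items []string
}

// SetupSuite (SetupAllSuite) runs once, before any test in the suite.
func (s *StoreSuite) SetupSuite() { s.items = nil }

// SetupTest (SetupTestSuite) runs before every test method.
func (s *StoreSuite) SetupTest() { s.items = []string{"seed"} }

// TearDownTest (TearDownTestSuite) runs after every test method.
func (s *StoreSuite) TearDownTest() { s.items = nil }

func (s *StoreSuite) TestSeeded() {
	// Require() aborts this test on failure; the embedded assert
	// methods (s.Equal and friends) record failures and continue.
	s.Require().Len(s.items, 1)
	s.Equal("seed", s.items[0])
}

// TestStoreSuite is the func(*testing.T) entry point 'go test' discovers.
func TestStoreSuite(t *testing.T) { suite.Run(t, new(StoreSuite)) }
```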
+func Run(t *testing.T, suite TestingSuite) { + suite.SetT(t) + + if setupAllSuite, ok := suite.(SetupAllSuite); ok { + setupAllSuite.SetupSuite() + } + defer func() { + if tearDownAllSuite, ok := suite.(TearDownAllSuite); ok { + tearDownAllSuite.TearDownSuite() + } + }() + + methodFinder := reflect.TypeOf(suite) + tests := []testing.InternalTest{} + for index := 0; index < methodFinder.NumMethod(); index++ { + method := methodFinder.Method(index) + ok, err := methodFilter(method.Name) + if err != nil { + fmt.Fprintf(os.Stderr, "testify: invalid regexp for -m: %s\n", err) + os.Exit(1) + } + if ok { + test := testing.InternalTest{ + Name: method.Name, + F: func(t *testing.T) { + parentT := suite.T() + suite.SetT(t) + if setupTestSuite, ok := suite.(SetupTestSuite); ok { + setupTestSuite.SetupTest() + } + defer func() { + if tearDownTestSuite, ok := suite.(TearDownTestSuite); ok { + tearDownTestSuite.TearDownTest() + } + suite.SetT(parentT) + }() + method.Func.Call([]reflect.Value{reflect.ValueOf(suite)}) + }, + } + tests = append(tests, test) + } + } + + if !testing.RunTests(func(_, _ string) (bool, error) { return true, nil }, + tests) { + t.Fail() + } +} + +// Filtering method according to set regular expression +// specified command-line argument -m +func methodFilter(name string) (bool, error) { + if ok, _ := regexp.MatchString("^Test", name); !ok { + return false, nil + } + return regexp.MatchString(*matchMethod, name) +} diff --git a/vendor/github.com/stretchr/testify/suite/suite_test.go b/vendor/github.com/stretchr/testify/suite/suite_test.go new file mode 100644 index 00000000..c7c4e88f --- /dev/null +++ b/vendor/github.com/stretchr/testify/suite/suite_test.go @@ -0,0 +1,239 @@ +package suite + +import ( + "errors" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" +) + +// SuiteRequireTwice is intended to test the usage of suite.Require in two +// different tests +type SuiteRequireTwice struct{ Suite } + +// TestSuiteRequireTwice checks for regressions of issue #149 where +// suite.requirements was not initialised in suite.SetT() +// A regression would result on these tests panicking rather than failing. +func TestSuiteRequireTwice(t *testing.T) { + ok := testing.RunTests( + func(_, _ string) (bool, error) { return true, nil }, + []testing.InternalTest{{ + Name: "TestSuiteRequireTwice", + F: func(t *testing.T) { + suite := new(SuiteRequireTwice) + Run(t, suite) + }, + }}, + ) + assert.Equal(t, false, ok) +} + +func (s *SuiteRequireTwice) TestRequireOne() { + r := s.Require() + r.Equal(1, 2) +} + +func (s *SuiteRequireTwice) TestRequireTwo() { + r := s.Require() + r.Equal(1, 2) +} + +// This suite is intended to store values to make sure that only +// testing-suite-related methods are run. It's also a fully +// functional example of a testing suite, using setup/teardown methods +// and a helper method that is ignored by testify. To make this look +// more like a real world example, all tests in the suite perform some +// type of assertion. +type SuiteTester struct { + // Include our basic suite logic. + Suite + + // Keep counts of how many times each method is run. + SetupSuiteRunCount int + TearDownSuiteRunCount int + SetupTestRunCount int + TearDownTestRunCount int + TestOneRunCount int + TestTwoRunCount int + NonTestMethodRunCount int +} + +type SuiteSkipTester struct { + // Include our basic suite logic. + Suite + + // Keep counts of how many times each method is run. 
+ SetupSuiteRunCount int + TearDownSuiteRunCount int +} + +// The SetupSuite method will be run by testify once, at the very +// start of the testing suite, before any tests are run. +func (suite *SuiteTester) SetupSuite() { + suite.SetupSuiteRunCount++ +} + +func (suite *SuiteSkipTester) SetupSuite() { + suite.SetupSuiteRunCount++ + suite.T().Skip() +} + +// The TearDownSuite method will be run by testify once, at the very +// end of the testing suite, after all tests have been run. +func (suite *SuiteTester) TearDownSuite() { + suite.TearDownSuiteRunCount++ +} + +func (suite *SuiteSkipTester) TearDownSuite() { + suite.TearDownSuiteRunCount++ +} + +// The SetupTest method will be run before every test in the suite. +func (suite *SuiteTester) SetupTest() { + suite.SetupTestRunCount++ +} + +// The TearDownTest method will be run after every test in the suite. +func (suite *SuiteTester) TearDownTest() { + suite.TearDownTestRunCount++ +} + +// Every method in a testing suite that begins with "Test" will be run +// as a test. TestOne is an example of a test. For the purposes of +// this example, we've included assertions in the tests, since most +// tests will issue assertions. +func (suite *SuiteTester) TestOne() { + beforeCount := suite.TestOneRunCount + suite.TestOneRunCount++ + assert.Equal(suite.T(), suite.TestOneRunCount, beforeCount+1) + suite.Equal(suite.TestOneRunCount, beforeCount+1) +} + +// TestTwo is another example of a test. +func (suite *SuiteTester) TestTwo() { + beforeCount := suite.TestTwoRunCount + suite.TestTwoRunCount++ + assert.NotEqual(suite.T(), suite.TestTwoRunCount, beforeCount) + suite.NotEqual(suite.TestTwoRunCount, beforeCount) +} + +func (suite *SuiteTester) TestSkip() { + suite.T().Skip() +} + +// NonTestMethod does not begin with "Test", so it will not be run by +// testify as a test in the suite. This is useful for creating helper +// methods for your tests. +func (suite *SuiteTester) NonTestMethod() { + suite.NonTestMethodRunCount++ +} + +// TestRunSuite will be run by the 'go test' command, so within it, we +// can run our suite using the Run(*testing.T, TestingSuite) function. +func TestRunSuite(t *testing.T) { + suiteTester := new(SuiteTester) + Run(t, suiteTester) + + // Normally, the test would end here. The following are simply + // some assertions to ensure that the Run function is working as + // intended - they are not part of the example. + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once. + assert.Equal(t, suiteTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteTester.TearDownSuiteRunCount, 1) + + // There are three test methods (TestOne, TestTwo, and TestSkip), so + // the SetupTest and TearDownTest methods (which should be run once for + // each test) should have been run three times. + assert.Equal(t, suiteTester.SetupTestRunCount, 3) + assert.Equal(t, suiteTester.TearDownTestRunCount, 3) + + // Each test should have been run once. + assert.Equal(t, suiteTester.TestOneRunCount, 1) + assert.Equal(t, suiteTester.TestTwoRunCount, 1) + + // Methods that don't match the test method identifier shouldn't + // have been run at all. 
+ assert.Equal(t, suiteTester.NonTestMethodRunCount, 0) + + suiteSkipTester := new(SuiteSkipTester) + Run(t, suiteSkipTester) + + // The suite was only run once, so the SetupSuite and TearDownSuite + // methods should have each been run only once, even though SetupSuite + // called Skip() + assert.Equal(t, suiteSkipTester.SetupSuiteRunCount, 1) + assert.Equal(t, suiteSkipTester.TearDownSuiteRunCount, 1) + +} + +func TestSuiteGetters(t *testing.T) { + suite := new(SuiteTester) + suite.SetT(t) + assert.NotNil(t, suite.Assert()) + assert.Equal(t, suite.Assertions, suite.Assert()) + assert.NotNil(t, suite.Require()) + assert.Equal(t, suite.require, suite.Require()) +} + +type SuiteLoggingTester struct { + Suite +} + +func (s *SuiteLoggingTester) TestLoggingPass() { + s.T().Log("TESTLOGPASS") +} + +func (s *SuiteLoggingTester) TestLoggingFail() { + s.T().Log("TESTLOGFAIL") + assert.NotNil(s.T(), nil) // expected to fail +} + +type StdoutCapture struct { + oldStdout *os.File + readPipe *os.File +} + +func (sc *StdoutCapture) StartCapture() { + sc.oldStdout = os.Stdout + sc.readPipe, os.Stdout, _ = os.Pipe() +} + +func (sc *StdoutCapture) StopCapture() (string, error) { + if sc.oldStdout == nil || sc.readPipe == nil { + return "", errors.New("StartCapture not called before StopCapture") + } + os.Stdout.Close() + os.Stdout = sc.oldStdout + bytes, err := ioutil.ReadAll(sc.readPipe) + if err != nil { + return "", err + } + return string(bytes), nil +} + +func TestSuiteLogging(t *testing.T) { + testT := testing.T{} + + suiteLoggingTester := new(SuiteLoggingTester) + + capture := StdoutCapture{} + capture.StartCapture() + Run(&testT, suiteLoggingTester) + output, err := capture.StopCapture() + + assert.Nil(t, err, "Got an error trying to capture stdout!") + + // Failed tests' output is always printed + assert.Contains(t, output, "TESTLOGFAIL") + + if testing.Verbose() { + // In verbose mode, output from successful tests is also printed + assert.Contains(t, output, "TESTLOGPASS") + } else { + assert.NotContains(t, output, "TESTLOGPASS") + } +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 00000000..2a7cfd2b --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2012-2013 Dave Collins + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
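The `StdoutCapture` helper above works by swapping `os.Stdout` for the write end of a pipe. Stripped of the suite machinery, the pattern looks like this standalone sketch (not part of the vendored code):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	// Redirect os.Stdout to the write end of an in-memory pipe.
	old := os.Stdout
	r, w, _ := os.Pipe()
	os.Stdout = w

	fmt.Println("captured line")

	// Restore stdout, then drain everything that was written.
	w.Close()
	os.Stdout = old
	out, _ := ioutil.ReadAll(r)
	fmt.Printf("captured: %q\n", string(out))
}
```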
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 00000000..565bf589 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,151 @@ +// Copyright (c) 2015 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine and "-tags disableunsafe" +// is not added to the go build command line. +// +build !appengine,!disableunsafe + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +var ( + // offsetPtr, offsetScalar, and offsetFlag are the offsets for the + // internal reflect.Value fields. These values are valid before golang + // commit ecccf07e7f9d which changed the format. The are also valid + // after commit 82f48826c6c7 which changed the format again to mirror + // the original format. Code in the init function updates these offsets + // as necessary. + offsetPtr = uintptr(ptrSize) + offsetScalar = uintptr(0) + offsetFlag = uintptr(ptrSize * 2) + + // flagKindWidth and flagKindShift indicate various bits that the + // reflect package uses internally to track kind information. + // + // flagRO indicates whether or not the value field of a reflect.Value is + // read-only. + // + // flagIndir indicates whether the value field of a reflect.Value is + // the actual data or a pointer to the data. + // + // These values are valid before golang commit 90a7c3c86944 which + // changed their positions. Code in the init function updates these + // flags as necessary. + flagKindWidth = uintptr(5) + flagKindShift = uintptr(flagKindWidth - 1) + flagRO = uintptr(1 << 0) + flagIndir = uintptr(1 << 1) +) + +func init() { + // Older versions of reflect.Value stored small integers directly in the + // ptr field (which is named val in the older versions). Versions + // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named + // scalar for this purpose which unfortunately came before the flag + // field, so the offset of the flag field is different for those + // versions. + // + // This code constructs a new reflect.Value from a known small integer + // and checks if the size of the reflect.Value struct indicates it has + // the scalar field. When it does, the offsets are updated accordingly. 
+ vv := reflect.ValueOf(0xf00) + if unsafe.Sizeof(vv) == (ptrSize * 4) { + offsetScalar = ptrSize * 2 + offsetFlag = ptrSize * 3 + } + + // Commit 90a7c3c86944 changed the flag positions such that the low + // order bits are the kind. This code extracts the kind from the flags + // field and ensures it's the correct type. When it's not, the flag + // order has been changed to the newer format, so the flags are updated + // accordingly. + upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag) + upfv := *(*uintptr)(upf) + flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift) + if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) { + flagKindShift = 0 + flagRO = 1 << 5 + flagIndir = 1 << 6 + + // Commit adf9b30e5594 modified the flags to separate the + // flagRO flag into two bits which specifies whether or not the + // field is embedded. This causes flagIndir to move over a bit + // and means that flagRO is the combination of either of the + // original flagRO bit and the new bit. + // + // This code detects the change by extracting what used to be + // the indirect bit to ensure it's set. When it's not, the flag + // order has been changed to the newer format, so the flags are + // updated accordingly. + if upfv&flagIndir == 0 { + flagRO = 3 << 5 + flagIndir = 1 << 7 + } + } +} + +// unsafeReflectValue converts the passed reflect.Value into one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) (rv reflect.Value) { + indirects := 1 + vt := v.Type() + upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr) + rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag)) + if rvf&flagIndir != 0 { + vt = reflect.PtrTo(v.Type()) + indirects++ + } else if offsetScalar != 0 { + // The value is in the scalar field when it's not one of the + // reference types. + switch vt.Kind() { + case reflect.Uintptr: + case reflect.Chan: + case reflect.Func: + case reflect.Map: + case reflect.Ptr: + case reflect.UnsafePointer: + default: + upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + + offsetScalar) + } + } + + pv := reflect.NewAt(vt, upv) + rv = pv + for i := 0; i < indirects; i++ { + rv = rv.Elem() + } + return rv +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 00000000..457e4123 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,37 @@ +// Copyright (c) 2015 Dave Collins + +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when either the code is running on Google App Engine or "-tags disableunsafe" +// is added to the go build command line. +// +build appengine disableunsafe + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 00000000..14f02dc1 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. 
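The `bypass.go`/`bypasssafe.go` pair exists because plain reflection refuses to hand back unexported data. This small standalone program (illustrative only, not part of the diff) shows the restriction that the unsafe path works around:

```go
package main

import (
	"fmt"
	"reflect"
)

type wrapped struct{ hidden string }

func main() {
	f := reflect.ValueOf(wrapped{hidden: "secret"}).Field(0)

	// CanInterface reports false for unexported fields, and calling
	// f.Interface() here would panic. bypass.go sidesteps this with the
	// unsafe package; bypasssafe.go simply returns the value unchanged.
	fmt.Println(f.CanInterface()) // false
}
```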
+var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("<nil>") + maxNewlineBytes = []byte("<max depth reached>\n") + maxShortBytes = []byte("<max>") + circularBytes = []byte("<already shown>") + circularShortBytes = []byte("<shown>") + invalidAngleBytes = []byte("<invalid>") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputs the result to Writer w. +// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisfy an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w.
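One user-visible consequence of `handleMethods` above: with the default settings, spew can invoke a `String` method declared on a pointer receiver even when it is handed a plain value. A minimal sketch, assuming the standalone `go-spew` import path:

```go
package main

import "github.com/davecgh/go-spew/spew"

type id int

// String has a pointer receiver, so a bare id value does not satisfy
// fmt.Stringer directly.
func (i *id) String() string { return "id-stringer" }

func main() {
	// With DisablePointerMethods left false, spew addresses the value
	// (via unsafe where necessary) and still finds the method.
	spew.Dump(id(7))
}
```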
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 00000000..ee1ab07b --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "disableunsafe" build tag specified. + DisablePointerMethods bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. 
Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 00000000..5be0c406 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+ +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. 
Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability. Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings. This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
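+
+Configuration Example
+
+As an illustrative sketch (the names cfg and myMap are hypothetical, not part
+of the package), a dedicated ConfigState gives deterministic, diffable output
+without touching the global spew.Config:
+
+	cfg := spew.ConfigState{Indent: "\t", SortKeys: true}
+	cfg.Dump(myMap)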
+ +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 00000000..a0ff95e2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile("^.*\\._Ctype_char$") + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$") + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$") +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. 
+ for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound == true: + d.w.Write(nilAngleBytes) + + case cycleFound == true: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. 
+ buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 00000000..ecf3b80e --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
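+//
+// For example, if the "-" and "+" flags are set on the fmt.State, the
+// rebuilt format string is "%-+v".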
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound == true: + f.fs.Write(nilAngleBytes) + + case cycleFound == true: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
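+
+A minimal usage sketch (myVar is a hypothetical variable):
+
+	fmt.Printf("%+v\n", spew.NewFormatter(myVar))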
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 00000000..d8233f54
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 00000000..c67dad61 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 00000000..64cc40fe --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,758 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. +// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. 
UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>
" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. +func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. 
Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. +// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. + for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
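+//
+// For example, for a = ["a","b","x","c","d"] and b = ["a","b","c","d"],
+// the result is [{0 0 2} {3 2 2} {5 4 0}].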
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. +// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). 
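A similar sketch for the opcode view (same assumed import path as above; the inputs are made up):

```go
m := difflib.NewMatcher(
	[]string{"a", "b", "c"},
	[]string{"a", "x", "c"},
)
for _, op := range m.GetOpCodes() {
	// Tag is 'r', 'd', 'i' or 'e'; the ranges chain as documented above,
	// so each I1/J1 equals the previous opcode's I2/J2.
	fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
}
// For these inputs: e a[0:1] b[0:1], r a[1:2] b[1:2], e a[2:3] b[2:3].
```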
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. + if codes[0].Tag == 'e' { + c := codes[0] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + } + if codes[len(codes)-1].Tag == 'e' { + c := codes[len(codes)-1] + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + } + nn := n + n + groups := [][]OpCode{} + group := []OpCode{} + for _, c := range codes { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + // End the current group and start a new one whenever + // there is a large range with no changes. + if c.Tag == 'e' && i2-i1 > nn { + group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), + j1, min(j2, j1+n)}) + groups = append(groups, group) + group = []OpCode{} + i1, j1 = max(i1, i2-n), max(j1, j2-n) + } + group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) + } + if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { + groups = append(groups, group) + } + return groups +} + +// Return a measure of the sequences' similarity (float in [0,1]). +// +// Where T is the total number of elements in both sequences, and +// M is the number of matches, this is 2.0*M / T. +// Note that this is 1 if the sequences are identical, and 0 if +// they have nothing in common. +// +// .Ratio() is expensive to compute if you haven't already computed +// .GetMatchingBlocks() or .GetOpCodes(), in which case you may +// want to try .QuickRatio() or .RealQuickRatio() first to get an +// upper bound. +func (m *SequenceMatcher) Ratio() float64 { + matches := 0 + for _, m := range m.GetMatchingBlocks() { + matches += m.Size + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() relatively quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute. +func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio().
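Because QuickRatio and RealQuickRatio only bound Ratio from above, a caller can cascade from cheapest to most expensive; a sketch, with linesA, linesB and the 0.6 cutoff as hypothetical stand-ins:

```go
m := difflib.NewMatcher(linesA, linesB) // linesA, linesB: hypothetical []string inputs
const cutoff = 0.6
if m.RealQuickRatio() >= cutoff &&
	m.QuickRatio() >= cutoff &&
	m.Ratio() >= cutoff {
	// Only Ratio proves similarity; the two upper bounds just let us
	// reject obvious mismatches before doing the quadratic work.
}
```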
+func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. +// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from SplitLines result in diffs that are suitable to be +// written out verbatim, since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// diff.FromFile, diff.ToFile, diff.FromDate, and diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + w := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil { + return err + } + for _, c := range g { + i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 + if c.Tag == 'e' { + for _, line := range diff.A[i1:i2] { + if err := w(" " + line); err != nil { + return err + } + } + continue + } + if c.Tag == 'r' || c.Tag == 'd' { + for _, line := range diff.A[i1:i2] { + if err := w("-" + line); err != nil { + return err + } + } + } + if c.Tag == 'r' || c.Tag == 'i' { + for _, line := range diff.B[j1:j2] { + if err := w("+" + line); err != nil { + return err + } + } + } + } + } + return nil +} + +// Like WriteUnifiedDiff but returns the diff as a string.
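Putting the pieces together, a minimal unified-diff sketch (SplitLines is defined later in this file; the file names are placeholders):

```go
diff := difflib.UnifiedDiff{
	A:        difflib.SplitLines("one\ntwo\nthree\n"),
	B:        difflib.SplitLines("one\n2\nthree\n"),
	FromFile: "a.txt",
	ToFile:   "b.txt",
	Context:  3,
}
text, err := difflib.GetUnifiedDiffString(diff)
if err != nil {
	panic(err) // sketch only; real callers should handle the error
}
fmt.Print(text)
```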
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteUnifiedDiff(w, diff) + return string(w.Bytes()), err +} + +// Convert range to the "ed" format. +func formatRangeContext(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + if length <= 1 { + return fmt.Sprintf("%d", beginning) + } + return fmt.Sprintf("%d,%d", beginning, beginning+length-1) +} + +type ContextDiff UnifiedDiff + +// Compare two sequences of lines; generate the delta as a context diff. +// +// Context diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by diff.Context +// which defaults to three. +// +// By default, the diff control lines (those with *** or ---) are +// created with a trailing newline. +// +// For inputs that do not have trailing newlines, set the diff.Eol +// argument to "" so that the output will be uniformly newline free. +// +// The context diff format normally has a header for filenames and +// modification times. Any or all of these may be specified using +// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. +// The modification times are normally expressed in the ISO 8601 format. +// If not specified, the strings default to blanks. +func WriteContextDiff(writer io.Writer, diff ContextDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + var diffErr error + w := func(format string, args ...interface{}) { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + if diffErr == nil && err != nil { + diffErr = err + } + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + prefix := map[byte]string{ + 'i': "+ ", + 'd': "- ", + 'r': "! ", + 'e': " ", + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + w("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } + + first, last := g[0], g[len(g)-1] + w("***************" + diff.Eol) + + range1 := formatRangeContext(first.I1, last.I2) + w("*** %s ****%s", range1, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'd' { + for _, cc := range g { + if cc.Tag == 'i' { + continue + } + for _, line := range diff.A[cc.I1:cc.I2] { + w(prefix[cc.Tag] + line) + } + } + break + } + } + + range2 := formatRangeContext(first.J1, last.J2) + w("--- %s ----%s", range2, diff.Eol) + for _, c := range g { + if c.Tag == 'r' || c.Tag == 'i' { + for _, cc := range g { + if cc.Tag == 'd' { + continue + } + for _, line := range diff.B[cc.J1:cc.J2] { + w(prefix[cc.Tag] + line) + } + } + break + } + } + } + return diffErr +} + +// Like WriteContextDiff but returns the diff as a string. +func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving the newlines. The output can be used +// as input for UnifiedDiff and ContextDiff structures.
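The context-diff entry points mirror the unified ones, since ContextDiff is just a renamed UnifiedDiff; another hedged sketch:

```go
out, err := difflib.GetContextDiffString(difflib.ContextDiff{
	A:        difflib.SplitLines("one\ntwo\n"),
	B:        difflib.SplitLines("one\n2\n"),
	FromFile: "a.txt", // placeholder names
	ToFile:   "b.txt",
	Context:  3,
})
if err == nil {
	fmt.Print(out) // "***"/"---" headers, then the grouped hunks
}
```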
+func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore new file mode 100644 index 00000000..00268614 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md new file mode 100644 index 00000000..21999458 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/LICENSE.md @@ -0,0 +1,23 @@ +objx - by Mat Ryer and Tyler Bunnell + +The MIT License (MIT) + +Copyright (c) 2014 Stretchr, Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md new file mode 100644 index 00000000..4aa18068 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/README.md @@ -0,0 +1,3 @@ +# objx + + * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go new file mode 100644 index 00000000..721bcac7 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/accessors.go @@ -0,0 +1,179 @@ +package objx + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// arrayAccesRegexString is the regex used to extract the array number +// from the access path +const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` + +// arrayAccesRegex is the compiled arrayAccesRegexString +var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) + +// Get gets the value using the specified selector and +// returns it inside a new Obj object. +// +// If it cannot find the value, Get will return a nil +// value inside an instance of Obj. 
+// +// Get can only operate directly on map[string]interface{} and []interface{}. +// +// Example +// +// To access the title of the third chapter of the second book, do: +// +// o.Get("books[1].chapters[2].title") +func (m Map) Get(selector string) *Value { + rawObj := access(m, selector, nil, false, false) + return &Value{data: rawObj} +} + +// Set sets the value using the specified selector and +// returns the object on which Set was called. +// +// Set can only operate directly on map[string]interface{} and []interface{} +// +// Example +// +// To set the title of the third chapter of the second book, do: +// +// o.Set("books[1].chapters[2].title","Time to Go") +func (m Map) Set(selector string, value interface{}) Map { + access(m, selector, value, true, false) + return m +} + +// access accesses the object using the selector and performs the +// appropriate action. +func access(current, selector, value interface{}, isSet, panics bool) interface{} { + + switch selector.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + + if array, ok := current.([]interface{}); ok { + index := intFromInterface(selector) + + if index >= len(array) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) + } + return nil + } + + return array[index] + } + + return nil + + case string: + + selStr := selector.(string) + selSegs := strings.SplitN(selStr, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 + var err error + + // https://github.com/stretchr/objx/issues/12 + if strings.Contains(thisSel, "[") { + + arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) + + if len(arrayMatches) > 0 { + + // Get the key into the map + thisSel = arrayMatches[1] + + // Get the index into the array at the key + index, err = strconv.Atoi(arrayMatches[2]) + + if err != nil { + // This should never happen. If it does, something has gone + // seriously wrong. Panic. + panic("objx: Array index is not an integer. Must use array[int].") + } + + } + } + + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil + } else { + current = curMSI[thisSel] + } + default: + current = nil + } + + if current == nil && panics { + panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) + } + + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] + } else { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range. 
Slice only contains %d items.", index, len(array))) + } + current = nil + } + } + } + + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet, panics) + } + + } + + return current + +} + +// intFromInterface converts an interface object to the largest +// representation of an unsigned integer using a type switch and +// assertions +func intFromInterface(selector interface{}) int { + var value int + switch selector.(type) { + case int: + value = selector.(int) + case int8: + value = int(selector.(int8)) + case int16: + value = int(selector.(int16)) + case int32: + value = int(selector.(int32)) + case int64: + value = int(selector.(int64)) + case uint: + value = int(selector.(uint)) + case uint8: + value = int(selector.(uint8)) + case uint16: + value = int(selector.(uint16)) + case uint32: + value = int(selector.(uint32)) + case uint64: + value = int(selector.(uint64)) + default: + panic("objx: array access argument is not an integer type (this should never happen)") + } + + return value +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt new file mode 100644 index 00000000..30602347 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/array-access.txt @@ -0,0 +1,14 @@ + case []{1}: + a := object.([]{1}) + if isSet { + a[index] = value.({1}) + } else { + if index >= len(a) { + if panics { + panic(fmt.Sprintf("objx: Index %d is out of range because the []{1} only contains %d items.", index, len(a))) + } + return nil + } else { + return a[index] + } + } diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html new file mode 100644 index 00000000..379ffc3c --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/index.html @@ -0,0 +1,86 @@ + + + + Codegen + + + + + +

[codegen/index.html: this diff adds an 86-line in-browser code-generation page whose HTML markup was lost in extraction. The recoverable form labels are a Template field ("Use {x} as a placeholder for each argument"), an Arguments field (comma separated, one block per line), and an Output area.]

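The template that follows is instantiated once per row of the types_list.txt added later in this diff. Judging from the generated type_specific_codegen.go below, a row such as String,string,"hello","",Str appears to expand {1}→string, {2}→"hello", {3}→"" and {4}→Str, so the first template function would come out as:

```go
// Str gets the value as a string, returns the optionalDefault
// value or a system default object if the value is the wrong type.
func (v *Value) Str(optionalDefault ...string) string {
	if s, ok := v.data.(string); ok {
		return s
	}
	if len(optionalDefault) == 1 {
		return optionalDefault[0]
	}
	return ""
}
```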
+ + + + + + + + diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt new file mode 100644 index 00000000..b396900b --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/template.txt @@ -0,0 +1,286 @@ +/* + {4} ({1} and []{1}) + -------------------------------------------------- +*/ + +// {4} gets the value as a {1}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) {4}(optionalDefault ...{1}) {1} { + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} +} + +// Must{4} gets the value as a {1}. +// +// Panics if the object is not a {1}. +func (v *Value) Must{4}() {1} { + return v.data.({1}) +} + +// {4}Slice gets the value as a []{1}, returns the optionalDefault +// value or nil if the value is not a []{1}. +func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// Must{4}Slice gets the value as a []{1}. +// +// Panics if the object is not a []{1}. +func (v *Value) Must{4}Slice() []{1} { + return v.data.([]{1}) +} + +// Is{4} gets whether the object contained is a {1} or not. +func (v *Value) Is{4}() bool { + _, ok := v.data.({1}) + return ok +} + +// Is{4}Slice gets whether the object contained is a []{1} or not. +func (v *Value) Is{4}Slice() bool { + _, ok := v.data.([]{1}) + return ok +} + +// Each{4} calls the specified callback for each object +// in the []{1}. +// +// Panics if the object is the wrong type. +func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { + + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// Where{4} uses the specified decider function to select items +// from the []{1}. The object contained in the result will contain +// only the selected items. +func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { + + var selected []{1} + + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data:selected} + +} + +// Group{4} uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]{1}. +func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { + + groups := make(map[string][]{1}) + + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data:groups} + +} + +// Replace{4} uses the specified function to replace each {1}s +// by iterating each item. The data in the returned result will be a +// []{1} containing the replaced items. +func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { + + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data:replaced} + +} + +// Collect{4} uses the specified collector function to collect a value +// for each of the {1}s in the slice. 
The data returned will be a +// []interface{}. +func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { + + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + + v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data:collected} +} + +// ************************************************************ +// TESTS +// ************************************************************ + +func Test{4}(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": val, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}()) + assert.Equal(t, val, New(m).Get("value").Must{4}()) + assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) + assert.Equal(t, val, New(m).Get("nothing").{4}({2})) + + assert.Panics(t, func() { + New(m).Get("age").Must{4}() + }) + +} + +func Test{4}Slice(t *testing.T) { + + val := {1}( {2} ) + m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} + assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) + assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) + assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) + + assert.Panics(t, func() { + New(m).Get("nothing").Must{4}Slice() + }) + +} + +func TestIs{4}(t *testing.T) { + + var v *Value + + v = &Value{data: {1}({2})} + assert.True(t, v.Is{4}()) + + v = &Value{data: []{1}{ {1}({2}) }} + assert.True(t, v.Is{4}Slice()) + +} + +func TestEach{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { + + count++ + replacedVals = append(replacedVals, val) + + // abort early + if i == 2 { + return false + } + + return true + + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) + assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) + +} + +func TestWhere{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + selected := v.Where{4}(func(i int, val {1}) bool { + return i%2==0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) + +} + +func TestGroup{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + grouped := v.Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).data.(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) + +} + +func TestReplace{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + rawArr := v.Must{4}Slice() + + replaced := v.Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + + replacedArr := replaced.Must{4}Slice() + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } + +} + +func TestCollect{4}(t *testing.T) { + + v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} + + collected := 
v.Collect{4}(func(index int, val {1}) interface{} { + return index + }) + + collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } + +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt new file mode 100644 index 00000000..069d43d8 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/codegen/types_list.txt @@ -0,0 +1,20 @@ +Interface,interface{},"something",nil,Inter +Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI +ObjxMap,(Map),New(1),New(nil),ObjxMap +Bool,bool,true,false,Bool +String,string,"hello","",Str +Int,int,1,0,Int +Int8,int8,1,0,Int8 +Int16,int16,1,0,Int16 +Int32,int32,1,0,Int32 +Int64,int64,1,0,Int64 +Uint,uint,1,0,Uint +Uint8,uint8,1,0,Uint8 +Uint16,uint16,1,0,Uint16 +Uint32,uint32,1,0,Uint32 +Uint64,uint64,1,0,Uint64 +Uintptr,uintptr,1,0,Uintptr +Float32,float32,1,0,Float32 +Float64,float64,1,0,Float64 +Complex64,complex64,1,0,Complex64 +Complex128,complex128,1,0,Complex128 diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go new file mode 100644 index 00000000..f9eb42a2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/constants.go @@ -0,0 +1,13 @@ +package objx + +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // SignatureSeparator is the character that is used to + // separate the Base64 string from the security signature. 
+ SignatureSeparator = "_" +) diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go new file mode 100644 index 00000000..9cdfa9f9 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/conversions.go @@ -0,0 +1,117 @@ +package objx + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net/url" +) + +// JSON converts the contained object to a JSON string +// representation +func (m Map) JSON() (string, error) { + + result, err := json.Marshal(m) + + if err != nil { + err = errors.New("objx: JSON encode failed with: " + err.Error()) + } + + return string(result), err + +} + +// MustJSON converts the contained object to a JSON string +// representation and panics if there is an error +func (m Map) MustJSON() string { + result, err := m.JSON() + if err != nil { + panic(err.Error()) + } + return result +} + +// Base64 converts the contained object to a Base64 string +// representation of the JSON string representation +func (m Map) Base64() (string, error) { + + var buf bytes.Buffer + + jsonData, err := m.JSON() + if err != nil { + return "", err + } + + encoder := base64.NewEncoder(base64.StdEncoding, &buf) + encoder.Write([]byte(jsonData)) + encoder.Close() + + return buf.String(), nil + +} + +// MustBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and panics +// if there is an error +func (m Map) MustBase64() string { + result, err := m.Base64() + if err != nil { + panic(err.Error()) + } + return result +} + +// SignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key. +func (m Map) SignedBase64(key string) (string, error) { + + base64, err := m.Base64() + if err != nil { + return "", err + } + + sig := HashWithKey(base64, key) + + return base64 + SignatureSeparator + sig, nil + +} + +// MustSignedBase64 converts the contained object to a Base64 string +// representation of the JSON string representation and signs it +// using the provided key and panics if there is an error +func (m Map) MustSignedBase64(key string) string { + result, err := m.SignedBase64(key) + if err != nil { + panic(err.Error()) + } + return result +} + +/* + URL Query + ------------------------------------------------ +*/ + +// URLValues creates a url.Values object from an Obj. This +// function requires that the wrapped object be a map[string]interface{} +func (m Map) URLValues() url.Values { + + vals := make(url.Values) + + for k, v := range m { + //TODO: can this be done without sprintf? + vals.Set(k, fmt.Sprintf("%v", v)) + } + + return vals +} + +// URLQuery gets an encoded URL query representing the given +// Obj. This function requires that the wrapped object be a +// map[string]interface{} +func (m Map) URLQuery() (string, error) { + return m.URLValues().Encode(), nil +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go new file mode 100644 index 00000000..47bf85e4 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/doc.go @@ -0,0 +1,72 @@ +// objx - Go package for dealing with maps, slices, JSON and other data. 
+// +// Overview +// +// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +// a powerful `Get` method (among others) that allows you to easily and quickly get +// access to data within the map, without having to worry too much about type assertions, +// missing data, default values etc. +// +// Pattern +// +// Objx uses a predictable pattern to make accessing data from within `map[string]interface{}`s +// easy. +// +// Call one of the `objx.` functions to create your `objx.Map` to get going: +// +// m, err := objx.FromJSON(json) +// +// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +// the rest will be optimistic and try to figure things out without panicking. +// +// Use `Get` to access the value you're interested in. You can use dot and array +// notation too: +// +// m.Get("places[0].latlng") +// +// Once you have sought the `Value` you're interested in, you can use the `Is*` methods +// to determine its type. +// +// if m.Get("code").IsStr() { /* ... */ } +// +// Or you can just assume the type, and use one of the strong type methods to +// extract the real value: +// +// m.Get("code").Int() +// +// If there's no value there (or if it's the wrong type) then a default value +// will be returned, or you can be explicit about the default value. +// +// Get("code").Int(-1) +// +// If you're dealing with a slice of data as a value, Objx provides many useful +// methods for iterating, manipulating and selecting that data. You can find out more +// by exploring the index below. +// +// Reading data +// +// A simple example of how to use Objx: +// +// // use MustFromJSON to make an objx.Map from some JSON +// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) +// +// // get the details +// name := m.Get("name").Str() +// age := m.Get("age").Int() +// +// // get their nickname (or use their name if they +// // don't have one) +// nickname := m.Get("nickname").Str(name) +// +// Ranging +// +// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For +// example, to `range` the data, do what you would expect: +// +// m := objx.MustFromJSON(json) +// for key, value := range m { +// +// /* ... do your magic ... */ +// +// } +package objx diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go new file mode 100644 index 00000000..eb6ed8e2 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/map.go @@ -0,0 +1,222 @@ +package objx + +import ( + "encoding/base64" + "encoding/json" + "errors" + "io/ioutil" + "net/url" + "strings" +) + +// MSIConvertable is an interface that defines methods for converting your +// custom types to a map[string]interface{} representation. +type MSIConvertable interface { + // MSI gets a map[string]interface{} (msi) representing the + // object. + MSI() map[string]interface{} +} + +// Map provides extended functionality for working with +// untyped data, in particular map[string]interface{} (msi). +type Map map[string]interface{} + +// Value returns the internal value instance +func (m Map) Value() *Value { + return &Value{data: m} +} + +// Nil represents a nil Map. +var Nil Map = New(nil) + +// New creates a new Map containing the map[string]interface{} in the data argument. +// If the data argument is not a map[string]interface{}, New attempts to call the +// MSI() method on the MSIConvertable interface to create one.
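As a concrete illustration of the selector syntax that Get and Set accept (hypothetical data, and assuming the usual upstream import path github.com/stretchr/objx for this vendored package):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx" // assumed upstream path of this vendored package
)

func main() {
	// Dot and [n] notation, as documented above.
	m := objx.MustFromJSON(`{"books":[{"chapters":[{"title":"Intro"}]}]}`)

	fmt.Println(m.Get("books[0].chapters[0].title").Str()) // Intro

	m.Set("books[0].chapters[0].title", "Getting Started")
	fmt.Println(m.Get("books[0].chapters[0].title").Str()) // Getting Started
}
```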
+func New(data interface{}) Map { + if _, ok := data.(map[string]interface{}); !ok { + if converter, ok := data.(MSIConvertable); ok { + data = converter.MSI() + } else { + return nil + } + } + return Map(data.(map[string]interface{})) +} + +// MSI creates a map[string]interface{} and puts it inside a new Map. +// +// The arguments follow a key, value pattern. +// +// Panics +// +// Panics if any key argument is non-string or if there are an odd number of arguments. +// +// Example +// +// To easily create Maps: +// +// m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) +// +// // creates a Map equivalent to +// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +func MSI(keyAndValuePairs ...interface{}) Map { + + newMap := make(map[string]interface{}) + keyAndValuePairsLen := len(keyAndValuePairs) + + if keyAndValuePairsLen%2 != 0 { + panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + } + + for i := 0; i < keyAndValuePairsLen; i = i + 2 { + + key := keyAndValuePairs[i] + value := keyAndValuePairs[i+1] + + // make sure the key is a string + keyString, keyStringOK := key.(string) + if !keyStringOK { + panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + } + + newMap[keyString] = value + + } + + return New(newMap) +} + +// ****** Conversion Constructors + +// MustFromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Panics if the JSON is invalid. +func MustFromJSON(jsonString string) Map { + o, err := FromJSON(jsonString) + + if err != nil { + panic("objx: MustFromJSON failed with error: " + err.Error()) + } + + return o +} + +// FromJSON creates a new Map containing the data specified in the +// jsonString. +// +// Returns an error if the JSON is invalid. +func FromJSON(jsonString string) (Map, error) { + + var data interface{} + err := json.Unmarshal([]byte(jsonString), &data) + + if err != nil { + return Nil, err + } + + return New(data), nil + +} + +// FromBase64 creates a new Obj containing the data specified +// in the Base64 string. +// +// The string is an encoded JSON string returned by Base64 +func FromBase64(base64String string) (Map, error) { + + decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) + + decoded, err := ioutil.ReadAll(decoder) + if err != nil { + return nil, err + } + + return FromJSON(string(decoded)) +} + +// MustFromBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromBase64(base64String string) Map { + + result, err := FromBase64(base64String) + + if err != nil { + panic("objx: MustFromBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string.
+// +// The string is an encoded JSON string returned by SignedBase64 +func FromSignedBase64(base64String, key string) (Map, error) { + parts := strings.Split(base64String, SignatureSeparator) + if len(parts) != 2 { + return nil, errors.New("objx: Signed base64 string is malformed.") + } + + sig := HashWithKey(parts[0], key) + if parts[1] != sig { + return nil, errors.New("objx: Signature for base64 data does not match.") + } + + return FromBase64(parts[0]) +} + +// MustFromSignedBase64 creates a new Obj containing the data specified +// in the Base64 string and panics if there is an error. +// +// The string is an encoded JSON string returned by Base64 +func MustFromSignedBase64(base64String, key string) Map { + + result, err := FromSignedBase64(base64String, key) + + if err != nil { + panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) + } + + return result +} + +// FromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +func FromURLQuery(query string) (Map, error) { + + vals, err := url.ParseQuery(query) + + if err != nil { + return nil, err + } + + m := make(map[string]interface{}) + for k, vals := range vals { + m[k] = vals[0] + } + + return New(m), nil +} + +// MustFromURLQuery generates a new Obj by parsing the specified +// query. +// +// For queries with multiple values, the first value is selected. +// +// Panics if it encounters an error +func MustFromURLQuery(query string) Map { + + o, err := FromURLQuery(query) + + if err != nil { + panic("objx: MustFromURLQuery failed with error: " + err.Error()) + } + + return o + +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go new file mode 100644 index 00000000..b35c8639 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/mutations.go @@ -0,0 +1,81 @@ +package objx + +// Exclude returns a new Map with the keys in the specified []string +// excluded. +func (d Map) Exclude(exclude []string) Map { + + excluded := make(Map) + for k, v := range d { + var shouldInclude bool = true + for _, toExclude := range exclude { + if k == toExclude { + shouldInclude = false + break + } + } + if shouldInclude { + excluded[k] = v + } + } + + return excluded +} + +// Copy creates a shallow copy of the Obj. +func (m Map) Copy() Map { + copied := make(map[string]interface{}) + for k, v := range m { + copied[k] = v + } + return New(copied) +} + +// Merge blends the specified map with a copy of this map and returns the result. +// +// Keys that appear in both will be selected from the specified map. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) Merge(merge Map) Map { + return m.Copy().MergeHere(merge) +} + +// Merge blends the specified map with this map and returns the current map. +// +// Keys that appear in both will be selected from the specified map. The original map +// will be modified. This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) MergeHere(merge Map) Map { + + for k, v := range merge { + m[k] = v + } + + return m + +} + +// Transform builds a new Obj giving the transformer a chance +// to change the keys and values as it goes. 
This method requires that +// the wrapped object be a map[string]interface{} +func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { + newMap := make(map[string]interface{}) + for k, v := range m { + modifiedKey, modifiedVal := transformer(k, v) + newMap[modifiedKey] = modifiedVal + } + return New(newMap) +} + +// TransformKeys builds a new map using the specified key mapping. +// +// Unspecified keys will be unaltered. +// This method requires that the wrapped object be a map[string]interface{} +func (m Map) TransformKeys(mapping map[string]string) Map { + return m.Transform(func(key string, value interface{}) (string, interface{}) { + + if newKey, ok := mapping[key]; ok { + return newKey, value + } + + return key, value + }) +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go new file mode 100644 index 00000000..fdd6be9c --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/security.go @@ -0,0 +1,14 @@ +package objx + +import ( + "crypto/sha1" + "encoding/hex" +) + +// HashWithKey hashes the specified string using the security +// key. +func HashWithKey(data, key string) string { + hash := sha1.New() + hash.Write([]byte(data + ":" + key)) + return hex.EncodeToString(hash.Sum(nil)) +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go new file mode 100644 index 00000000..d9e0b479 --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/tests.go @@ -0,0 +1,17 @@ +package objx + +// Has gets whether there is something at the specified selector +// or not. +// +// If m is nil, Has will always return false. +func (m Map) Has(selector string) bool { + if m == nil { + return false + } + return !m.Get(selector).IsNil() +} + +// IsNil gets whether the data is nil or not. +func (v *Value) IsNil() bool { + return v == nil || v.data == nil +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go new file mode 100644 index 00000000..f3ecb29b --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -0,0 +1,2881 @@ +package objx + +/* + Inter (interface{} and []interface{}) + -------------------------------------------------- +*/ + +// Inter gets the value as a interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Inter(optionalDefault ...interface{}) interface{} { + if s, ok := v.data.(interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInter gets the value as a interface{}. +// +// Panics if the object is not a interface{}. +func (v *Value) MustInter() interface{} { + return v.data.(interface{}) +} + +// InterSlice gets the value as a []interface{}, returns the optionalDefault +// value or nil if the value is not a []interface{}. +func (v *Value) InterSlice(optionalDefault ...[]interface{}) []interface{} { + if s, ok := v.data.([]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInterSlice gets the value as a []interface{}. 
+// +// Panics if the object is not a []interface{}. +func (v *Value) MustInterSlice() []interface{} { + return v.data.([]interface{}) +} + +// IsInter gets whether the object contained is a interface{} or not. +func (v *Value) IsInter() bool { + _, ok := v.data.(interface{}) + return ok +} + +// IsInterSlice gets whether the object contained is a []interface{} or not. +func (v *Value) IsInterSlice() bool { + _, ok := v.data.([]interface{}) + return ok +} + +// EachInter calls the specified callback for each object +// in the []interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { + + for index, val := range v.MustInterSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInter uses the specified decider function to select items +// from the []interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { + + var selected []interface{} + + v.EachInter(func(index int, val interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInter uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]interface{}. +func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { + + groups := make(map[string][]interface{}) + + v.EachInter(func(index int, val interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInter uses the specified function to replace each interface{}s +// by iterating each item. The data in the returned result will be a +// []interface{} containing the replaced items. +func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + replaced := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInter uses the specified collector function to collect a value +// for each of the interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { + + arr := v.MustInterSlice() + collected := make([]interface{}, len(arr)) + + v.EachInter(func(index int, val interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + MSI (map[string]interface{} and []map[string]interface{}) + -------------------------------------------------- +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. 
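All of these generated accessors share one shape per type; a short sketch of the Each* iteration contract, with made-up data and the same assumed objx import as above:

```go
tags := objx.New(map[string]interface{}{
	"tags": []interface{}{"go", "lfs", "diff"},
}).Get("tags")

tags.EachInter(func(i int, val interface{}) bool {
	fmt.Println(i, val)
	return i < 1 // returning false stops the iteration, matching the loop above
})
// Prints "0 go" and "1 lfs", then stops.
```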
+func (v *Value) MustMSI() map[string]interface{} { + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. +func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + + var selected []map[string]interface{} + + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. +func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + + groups := make(map[string][]map[string]interface{}) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. 
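And the Collect* shape, here via the MSI variant documented above (hypothetical data):

```go
people := objx.New(map[string]interface{}{
	"people": []map[string]interface{}{
		{"name": "Mat"}, {"name": "Tyler"},
	},
}).Get("people")

names := people.CollectMSI(func(i int, p map[string]interface{}) interface{} {
	return p["name"] // collect one value per element
}).MustInterSlice()
fmt.Println(names) // [Mat Tyler]
```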
+func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) + -------------------------------------------------- +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). +// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([](Map)); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. +func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + + var selected [](Map) + + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + + groups := make(map[string][](Map)) + + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. 
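Likewise the Group* shape; GroupBool is generated further down, and the data is made up:

```go
flags := objx.New(map[string]interface{}{
	"flags": []bool{true, false, true},
}).Get("flags")

grouped := flags.GroupBool(func(i int, v bool) string {
	return fmt.Sprintf("%v", v) // key each item by its own value
})
_ = grouped // wraps map[string][]bool{"true": {true, true}, "false": {false}}
```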
+func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Bool (bool and []bool) + -------------------------------------------------- +*/ + +// Bool gets the value as a bool, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Bool(optionalDefault ...bool) bool { + if s, ok := v.data.(bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return false +} + +// MustBool gets the value as a bool. +// +// Panics if the object is not a bool. +func (v *Value) MustBool() bool { + return v.data.(bool) +} + +// BoolSlice gets the value as a []bool, returns the optionalDefault +// value or nil if the value is not a []bool. +func (v *Value) BoolSlice(optionalDefault ...[]bool) []bool { + if s, ok := v.data.([]bool); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustBoolSlice gets the value as a []bool. +// +// Panics if the object is not a []bool. +func (v *Value) MustBoolSlice() []bool { + return v.data.([]bool) +} + +// IsBool gets whether the object contained is a bool or not. +func (v *Value) IsBool() bool { + _, ok := v.data.(bool) + return ok +} + +// IsBoolSlice gets whether the object contained is a []bool or not. +func (v *Value) IsBoolSlice() bool { + _, ok := v.data.([]bool) + return ok +} + +// EachBool calls the specified callback for each object +// in the []bool. +// +// Panics if the object is the wrong type. +func (v *Value) EachBool(callback func(int, bool) bool) *Value { + + for index, val := range v.MustBoolSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereBool uses the specified decider function to select items +// from the []bool. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereBool(decider func(int, bool) bool) *Value { + + var selected []bool + + v.EachBool(func(index int, val bool) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupBool uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]bool. +func (v *Value) GroupBool(grouper func(int, bool) string) *Value { + + groups := make(map[string][]bool) + + v.EachBool(func(index int, val bool) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]bool, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceBool uses the specified function to replace each bools +// by iterating each item. 
The data in the returned result will be a +// []bool containing the replaced items. +func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { + + arr := v.MustBoolSlice() + replaced := make([]bool, len(arr)) + + v.EachBool(func(index int, val bool) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectBool uses the specified collector function to collect a value +// for each of the bools in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { + + arr := v.MustBoolSlice() + collected := make([]interface{}, len(arr)) + + v.EachBool(func(index int, val bool) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Str (string and []string) + -------------------------------------------------- +*/ + +// Str gets the value as a string, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Str(optionalDefault ...string) string { + if s, ok := v.data.(string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return "" +} + +// MustStr gets the value as a string. +// +// Panics if the object is not a string. +func (v *Value) MustStr() string { + return v.data.(string) +} + +// StrSlice gets the value as a []string, returns the optionalDefault +// value or nil if the value is not a []string. +func (v *Value) StrSlice(optionalDefault ...[]string) []string { + if s, ok := v.data.([]string); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustStrSlice gets the value as a []string. +// +// Panics if the object is not a []string. +func (v *Value) MustStrSlice() []string { + return v.data.([]string) +} + +// IsStr gets whether the object contained is a string or not. +func (v *Value) IsStr() bool { + _, ok := v.data.(string) + return ok +} + +// IsStrSlice gets whether the object contained is a []string or not. +func (v *Value) IsStrSlice() bool { + _, ok := v.data.([]string) + return ok +} + +// EachStr calls the specified callback for each object +// in the []string. +// +// Panics if the object is the wrong type. +func (v *Value) EachStr(callback func(int, string) bool) *Value { + + for index, val := range v.MustStrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereStr uses the specified decider function to select items +// from the []string. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereStr(decider func(int, string) bool) *Value { + + var selected []string + + v.EachStr(func(index int, val string) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupStr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]string. 
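The same three-accessor pattern repeats for every supported type: a soft getter that takes an optional default, a `Must` variant that panics on a type mismatch, and an `Is` test. A minimal usage sketch (map contents are illustrative):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"enabled": true,
		"name":    "git-lfs",
	})

	// Soft getters return the optional default on a missing key or a
	// type mismatch, never panicking.
	fmt.Println(m.Get("name").Str("<unset>"))    // git-lfs
	fmt.Println(m.Get("missing").Str("<unset>")) // <unset>
	fmt.Println(m.Get("enabled").Bool())         // true

	// Is* reports the dynamic type without extracting the value.
	fmt.Println(m.Get("name").IsStr())  // true
	fmt.Println(m.Get("name").IsBool()) // false

	// Must* extracts or panics; use it when the type is guaranteed.
	fmt.Println(m.Get("enabled").MustBool()) // true
}
```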
+func (v *Value) GroupStr(grouper func(int, string) string) *Value { + + groups := make(map[string][]string) + + v.EachStr(func(index int, val string) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]string, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceStr uses the specified function to replace each strings +// by iterating each item. The data in the returned result will be a +// []string containing the replaced items. +func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { + + arr := v.MustStrSlice() + replaced := make([]string, len(arr)) + + v.EachStr(func(index int, val string) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectStr uses the specified collector function to collect a value +// for each of the strings in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { + + arr := v.MustStrSlice() + collected := make([]interface{}, len(arr)) + + v.EachStr(func(index int, val string) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int (int and []int) + -------------------------------------------------- +*/ + +// Int gets the value as a int, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int(optionalDefault ...int) int { + if s, ok := v.data.(int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt gets the value as a int. +// +// Panics if the object is not a int. +func (v *Value) MustInt() int { + return v.data.(int) +} + +// IntSlice gets the value as a []int, returns the optionalDefault +// value or nil if the value is not a []int. +func (v *Value) IntSlice(optionalDefault ...[]int) []int { + if s, ok := v.data.([]int); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustIntSlice gets the value as a []int. +// +// Panics if the object is not a []int. +func (v *Value) MustIntSlice() []int { + return v.data.([]int) +} + +// IsInt gets whether the object contained is a int or not. +func (v *Value) IsInt() bool { + _, ok := v.data.(int) + return ok +} + +// IsIntSlice gets whether the object contained is a []int or not. +func (v *Value) IsIntSlice() bool { + _, ok := v.data.([]int) + return ok +} + +// EachInt calls the specified callback for each object +// in the []int. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt(callback func(int, int) bool) *Value { + + for index, val := range v.MustIntSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt uses the specified decider function to select items +// from the []int. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt(decider func(int, int) bool) *Value { + + var selected []int + + v.EachInt(func(index int, val int) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int. 
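One behavior worth calling out: as vendored here, the `Where*` helpers append the items for which the decider returns `false` (see `WhereStr` above), so in practice a decider expresses rejection rather than selection. A sketch of `ReplaceStr` and `WhereStr` (slice contents are illustrative):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"refs": []string{"main", "dev", "lfs"},
	})

	// ReplaceStr maps each element through the replacer into a new []string.
	upper := m.Get("refs").ReplaceStr(func(i int, s string) string {
		return strings.ToUpper(s)
	}).MustStrSlice()
	fmt.Println(upper) // [MAIN DEV LFS]

	// WhereStr, as defined above, keeps the elements whose decider returns
	// false; here that selects the short names.
	short := m.Get("refs").WhereStr(func(i int, s string) bool {
		return len(s) > 3
	}).MustStrSlice()
	fmt.Println(short) // [dev lfs]
}
```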
+func (v *Value) GroupInt(grouper func(int, int) string) *Value { + + groups := make(map[string][]int) + + v.EachInt(func(index int, val int) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt uses the specified function to replace each ints +// by iterating each item. The data in the returned result will be a +// []int containing the replaced items. +func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { + + arr := v.MustIntSlice() + replaced := make([]int, len(arr)) + + v.EachInt(func(index int, val int) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt uses the specified collector function to collect a value +// for each of the ints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { + + arr := v.MustIntSlice() + collected := make([]interface{}, len(arr)) + + v.EachInt(func(index int, val int) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int8 (int8 and []int8) + -------------------------------------------------- +*/ + +// Int8 gets the value as a int8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int8(optionalDefault ...int8) int8 { + if s, ok := v.data.(int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt8 gets the value as a int8. +// +// Panics if the object is not a int8. +func (v *Value) MustInt8() int8 { + return v.data.(int8) +} + +// Int8Slice gets the value as a []int8, returns the optionalDefault +// value or nil if the value is not a []int8. +func (v *Value) Int8Slice(optionalDefault ...[]int8) []int8 { + if s, ok := v.data.([]int8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt8Slice gets the value as a []int8. +// +// Panics if the object is not a []int8. +func (v *Value) MustInt8Slice() []int8 { + return v.data.([]int8) +} + +// IsInt8 gets whether the object contained is a int8 or not. +func (v *Value) IsInt8() bool { + _, ok := v.data.(int8) + return ok +} + +// IsInt8Slice gets whether the object contained is a []int8 or not. +func (v *Value) IsInt8Slice() bool { + _, ok := v.data.([]int8) + return ok +} + +// EachInt8 calls the specified callback for each object +// in the []int8. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt8(callback func(int, int8) bool) *Value { + + for index, val := range v.MustInt8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt8 uses the specified decider function to select items +// from the []int8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { + + var selected []int8 + + v.EachInt8(func(index int, val int8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt8 uses the specified grouper function to group the items +// keyed by the return of the grouper. 
The object contained in the +// result will contain a map[string][]int8. +func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { + + groups := make(map[string][]int8) + + v.EachInt8(func(index int, val int8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt8 uses the specified function to replace each int8s +// by iterating each item. The data in the returned result will be a +// []int8 containing the replaced items. +func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { + + arr := v.MustInt8Slice() + replaced := make([]int8, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt8 uses the specified collector function to collect a value +// for each of the int8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { + + arr := v.MustInt8Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt8(func(index int, val int8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int16 (int16 and []int16) + -------------------------------------------------- +*/ + +// Int16 gets the value as a int16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int16(optionalDefault ...int16) int16 { + if s, ok := v.data.(int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt16 gets the value as a int16. +// +// Panics if the object is not a int16. +func (v *Value) MustInt16() int16 { + return v.data.(int16) +} + +// Int16Slice gets the value as a []int16, returns the optionalDefault +// value or nil if the value is not a []int16. +func (v *Value) Int16Slice(optionalDefault ...[]int16) []int16 { + if s, ok := v.data.([]int16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt16Slice gets the value as a []int16. +// +// Panics if the object is not a []int16. +func (v *Value) MustInt16Slice() []int16 { + return v.data.([]int16) +} + +// IsInt16 gets whether the object contained is a int16 or not. +func (v *Value) IsInt16() bool { + _, ok := v.data.(int16) + return ok +} + +// IsInt16Slice gets whether the object contained is a []int16 or not. +func (v *Value) IsInt16Slice() bool { + _, ok := v.data.([]int16) + return ok +} + +// EachInt16 calls the specified callback for each object +// in the []int16. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt16(callback func(int, int16) bool) *Value { + + for index, val := range v.MustInt16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt16 uses the specified decider function to select items +// from the []int16. The object contained in the result will contain +// only the selected items. 
+func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { + + var selected []int16 + + v.EachInt16(func(index int, val int16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int16. +func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { + + groups := make(map[string][]int16) + + v.EachInt16(func(index int, val int16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt16 uses the specified function to replace each int16s +// by iterating each item. The data in the returned result will be a +// []int16 containing the replaced items. +func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { + + arr := v.MustInt16Slice() + replaced := make([]int16, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt16 uses the specified collector function to collect a value +// for each of the int16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { + + arr := v.MustInt16Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt16(func(index int, val int16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int32 (int32 and []int32) + -------------------------------------------------- +*/ + +// Int32 gets the value as a int32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int32(optionalDefault ...int32) int32 { + if s, ok := v.data.(int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt32 gets the value as a int32. +// +// Panics if the object is not a int32. +func (v *Value) MustInt32() int32 { + return v.data.(int32) +} + +// Int32Slice gets the value as a []int32, returns the optionalDefault +// value or nil if the value is not a []int32. +func (v *Value) Int32Slice(optionalDefault ...[]int32) []int32 { + if s, ok := v.data.([]int32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt32Slice gets the value as a []int32. +// +// Panics if the object is not a []int32. +func (v *Value) MustInt32Slice() []int32 { + return v.data.([]int32) +} + +// IsInt32 gets whether the object contained is a int32 or not. +func (v *Value) IsInt32() bool { + _, ok := v.data.(int32) + return ok +} + +// IsInt32Slice gets whether the object contained is a []int32 or not. +func (v *Value) IsInt32Slice() bool { + _, ok := v.data.([]int32) + return ok +} + +// EachInt32 calls the specified callback for each object +// in the []int32. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachInt32(callback func(int, int32) bool) *Value { + + for index, val := range v.MustInt32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt32 uses the specified decider function to select items +// from the []int32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { + + var selected []int32 + + v.EachInt32(func(index int, val int32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int32. +func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { + + groups := make(map[string][]int32) + + v.EachInt32(func(index int, val int32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt32 uses the specified function to replace each int32s +// by iterating each item. The data in the returned result will be a +// []int32 containing the replaced items. +func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { + + arr := v.MustInt32Slice() + replaced := make([]int32, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt32 uses the specified collector function to collect a value +// for each of the int32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { + + arr := v.MustInt32Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt32(func(index int, val int32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Int64 (int64 and []int64) + -------------------------------------------------- +*/ + +// Int64 gets the value as a int64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Int64(optionalDefault ...int64) int64 { + if s, ok := v.data.(int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustInt64 gets the value as a int64. +// +// Panics if the object is not a int64. +func (v *Value) MustInt64() int64 { + return v.data.(int64) +} + +// Int64Slice gets the value as a []int64, returns the optionalDefault +// value or nil if the value is not a []int64. +func (v *Value) Int64Slice(optionalDefault ...[]int64) []int64 { + if s, ok := v.data.([]int64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustInt64Slice gets the value as a []int64. +// +// Panics if the object is not a []int64. +func (v *Value) MustInt64Slice() []int64 { + return v.data.([]int64) +} + +// IsInt64 gets whether the object contained is a int64 or not. +func (v *Value) IsInt64() bool { + _, ok := v.data.(int64) + return ok +} + +// IsInt64Slice gets whether the object contained is a []int64 or not. 
+func (v *Value) IsInt64Slice() bool { + _, ok := v.data.([]int64) + return ok +} + +// EachInt64 calls the specified callback for each object +// in the []int64. +// +// Panics if the object is the wrong type. +func (v *Value) EachInt64(callback func(int, int64) bool) *Value { + + for index, val := range v.MustInt64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereInt64 uses the specified decider function to select items +// from the []int64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { + + var selected []int64 + + v.EachInt64(func(index int, val int64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupInt64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]int64. +func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { + + groups := make(map[string][]int64) + + v.EachInt64(func(index int, val int64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]int64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceInt64 uses the specified function to replace each int64s +// by iterating each item. The data in the returned result will be a +// []int64 containing the replaced items. +func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { + + arr := v.MustInt64Slice() + replaced := make([]int64, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectInt64 uses the specified collector function to collect a value +// for each of the int64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { + + arr := v.MustInt64Slice() + collected := make([]interface{}, len(arr)) + + v.EachInt64(func(index int, val int64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint (uint and []uint) + -------------------------------------------------- +*/ + +// Uint gets the value as a uint, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint(optionalDefault ...uint) uint { + if s, ok := v.data.(uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint gets the value as a uint. +// +// Panics if the object is not a uint. +func (v *Value) MustUint() uint { + return v.data.(uint) +} + +// UintSlice gets the value as a []uint, returns the optionalDefault +// value or nil if the value is not a []uint. +func (v *Value) UintSlice(optionalDefault ...[]uint) []uint { + if s, ok := v.data.([]uint); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintSlice gets the value as a []uint. +// +// Panics if the object is not a []uint. +func (v *Value) MustUintSlice() []uint { + return v.data.([]uint) +} + +// IsUint gets whether the object contained is a uint or not. 
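The iteration helpers follow one shape for every element type; a sketch using the `int` variants (data is illustrative, and `Data()` refers to the raw-value accessor on `Value`):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	m := objx.New(map[string]interface{}{
		"sizes": []int{1, 2, 3, 4},
	})
	v := m.Get("sizes")

	// EachInt visits elements in order; returning false stops the loop.
	v.EachInt(func(i, n int) bool {
		fmt.Println(i, n)
		return n < 3 // stop once an element >= 3 has been visited
	})

	// GroupInt buckets elements under the string key the grouper returns.
	groups := v.GroupInt(func(i, n int) string {
		if n%2 == 0 {
			return "even"
		}
		return "odd"
	}).Data().(map[string][]int)
	fmt.Println(groups) // map[even:[2 4] odd:[1 3]]

	// CollectInt gathers one derived value per element into []interface{}.
	squares := v.CollectInt(func(i, n int) interface{} {
		return n * n
	}).Data().([]interface{})
	fmt.Println(squares) // [1 4 9 16]
}
```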
+func (v *Value) IsUint() bool { + _, ok := v.data.(uint) + return ok +} + +// IsUintSlice gets whether the object contained is a []uint or not. +func (v *Value) IsUintSlice() bool { + _, ok := v.data.([]uint) + return ok +} + +// EachUint calls the specified callback for each object +// in the []uint. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint(callback func(int, uint) bool) *Value { + + for index, val := range v.MustUintSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint uses the specified decider function to select items +// from the []uint. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint(decider func(int, uint) bool) *Value { + + var selected []uint + + v.EachUint(func(index int, val uint) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint. +func (v *Value) GroupUint(grouper func(int, uint) string) *Value { + + groups := make(map[string][]uint) + + v.EachUint(func(index int, val uint) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint uses the specified function to replace each uints +// by iterating each item. The data in the returned result will be a +// []uint containing the replaced items. +func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { + + arr := v.MustUintSlice() + replaced := make([]uint, len(arr)) + + v.EachUint(func(index int, val uint) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint uses the specified collector function to collect a value +// for each of the uints in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { + + arr := v.MustUintSlice() + collected := make([]interface{}, len(arr)) + + v.EachUint(func(index int, val uint) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint8 (uint8 and []uint8) + -------------------------------------------------- +*/ + +// Uint8 gets the value as a uint8, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint8(optionalDefault ...uint8) uint8 { + if s, ok := v.data.(uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint8 gets the value as a uint8. +// +// Panics if the object is not a uint8. +func (v *Value) MustUint8() uint8 { + return v.data.(uint8) +} + +// Uint8Slice gets the value as a []uint8, returns the optionalDefault +// value or nil if the value is not a []uint8. +func (v *Value) Uint8Slice(optionalDefault ...[]uint8) []uint8 { + if s, ok := v.data.([]uint8); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint8Slice gets the value as a []uint8. +// +// Panics if the object is not a []uint8. 
+func (v *Value) MustUint8Slice() []uint8 { + return v.data.([]uint8) +} + +// IsUint8 gets whether the object contained is a uint8 or not. +func (v *Value) IsUint8() bool { + _, ok := v.data.(uint8) + return ok +} + +// IsUint8Slice gets whether the object contained is a []uint8 or not. +func (v *Value) IsUint8Slice() bool { + _, ok := v.data.([]uint8) + return ok +} + +// EachUint8 calls the specified callback for each object +// in the []uint8. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { + + for index, val := range v.MustUint8Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint8 uses the specified decider function to select items +// from the []uint8. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { + + var selected []uint8 + + v.EachUint8(func(index int, val uint8) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint8 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint8. +func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { + + groups := make(map[string][]uint8) + + v.EachUint8(func(index int, val uint8) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint8, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint8 uses the specified function to replace each uint8s +// by iterating each item. The data in the returned result will be a +// []uint8 containing the replaced items. +func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { + + arr := v.MustUint8Slice() + replaced := make([]uint8, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint8 uses the specified collector function to collect a value +// for each of the uint8s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { + + arr := v.MustUint8Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint8(func(index int, val uint8) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint16 (uint16 and []uint16) + -------------------------------------------------- +*/ + +// Uint16 gets the value as a uint16, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint16(optionalDefault ...uint16) uint16 { + if s, ok := v.data.(uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint16 gets the value as a uint16. +// +// Panics if the object is not a uint16. +func (v *Value) MustUint16() uint16 { + return v.data.(uint16) +} + +// Uint16Slice gets the value as a []uint16, returns the optionalDefault +// value or nil if the value is not a []uint16. 
+func (v *Value) Uint16Slice(optionalDefault ...[]uint16) []uint16 { + if s, ok := v.data.([]uint16); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint16Slice gets the value as a []uint16. +// +// Panics if the object is not a []uint16. +func (v *Value) MustUint16Slice() []uint16 { + return v.data.([]uint16) +} + +// IsUint16 gets whether the object contained is a uint16 or not. +func (v *Value) IsUint16() bool { + _, ok := v.data.(uint16) + return ok +} + +// IsUint16Slice gets whether the object contained is a []uint16 or not. +func (v *Value) IsUint16Slice() bool { + _, ok := v.data.([]uint16) + return ok +} + +// EachUint16 calls the specified callback for each object +// in the []uint16. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { + + for index, val := range v.MustUint16Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint16 uses the specified decider function to select items +// from the []uint16. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { + + var selected []uint16 + + v.EachUint16(func(index int, val uint16) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint16 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint16. +func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { + + groups := make(map[string][]uint16) + + v.EachUint16(func(index int, val uint16) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint16, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint16 uses the specified function to replace each uint16s +// by iterating each item. The data in the returned result will be a +// []uint16 containing the replaced items. +func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { + + arr := v.MustUint16Slice() + replaced := make([]uint16, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint16 uses the specified collector function to collect a value +// for each of the uint16s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { + + arr := v.MustUint16Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint16(func(index int, val uint16) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint32 (uint32 and []uint32) + -------------------------------------------------- +*/ + +// Uint32 gets the value as a uint32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uint32(optionalDefault ...uint32) uint32 { + if s, ok := v.data.(uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint32 gets the value as a uint32. +// +// Panics if the object is not a uint32. 
+func (v *Value) MustUint32() uint32 { + return v.data.(uint32) +} + +// Uint32Slice gets the value as a []uint32, returns the optionalDefault +// value or nil if the value is not a []uint32. +func (v *Value) Uint32Slice(optionalDefault ...[]uint32) []uint32 { + if s, ok := v.data.([]uint32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint32Slice gets the value as a []uint32. +// +// Panics if the object is not a []uint32. +func (v *Value) MustUint32Slice() []uint32 { + return v.data.([]uint32) +} + +// IsUint32 gets whether the object contained is a uint32 or not. +func (v *Value) IsUint32() bool { + _, ok := v.data.(uint32) + return ok +} + +// IsUint32Slice gets whether the object contained is a []uint32 or not. +func (v *Value) IsUint32Slice() bool { + _, ok := v.data.([]uint32) + return ok +} + +// EachUint32 calls the specified callback for each object +// in the []uint32. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { + + for index, val := range v.MustUint32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint32 uses the specified decider function to select items +// from the []uint32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { + + var selected []uint32 + + v.EachUint32(func(index int, val uint32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint32. +func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { + + groups := make(map[string][]uint32) + + v.EachUint32(func(index int, val uint32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint32 uses the specified function to replace each uint32s +// by iterating each item. The data in the returned result will be a +// []uint32 containing the replaced items. +func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { + + arr := v.MustUint32Slice() + replaced := make([]uint32, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint32 uses the specified collector function to collect a value +// for each of the uint32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { + + arr := v.MustUint32Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint32(func(index int, val uint32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uint64 (uint64 and []uint64) + -------------------------------------------------- +*/ + +// Uint64 gets the value as a uint64, returns the optionalDefault +// value or a system default object if the value is the wrong type. 
+func (v *Value) Uint64(optionalDefault ...uint64) uint64 { + if s, ok := v.data.(uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUint64 gets the value as a uint64. +// +// Panics if the object is not a uint64. +func (v *Value) MustUint64() uint64 { + return v.data.(uint64) +} + +// Uint64Slice gets the value as a []uint64, returns the optionalDefault +// value or nil if the value is not a []uint64. +func (v *Value) Uint64Slice(optionalDefault ...[]uint64) []uint64 { + if s, ok := v.data.([]uint64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUint64Slice gets the value as a []uint64. +// +// Panics if the object is not a []uint64. +func (v *Value) MustUint64Slice() []uint64 { + return v.data.([]uint64) +} + +// IsUint64 gets whether the object contained is a uint64 or not. +func (v *Value) IsUint64() bool { + _, ok := v.data.(uint64) + return ok +} + +// IsUint64Slice gets whether the object contained is a []uint64 or not. +func (v *Value) IsUint64Slice() bool { + _, ok := v.data.([]uint64) + return ok +} + +// EachUint64 calls the specified callback for each object +// in the []uint64. +// +// Panics if the object is the wrong type. +func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { + + for index, val := range v.MustUint64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUint64 uses the specified decider function to select items +// from the []uint64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { + + var selected []uint64 + + v.EachUint64(func(index int, val uint64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUint64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uint64. +func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { + + groups := make(map[string][]uint64) + + v.EachUint64(func(index int, val uint64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uint64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUint64 uses the specified function to replace each uint64s +// by iterating each item. The data in the returned result will be a +// []uint64 containing the replaced items. +func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { + + arr := v.MustUint64Slice() + replaced := make([]uint64, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUint64 uses the specified collector function to collect a value +// for each of the uint64s in the slice. The data returned will be a +// []interface{}. 
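Note that these accessors assert the exact dynamic type and never convert between numeric kinds; an `int` does not satisfy `Uint64`, for example. A sketch of both failure modes (the stored value is illustrative):

```go
package main

import (
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	v := objx.New(map[string]interface{}{"n": 42}).Get("n") // stored as int

	// The soft getter sees an int, not a uint64, so the default applies.
	fmt.Println(v.Uint64(7)) // 7

	// MustUint64 panics on the same mismatch; recover turns the panic
	// into a printable value.
	func() {
		defer func() {
			if r := recover(); r != nil {
				fmt.Println("not a uint64:", r)
			}
		}()
		_ = v.MustUint64()
	}()

	// The int accessors match the stored type directly.
	fmt.Println(v.Int()) // 42
}
```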
+func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { + + arr := v.MustUint64Slice() + collected := make([]interface{}, len(arr)) + + v.EachUint64(func(index int, val uint64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Uintptr (uintptr and []uintptr) + -------------------------------------------------- +*/ + +// Uintptr gets the value as a uintptr, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Uintptr(optionalDefault ...uintptr) uintptr { + if s, ok := v.data.(uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustUintptr gets the value as a uintptr. +// +// Panics if the object is not a uintptr. +func (v *Value) MustUintptr() uintptr { + return v.data.(uintptr) +} + +// UintptrSlice gets the value as a []uintptr, returns the optionalDefault +// value or nil if the value is not a []uintptr. +func (v *Value) UintptrSlice(optionalDefault ...[]uintptr) []uintptr { + if s, ok := v.data.([]uintptr); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustUintptrSlice gets the value as a []uintptr. +// +// Panics if the object is not a []uintptr. +func (v *Value) MustUintptrSlice() []uintptr { + return v.data.([]uintptr) +} + +// IsUintptr gets whether the object contained is a uintptr or not. +func (v *Value) IsUintptr() bool { + _, ok := v.data.(uintptr) + return ok +} + +// IsUintptrSlice gets whether the object contained is a []uintptr or not. +func (v *Value) IsUintptrSlice() bool { + _, ok := v.data.([]uintptr) + return ok +} + +// EachUintptr calls the specified callback for each object +// in the []uintptr. +// +// Panics if the object is the wrong type. +func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { + + for index, val := range v.MustUintptrSlice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereUintptr uses the specified decider function to select items +// from the []uintptr. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { + + var selected []uintptr + + v.EachUintptr(func(index int, val uintptr) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupUintptr uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]uintptr. +func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { + + groups := make(map[string][]uintptr) + + v.EachUintptr(func(index int, val uintptr) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]uintptr, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceUintptr uses the specified function to replace each uintptrs +// by iterating each item. The data in the returned result will be a +// []uintptr containing the replaced items. 
+func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { + + arr := v.MustUintptrSlice() + replaced := make([]uintptr, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectUintptr uses the specified collector function to collect a value +// for each of the uintptrs in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { + + arr := v.MustUintptrSlice() + collected := make([]interface{}, len(arr)) + + v.EachUintptr(func(index int, val uintptr) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float32 (float32 and []float32) + -------------------------------------------------- +*/ + +// Float32 gets the value as a float32, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float32(optionalDefault ...float32) float32 { + if s, ok := v.data.(float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat32 gets the value as a float32. +// +// Panics if the object is not a float32. +func (v *Value) MustFloat32() float32 { + return v.data.(float32) +} + +// Float32Slice gets the value as a []float32, returns the optionalDefault +// value or nil if the value is not a []float32. +func (v *Value) Float32Slice(optionalDefault ...[]float32) []float32 { + if s, ok := v.data.([]float32); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat32Slice gets the value as a []float32. +// +// Panics if the object is not a []float32. +func (v *Value) MustFloat32Slice() []float32 { + return v.data.([]float32) +} + +// IsFloat32 gets whether the object contained is a float32 or not. +func (v *Value) IsFloat32() bool { + _, ok := v.data.(float32) + return ok +} + +// IsFloat32Slice gets whether the object contained is a []float32 or not. +func (v *Value) IsFloat32Slice() bool { + _, ok := v.data.([]float32) + return ok +} + +// EachFloat32 calls the specified callback for each object +// in the []float32. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { + + for index, val := range v.MustFloat32Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat32 uses the specified decider function to select items +// from the []float32. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { + + var selected []float32 + + v.EachFloat32(func(index int, val float32) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat32 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float32. 
+func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { + + groups := make(map[string][]float32) + + v.EachFloat32(func(index int, val float32) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float32, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat32 uses the specified function to replace each float32s +// by iterating each item. The data in the returned result will be a +// []float32 containing the replaced items. +func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { + + arr := v.MustFloat32Slice() + replaced := make([]float32, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat32 uses the specified collector function to collect a value +// for each of the float32s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { + + arr := v.MustFloat32Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat32(func(index int, val float32) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Float64 (float64 and []float64) + -------------------------------------------------- +*/ + +// Float64 gets the value as a float64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Float64(optionalDefault ...float64) float64 { + if s, ok := v.data.(float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustFloat64 gets the value as a float64. +// +// Panics if the object is not a float64. +func (v *Value) MustFloat64() float64 { + return v.data.(float64) +} + +// Float64Slice gets the value as a []float64, returns the optionalDefault +// value or nil if the value is not a []float64. +func (v *Value) Float64Slice(optionalDefault ...[]float64) []float64 { + if s, ok := v.data.([]float64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustFloat64Slice gets the value as a []float64. +// +// Panics if the object is not a []float64. +func (v *Value) MustFloat64Slice() []float64 { + return v.data.([]float64) +} + +// IsFloat64 gets whether the object contained is a float64 or not. +func (v *Value) IsFloat64() bool { + _, ok := v.data.(float64) + return ok +} + +// IsFloat64Slice gets whether the object contained is a []float64 or not. +func (v *Value) IsFloat64Slice() bool { + _, ok := v.data.([]float64) + return ok +} + +// EachFloat64 calls the specified callback for each object +// in the []float64. +// +// Panics if the object is the wrong type. +func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { + + for index, val := range v.MustFloat64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereFloat64 uses the specified decider function to select items +// from the []float64. The object contained in the result will contain +// only the selected items. 
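Because `encoding/json` decodes every JSON number into `float64`, `Float64` is usually the accessor that matches JSON-derived data, while the integer accessors fall through to their defaults. A sketch (payload is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/stretchr/objx"
)

func main() {
	var data map[string]interface{}
	if err := json.Unmarshal([]byte(`{"size": 12345}`), &data); err != nil {
		panic(err)
	}

	m := objx.New(data)
	fmt.Println(m.Get("size").Float64())   // 12345: JSON numbers are float64
	fmt.Println(m.Get("size").Int())       // 0: no conversion from float64
	fmt.Println(m.Get("size").IsFloat64()) // true
}
```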
+func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { + + var selected []float64 + + v.EachFloat64(func(index int, val float64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupFloat64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]float64. +func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { + + groups := make(map[string][]float64) + + v.EachFloat64(func(index int, val float64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]float64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceFloat64 uses the specified function to replace each float64s +// by iterating each item. The data in the returned result will be a +// []float64 containing the replaced items. +func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { + + arr := v.MustFloat64Slice() + replaced := make([]float64, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectFloat64 uses the specified collector function to collect a value +// for each of the float64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { + + arr := v.MustFloat64Slice() + collected := make([]interface{}, len(arr)) + + v.EachFloat64(func(index int, val float64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex64 (complex64 and []complex64) + -------------------------------------------------- +*/ + +// Complex64 gets the value as a complex64, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex64(optionalDefault ...complex64) complex64 { + if s, ok := v.data.(complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex64 gets the value as a complex64. +// +// Panics if the object is not a complex64. +func (v *Value) MustComplex64() complex64 { + return v.data.(complex64) +} + +// Complex64Slice gets the value as a []complex64, returns the optionalDefault +// value or nil if the value is not a []complex64. +func (v *Value) Complex64Slice(optionalDefault ...[]complex64) []complex64 { + if s, ok := v.data.([]complex64); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex64Slice gets the value as a []complex64. +// +// Panics if the object is not a []complex64. +func (v *Value) MustComplex64Slice() []complex64 { + return v.data.([]complex64) +} + +// IsComplex64 gets whether the object contained is a complex64 or not. +func (v *Value) IsComplex64() bool { + _, ok := v.data.(complex64) + return ok +} + +// IsComplex64Slice gets whether the object contained is a []complex64 or not. +func (v *Value) IsComplex64Slice() bool { + _, ok := v.data.([]complex64) + return ok +} + +// EachComplex64 calls the specified callback for each object +// in the []complex64. +// +// Panics if the object is the wrong type. 
+func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { + + for index, val := range v.MustComplex64Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex64 uses the specified decider function to select items +// from the []complex64. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { + + var selected []complex64 + + v.EachComplex64(func(index int, val complex64) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex64 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex64. +func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { + + groups := make(map[string][]complex64) + + v.EachComplex64(func(index int, val complex64) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex64, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex64 uses the specified function to replace each complex64s +// by iterating each item. The data in the returned result will be a +// []complex64 containing the replaced items. +func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { + + arr := v.MustComplex64Slice() + replaced := make([]complex64, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex64 uses the specified collector function to collect a value +// for each of the complex64s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { + + arr := v.MustComplex64Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex64(func(index int, val complex64) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} + +/* + Complex128 (complex128 and []complex128) + -------------------------------------------------- +*/ + +// Complex128 gets the value as a complex128, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) Complex128(optionalDefault ...complex128) complex128 { + if s, ok := v.data.(complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return 0 +} + +// MustComplex128 gets the value as a complex128. +// +// Panics if the object is not a complex128. +func (v *Value) MustComplex128() complex128 { + return v.data.(complex128) +} + +// Complex128Slice gets the value as a []complex128, returns the optionalDefault +// value or nil if the value is not a []complex128. +func (v *Value) Complex128Slice(optionalDefault ...[]complex128) []complex128 { + if s, ok := v.data.([]complex128); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustComplex128Slice gets the value as a []complex128. +// +// Panics if the object is not a []complex128. 
+func (v *Value) MustComplex128Slice() []complex128 { + return v.data.([]complex128) +} + +// IsComplex128 gets whether the object contained is a complex128 or not. +func (v *Value) IsComplex128() bool { + _, ok := v.data.(complex128) + return ok +} + +// IsComplex128Slice gets whether the object contained is a []complex128 or not. +func (v *Value) IsComplex128Slice() bool { + _, ok := v.data.([]complex128) + return ok +} + +// EachComplex128 calls the specified callback for each object +// in the []complex128. +// +// Panics if the object is the wrong type. +func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { + + for index, val := range v.MustComplex128Slice() { + carryon := callback(index, val) + if carryon == false { + break + } + } + + return v + +} + +// WhereComplex128 uses the specified decider function to select items +// from the []complex128. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { + + var selected []complex128 + + v.EachComplex128(func(index int, val complex128) bool { + shouldSelect := decider(index, val) + if shouldSelect == false { + selected = append(selected, val) + } + return true + }) + + return &Value{data: selected} + +} + +// GroupComplex128 uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]complex128. +func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { + + groups := make(map[string][]complex128) + + v.EachComplex128(func(index int, val complex128) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]complex128, 0) + } + groups[group] = append(groups[group], val) + return true + }) + + return &Value{data: groups} + +} + +// ReplaceComplex128 uses the specified function to replace each complex128s +// by iterating each item. The data in the returned result will be a +// []complex128 containing the replaced items. +func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { + + arr := v.MustComplex128Slice() + replaced := make([]complex128, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + replaced[index] = replacer(index, val) + return true + }) + + return &Value{data: replaced} + +} + +// CollectComplex128 uses the specified collector function to collect a value +// for each of the complex128s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { + + arr := v.MustComplex128Slice() + collected := make([]interface{}, len(arr)) + + v.EachComplex128(func(index int, val complex128) bool { + collected[index] = collector(index, val) + return true + }) + + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go new file mode 100644 index 00000000..7aaef06b --- /dev/null +++ b/vendor/github.com/stretchr/testify/vendor/github.com/stretchr/objx/value.go @@ -0,0 +1,13 @@ +package objx + +// Value provides methods for extracting interface{} data in various +// types. 
+type Value struct { + // data contains the raw data being managed by this Value + data interface{} +} + +// Data returns the raw data contained by this Value +func (v *Value) Data() interface{} { + return v.data +} diff --git a/vendor/github.com/technoweenie/go-contentaddressable/LICENSE b/vendor/github.com/technoweenie/go-contentaddressable/LICENSE new file mode 100644 index 00000000..1bc5edff --- /dev/null +++ b/vendor/github.com/technoweenie/go-contentaddressable/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2014 rick olson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/technoweenie/go-contentaddressable/README.md b/vendor/github.com/technoweenie/go-contentaddressable/README.md new file mode 100644 index 00000000..1af73df4 --- /dev/null +++ b/vendor/github.com/technoweenie/go-contentaddressable/README.md @@ -0,0 +1,49 @@ +# Content Addressable + +Package contentaddressable contains tools for writing content addressable files. +Files are written to a temporary location, and only renamed to the final +location after the file's OID (Object ID) has been verified. + +```go +filename := "path/to/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b" +file, err := contentaddressable.NewFile(filename) +if err != nil { + panic(err) +} +defer file.Close() + +file.Oid // 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + +written, err := io.Copy(file, someReader) + +if err == nil { +// Move file to final location if OID is verified. + err = file.Accept() +} + +if err != nil { + panic(err) +} +``` + +See the [godocs](http://godoc.org/github.com/technoweenie/go-contentaddressable) +for details. + +## Installation + + $ go get github.com/technoweenie/go-contentaddressable + +Then import it: + + import "github.com/technoweenie/go-contentaddressable" + +## Note on Patches/Pull Requests + +1. Fork the project on GitHub. +2. Make your feature addition or bug fix. +3. Add tests for it. This is important so I don't break it in a future version + unintentionally. +4. Commit, do not mess with version or history. (if you want to have + your own version, that is fine but bump version in a commit by itself I can + ignore when I pull) +5. Send me a pull request. Bonus points for topic branches. 
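The README example above elides imports and an input reader. Below is a minimal, self-contained sketch of the same flow using the `NewFile`/`Write`/`Accept` API vendored in this diff; the `objects/` directory and the `"SUP"` payload (whose SHA-256 appears in the package's own tests) are illustrative, not part of the library.

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/technoweenie/go-contentaddressable"
)

func main() {
	// SHA-256("SUP"), as used in the package's own tests.
	oid := "a2b71d6ee8997eb87b25ab42d566c44f6a32871752c7c73eb5578cb1182f7be0"

	// NewFile opens both the final path and a "<path>-temp" file in
	// exclusive mode; the OID is taken from the base name of the path.
	file, err := contentaddressable.NewFile("objects/" + oid)
	if err != nil {
		panic(err)
	}
	defer file.Close()

	// Writes stream to the temp file while the SHA-256 hash is updated.
	if _, err := io.Copy(file, bytes.NewReader([]byte("SUP"))); err != nil {
		panic(err)
	}

	// Accept compares the computed hash with the OID and, on a match,
	// renames the temp file into the final location; a mismatch
	// returns an error instead.
	if err := file.Accept(); err != nil {
		panic(err)
	}

	fmt.Println("stored", oid)
}
```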
diff --git a/vendor/github.com/technoweenie/go-contentaddressable/contentaddressable.go b/vendor/github.com/technoweenie/go-contentaddressable/contentaddressable.go new file mode 100644 index 00000000..7c8c4d5a --- /dev/null +++ b/vendor/github.com/technoweenie/go-contentaddressable/contentaddressable.go @@ -0,0 +1,28 @@ +/* +Package contentaddressable contains tools for writing content addressable files. +Files are written to a temporary location, and only renamed to the final +location after the file's OID (Object ID) has been verified. + + filename := "path/to/01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b" + file, err := contentaddressable.NewFile(filename) + if err != nil { + panic(err) + } + defer file.Close() + + file.Oid // 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b + + written, err := io.Copy(file, someReader) + + if err == nil { + // Move file to final location if OID is verified. + err = file.Accept() + } + + if err != nil { + panic(err) + } + +Currently SHA-256 is used for a file's OID. +*/ +package contentaddressable diff --git a/vendor/github.com/technoweenie/go-contentaddressable/file.go b/vendor/github.com/technoweenie/go-contentaddressable/file.go new file mode 100644 index 00000000..7909ec18 --- /dev/null +++ b/vendor/github.com/technoweenie/go-contentaddressable/file.go @@ -0,0 +1,131 @@ +package contentaddressable + +import ( + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "hash" + "os" + "path/filepath" +) + +var ( + AlreadyClosed = errors.New("Already closed.") + HasData = errors.New("Destination file already has data.") +) + +// File handles the atomic writing of a content addressable file. It writes to +// a temp file, and then renames to the final location after Accept(). +type File struct { + Oid string + filename string + tempFilename string + file *os.File + tempFile *os.File + hasher hash.Hash +} + +// NewFile initializes a content addressable file for writing. It opens both +// the given filename, and a temp filename in exclusive mode. The *File OID +// is taken from the base name of the given filename. +func NewFile(filename string) (*File, error) { + oid := filepath.Base(filename) + dir := filepath.Dir(filename) + if err := os.MkdirAll(dir, 0755); err != nil { + return nil, err + } + + tempFilename := filename + "-temp" + tempFile, err := os.OpenFile(tempFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) + if err != nil { + return nil, err + } + + file, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644) + if err != nil { + cleanupFile(tempFile) + return nil, err + } + + caw := &File{ + Oid: oid, + filename: filename, + tempFilename: tempFilename, + file: file, + tempFile: tempFile, + hasher: sha256.New(), + } + + return caw, nil +} + +// Write sends data to the temporary file. +func (w *File) Write(p []byte) (int, error) { + if w.Closed() { + return 0, AlreadyClosed + } + + w.hasher.Write(p) + return w.tempFile.Write(p) +} + +// Accept verifies the written content SHA-256 signature matches the given OID. +// If it matches, the temp file is renamed to the original filename. If not, +// an error is returned. +func (w *File) Accept() error { + if w.Closed() { + return AlreadyClosed + } + + sig := hex.EncodeToString(w.hasher.Sum(nil)) + if sig != w.Oid { + return fmt.Errorf("Content mismatch. 
Expected OID %s, got %s", w.Oid, sig) + } + + if err := cleanupFile(w.file); err != nil { + return err + } + w.file = nil + + w.tempFile.Close() + err := os.Rename(w.tempFilename, w.filename) + w.Close() + return err +} + +// Close cleans up the internal file objects. +func (w *File) Close() error { + if w.tempFile != nil { + if err := cleanupFile(w.tempFile); err != nil { + return err + } + w.tempFile = nil + } + + if w.file != nil { + if err := cleanupFile(w.file); err != nil { + return err + } + w.file = nil + } + + return nil +} + +// Closed reports whether this file object has been closed. +func (w *File) Closed() bool { + if w.tempFile == nil || w.file == nil { + return true + } + return false +} + +func cleanupFile(f *os.File) error { + err := f.Close() + if err := os.RemoveAll(f.Name()); err != nil { + return err + } + + return err +} diff --git a/vendor/github.com/technoweenie/go-contentaddressable/file_test.go b/vendor/github.com/technoweenie/go-contentaddressable/file_test.go new file mode 100644 index 00000000..b9dca47c --- /dev/null +++ b/vendor/github.com/technoweenie/go-contentaddressable/file_test.go @@ -0,0 +1,176 @@ +package contentaddressable + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "reflect" + "runtime" +) + +var supOid = "a2b71d6ee8997eb87b25ab42d566c44f6a32871752c7c73eb5578cb1182f7be0" + +func TestFile(t *testing.T) { + test := SetupFile(t) + defer test.Teardown() + + filename := filepath.Join(test.Path, supOid) + aw, err := NewFile(filename) + assertEqual(t, nil, err) + + n, err := aw.Write([]byte("SUP")) + assertEqual(t, nil, err) + assertEqual(t, 3, n) + + by, err := ioutil.ReadFile(filename) + assertEqual(t, nil, err) + assertEqual(t, 0, len(by)) + + assertEqual(t, nil, aw.Accept()) + + by, err = ioutil.ReadFile(filename) + assertEqual(t, nil, err) + assertEqual(t, "SUP", string(by)) + + assertEqual(t, nil, aw.Close()) +} + +func TestFileMismatch(t *testing.T) { + test := SetupFile(t) + defer test.Teardown() + + filename := filepath.Join(test.Path, "b2b71d6ee8997eb87b25ab42d566c44f6a32871752c7c73eb5578cb1182f7be0") + aw, err := NewFile(filename) + assertEqual(t, nil, err) + + n, err := aw.Write([]byte("SUP")) + assertEqual(t, nil, err) + assertEqual(t, 3, n) + + by, err := ioutil.ReadFile(filename) + assertEqual(t, nil, err) + assertEqual(t, 0, len(by)) + + err = aw.Accept() + if err == nil || !strings.Contains(err.Error(), "Content mismatch") { + t.Errorf("Expected mismatch error: %s", err) + } + + by, err = ioutil.ReadFile(filename) + assertEqual(t, nil, err) + assertEqual(t, "", string(by)) + + assertEqual(t, nil, aw.Close()) + + _, err = ioutil.ReadFile(filename) + assertEqual(t, true, os.IsNotExist(err)) +} + +func TestFileCancel(t *testing.T) { + test := SetupFile(t) + defer test.Teardown() + + filename := filepath.Join(test.Path, supOid) + aw, err := NewFile(filename) + assertEqual(t, nil, err) + + n, err := aw.Write([]byte("SUP")) + assertEqual(t, nil, err) + assertEqual(t, 3, n) + + assertEqual(t, nil, aw.Close()) + + for _, name := range []string{aw.filename, aw.tempFilename} { + if _, err := os.Stat(name); err == nil { + t.Errorf("%s exists?", name) + } + } +} + +func TestFileLocks(t *testing.T) { + test := SetupFile(t) + defer test.Teardown() + + filename := filepath.Join(test.Path, supOid) + aw, err := NewFile(filename) + assertEqual(t, nil, err) + assertEqual(t, filename, aw.filename) + assertEqual(t, filename+"-temp", aw.tempFilename) + + files := []string{aw.filename, aw.tempFilename} + + for _, name := range 
files { + if _, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0665); err == nil { + t.Errorf("Able to open %s!", name) + } + } + + assertEqual(t, nil, aw.Close()) + + for _, name := range files { + f, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0665) + assertEqualf(t, nil, err, "unable to open %s: %s", name, err) + cleanupFile(f) + } +} + +func TestFileDuel(t *testing.T) { + test := SetupFile(t) + defer test.Teardown() + + filename := filepath.Join(test.Path, supOid) + aw, err := NewFile(filename) + assertEqual(t, nil, err) + defer aw.Close() + + if _, err := NewFile(filename); err == nil { + t.Errorf("Expected a file open conflict!") + } +} + +func SetupFile(t *testing.T) *FileTest { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("Error getting wd: %s", err) + } + + return &FileTest{filepath.Join(wd, "File"), t} +} + +type FileTest struct { + Path string + *testing.T +} + +func (t *FileTest) Teardown() { + if err := os.RemoveAll(t.Path); err != nil { + t.Fatalf("Error removing %s: %s", t.Path, err) + } +} + +func assertEqual(t *testing.T, expected, actual interface{}) { + checkAssertion(t, expected, actual, "") +} + +func assertEqualf(t *testing.T, expected, actual interface{}, format string, args ...interface{}) { + checkAssertion(t, expected, actual, format, args...) +} + +func checkAssertion(t *testing.T, expected, actual interface{}, format string, args ...interface{}) { + if expected == nil { + if actual == nil { + return + } + } else if reflect.DeepEqual(expected, actual) { + return + } + + _, file, line, _ := runtime.Caller(2) // assertEqual + checkAssertion + t.Logf("%s:%d\nExpected: %v\nActual: %v", file, line, expected, actual) + if len(args) > 0 { + t.Logf("! - "+format, args...) + } + t.FailNow() +} diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md new file mode 100644 index 00000000..dbe4d508 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md @@ -0,0 +1,8 @@ +# gojsonpointer +An implementation of JSON Pointer - Go language + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. 
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go new file mode 100644 index 00000000..6ca317a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go @@ -0,0 +1,217 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +func NewJsonPointer(jsonPointerString string) (JsonPointer, error) { + + var p JsonPointer + err := p.parse(jsonPointerString) + return p, err + +} + +type JsonPointer struct { + referenceTokens []string +} + +// "Constructor", parses the given string JSON pointer +func (p *JsonPointer) parse(jsonPointerString string) error { + + var err error + + if jsonPointerString != const_empty_pointer { + if !strings.HasPrefix(jsonPointerString, const_pointer_separator) { + err = errors.New(const_invalid_start) + } else { + referenceTokens := strings.Split(jsonPointerString, const_pointer_separator) + for _, referenceToken := range referenceTokens[1:] { + p.referenceTokens = append(p.referenceTokens, referenceToken) + } + } + } + + return err +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + for ti, token := range p.referenceTokens { + + decodedToken := decodeReferenceToken(token) + isLastToken := ti == len(p.referenceTokens)-1 + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + switch kind { + + case reflect.Map: + m := 
node.(map[string]interface{}) + if _, ok := m[decodedToken]; ok { + node = m[decodedToken] + if isLastToken && i.mode == "SET" { + m[decodedToken] = i.setInValue + } + } else { + i.outError = errors.New(fmt.Sprintf("Object has no key '%s'", token)) + i.getOutKind = kind + i.getOutNode = nil + return + } + + case reflect.Slice: + s := node.([]interface{}) + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = errors.New(fmt.Sprintf("Invalid array index '%s'", token)) + i.getOutKind = kind + i.getOutNode = nil + return + } + sLength := len(s) + if tokenIndex < 0 || tokenIndex >= sLength { + i.outError = errors.New(fmt.Sprintf("Out of bound array[0,%d] index '%d'", sLength, tokenIndex)) + i.getOutKind = kind + i.getOutNode = nil + return + } + + node = s[tokenIndex] + if isLastToken && i.mode == "SET" { + s[tokenIndex] = i.setInValue + } + + default: + i.outError = errors.New(fmt.Sprintf("Invalid token reference '%s'", token)) + i.getOutKind = kind + i.getOutNode = nil + return + } + + } + + rValue := reflect.ValueOf(node) + kind = rValue.Kind() + + i.getOutNode = node + i.getOutKind = kind + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +const ( + const_encoded_reference_token_0 = `~0` + const_encoded_reference_token_1 = `~1` + const_decoded_reference_token_0 = `~` + const_decoded_reference_token_1 = `/` +) + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, const_encoded_reference_token_1, const_decoded_reference_token_1, -1) + step2 := strings.Replace(step1, const_encoded_reference_token_0, const_decoded_reference_token_0, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, const_decoded_reference_token_1, const_encoded_reference_token_1, -1) + step2 := strings.Replace(step1, const_decoded_reference_token_0, const_encoded_reference_token_0, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer_test.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer_test.go new file mode 100644 index 00000000..ddf3663d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer_test.go @@ -0,0 +1,205 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Automated tests on package. 
+// +// created 03-03-2013 + +package gojsonpointer + +import ( + "encoding/json" + "testing" +) + +const ( + TEST_DOCUMENT_NB_ELEMENTS = 11 + TEST_NODE_OBJ_NB_ELEMENTS = 4 + TEST_DOCUMENT_STRING = `{ +"foo": ["bar", "baz"], +"obj": { "a":1, "b":2, "c":[3,4], "d":[ {"e":9}, {"f":[50,51]} ] }, +"": 0, +"a/b": 1, +"c%d": 2, +"e^f": 3, +"g|h": 4, +"i\\j": 5, +"k\"l": 6, +" ": 7, +"m~n": 8 +}` +) + +var testDocumentJson interface{} + +func init() { + json.Unmarshal([]byte(TEST_DOCUMENT_STRING), &testDocumentJson) +} + +func TestEscaping(t *testing.T) { + + ins := []string{`/`, `/`, `/a~1b`, `/a~1b`, `/c%d`, `/e^f`, `/g|h`, `/i\j`, `/k"l`, `/ `, `/m~0n`} + outs := []float64{0, 0, 1, 1, 2, 3, 4, 5, 6, 7, 8} + + for i := range ins { + + p, err := NewJsonPointer(ins[i]) + if err != nil { + t.Errorf("NewJsonPointer(%v) error %v", ins[i], err.Error()) + } + + result, _, err := p.Get(testDocumentJson) + if err != nil { + t.Errorf("Get(%v) error %v", ins[i], err.Error()) + } + + if result != outs[i] { + t.Errorf("Get(%v) = %v, expect %v", ins[i], result, outs[i]) + } + } + +} + +func TestFullDocument(t *testing.T) { + + in := `` + + p, err := NewJsonPointer(in) + if err != nil { + t.Errorf("NewJsonPointer(%v) error %v", in, err.Error()) + } + + result, _, err := p.Get(testDocumentJson) + if err != nil { + t.Errorf("Get(%v) error %v", in, err.Error()) + } + + if len(result.(map[string]interface{})) != TEST_DOCUMENT_NB_ELEMENTS { + t.Errorf("Get(%v) = %v, expect full document", in, result) + } +} + +func TestGetNode(t *testing.T) { + + in := `/obj` + + p, err := NewJsonPointer(in) + if err != nil { + t.Errorf("NewJsonPointer(%v) error %v", in, err.Error()) + } + + result, _, err := p.Get(testDocumentJson) + if err != nil { + t.Errorf("Get(%v) error %v", in, err.Error()) + } + + if len(result.(map[string]interface{})) != TEST_NODE_OBJ_NB_ELEMENTS { + t.Errorf("Get(%v) = %v, expect full document", in, result) + } +} + +func TestArray(t *testing.T) { + + ins := []string{`/foo/0`, `/foo/0`, `/foo/1`} + outs := []string{"bar", "bar", "baz"} + + for i := range ins { + + p, err := NewJsonPointer(ins[i]) + if err != nil { + t.Errorf("NewJsonPointer(%v) error %v", ins[i], err.Error()) + } + + result, _, err := p.Get(testDocumentJson) + if err != nil { + t.Errorf("Get(%v) error %v", ins[i], err.Error()) + } + + if result != outs[i] { + t.Errorf("Get(%v) = %v, expect %v", ins[i], result, outs[i]) + } + } + +} + +func TestObject(t *testing.T) { + + ins := []string{`/obj/a`, `/obj/b`, `/obj/c/0`, `/obj/c/1`, `/obj/c/1`, `/obj/d/1/f/0`} + outs := []float64{1, 2, 3, 4, 4, 50} + + for i := range ins { + + p, err := NewJsonPointer(ins[i]) + if err != nil { + t.Errorf("NewJsonPointer(%v) error %v", ins[i], err.Error()) + } + + result, _, err := p.Get(testDocumentJson) + if err != nil { + t.Errorf("Get(%v) error %v", ins[i], err.Error()) + } + + if result != outs[i] { + t.Errorf("Get(%v) = %v, expect %v", ins[i], result, outs[i]) + } + } + +} + +func TestSetNode(t *testing.T) { + + jsonText := `{"a":[{"b": 1, "c": 2}], "d": 3}` + + var jsonDocument interface{} + json.Unmarshal([]byte(jsonText), &jsonDocument) + + in := "/a/0/c" + + p, err := NewJsonPointer(in) + if err != nil { + t.Errorf("NewJsonPointer(%v) error %v", in, err.Error()) + } + + _, err = p.Set(jsonDocument, 999) + if err != nil { + t.Errorf("Set(%v) error %v", in, err.Error()) + } + + firstNode := jsonDocument.(map[string]interface{}) + if len(firstNode) != 2 { + t.Errorf("Set(%s) failed", in) + } + + sliceNode := firstNode["a"].([]interface{}) + if 
len(sliceNode) != 1 { + t.Errorf("Set(%s) failed", in) + } + + changedNode := sliceNode[0].(map[string]interface{}) + changedNodeValue := changedNode["c"].(int) + + if changedNodeValue != 999 { + if len(sliceNode) != 1 { + t.Errorf("Set(%s) failed", in) + } + } + +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 00000000..9ab6e1eb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 00000000..d4d2eca0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,141 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. 
+// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "github.com/xeipuuv/gojsonpointer" + "net/url" + "path/filepath" + "runtime" + "strings" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + childUrl := child.GetUrl() + parentUrl := r.GetUrl() + if childUrl == nil { + return nil, errors.New("childUrl is nil!") + } + if parentUrl == nil { + return nil, errors.New("parentUrl is nil!") + } + + ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference_test.go b/vendor/github.com/xeipuuv/gojsonreference/reference_test.go new file mode 100644 index 00000000..4f6e7735 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference_test.go @@ -0,0 +1,378 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Automated tests on package. +// +// created 03-03-2013 + +package gojsonreference + +import ( + "testing" +) + +func TestFull(t *testing.T) { + + in := "http://host/path/a/b/c#/f/a/b" + + r1, err := NewJsonReference(in) + if err != nil { + t.Errorf("NewJsonReference(%v) error %s", in, err.Error()) + } + + if in != r1.String() { + t.Errorf("NewJsonReference(%v) = %v, expect %v", in, r1.String(), in) + } + + if r1.HasFragmentOnly != false { + t.Errorf("NewJsonReference(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) + } + + if r1.HasFullUrl != true { + t.Errorf("NewJsonReference(%v)::HasFullUrl %v expect %v", in, r1.HasFullUrl, true) + } + + if r1.HasUrlPathOnly != false { + t.Errorf("NewJsonReference(%v)::HasUrlPathOnly %v expect %v", in, r1.HasUrlPathOnly, false) + } + + if r1.HasFileScheme != false { + t.Errorf("NewJsonReference(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) + } + + if r1.GetPointer().String() != "/f/a/b" { + t.Errorf("NewJsonReference(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "/f/a/b") + } +} + +func TestFullUrl(t *testing.T) { + + in := "http://host/path/a/b/c" + + r1, err := NewJsonReference(in) + if err != nil { + t.Errorf("NewJsonReference(%v) error %s", in, err.Error()) + } + + if in != r1.String() { + t.Errorf("NewJsonReference(%v) = %v, expect %v", in, r1.String(), in) + } + + if r1.HasFragmentOnly != false { + t.Errorf("NewJsonReference(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) + } + + if r1.HasFullUrl != true { + t.Errorf("NewJsonReference(%v)::HasFullUrl %v expect %v", in, r1.HasFullUrl, true) + } + + if r1.HasUrlPathOnly != false { + t.Errorf("NewJsonReference(%v)::HasUrlPathOnly %v expect %v", in, r1.HasUrlPathOnly, false) + } + + if r1.HasFileScheme != false { + t.Errorf("NewJsonReference(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) + } + + if r1.GetPointer().String() != "" { + t.Errorf("NewJsonReference(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "") + } +} + +func TestFragmentOnly(t *testing.T) { + + in := "#/fragment/only" + + r1, err := NewJsonReference(in) + if err != nil { + t.Errorf("NewJsonReference(%v) error %s", in, err.Error()) + } + + if in != r1.String() { + t.Errorf("NewJsonReference(%v) = %v, expect %v", in, r1.String(), in) + } + + if r1.HasFragmentOnly != true { + t.Errorf("NewJsonReference(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, true) + } + + if r1.HasFullUrl != false { + t.Errorf("NewJsonReference(%v)::HasFullUrl %v expect %v", in, r1.HasFullUrl, false) + } + + if r1.HasUrlPathOnly != false { + t.Errorf("NewJsonReference(%v)::HasUrlPathOnly %v expect %v", in, r1.HasUrlPathOnly, false) + } + + if r1.HasFileScheme != false { + t.Errorf("NewJsonReference(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) + } + + if r1.GetPointer().String() != 
"/fragment/only" { + t.Errorf("NewJsonReference(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "/fragment/only") + } +} + +func TestUrlPathOnly(t *testing.T) { + + in := "/documents/document.json" + + r1, err := NewJsonReference(in) + if err != nil { + t.Errorf("NewJsonReference(%v) error %s", in, err.Error()) + } + + if in != r1.String() { + t.Errorf("NewJsonReference(%v) = %v, expect %v", in, r1.String(), in) + } + + if r1.HasFragmentOnly != false { + t.Errorf("NewJsonReference(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) + } + + if r1.HasFullUrl != false { + t.Errorf("NewJsonReference(%v)::HasFullUrl %v expect %v", in, r1.HasFullUrl, false) + } + + if r1.HasUrlPathOnly != true { + t.Errorf("NewJsonReference(%v)::HasUrlPathOnly %v expect %v", in, r1.HasUrlPathOnly, true) + } + + if r1.HasFileScheme != false { + t.Errorf("NewJsonReference(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) + } + + if r1.GetPointer().String() != "" { + t.Errorf("NewJsonReference(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "") + } +} + +func TestUrlRelativePathOnly(t *testing.T) { + + in := "document.json" + + r1, err := NewJsonReference(in) + if err != nil { + t.Errorf("NewJsonReference(%v) error %s", in, err.Error()) + } + + if in != r1.String() { + t.Errorf("NewJsonReference(%v) = %v, expect %v", in, r1.String(), in) + } + + if r1.HasFragmentOnly != false { + t.Errorf("NewJsonReference(%v)::HasFragmentOnly %v expect %v", in, r1.HasFragmentOnly, false) + } + + if r1.HasFullUrl != false { + t.Errorf("NewJsonReference(%v)::HasFullUrl %v expect %v", in, r1.HasFullUrl, false) + } + + if r1.HasUrlPathOnly != true { + t.Errorf("NewJsonReference(%v)::HasUrlPathOnly %v expect %v", in, r1.HasUrlPathOnly, true) + } + + if r1.HasFileScheme != false { + t.Errorf("NewJsonReference(%v)::HasFileScheme %v expect %v", in, r1.HasFileScheme, false) + } + + if r1.GetPointer().String() != "" { + t.Errorf("NewJsonReference(%v)::GetPointer() %v expect %v", in, r1.GetPointer().String(), "") + } +} + +func TestInheritsValid(t *testing.T) { + + in1 := "http://www.test.com/doc.json" + in2 := "#/a/b" + out := in1 + in2 + + r1, _ := NewJsonReference(in1) + r2, _ := NewJsonReference(in2) + + result, err := r1.Inherits(r2) + if err != nil { + t.Errorf("Inherits(%s,%s) error %s", r1.String(), r2.String(), err.Error()) + } + + if result.String() != out { + t.Errorf("Inherits(%s,%s) = %s, expect %s", r1.String(), r2.String(), result.String(), out) + } + + if result.GetPointer().String() != "/a/b" { + t.Errorf("result(%v)::GetPointer() %v expect %v", result.String(), result.GetPointer().String(), "/a/b") + } +} + +func TestInheritsDifferentHost(t *testing.T) { + + in1 := "http://www.test.com/doc.json" + in2 := "http://www.test2.com/doc.json#bla" + + r1, _ := NewJsonReference(in1) + r2, _ := NewJsonReference(in2) + + result, err := r1.Inherits(r2) + + if err != nil { + t.Errorf("Inherits(%s,%s) should not fail. 
Error: %s", r1.String(), r2.String(), err.Error()) + } + + if result.String() != in2 { + t.Errorf("Inherits(%s,%s) should be %s but is %s", in1, in2, in2, result) + } + + if result.GetPointer().String() != "" { + t.Errorf("result(%v)::GetPointer() %v expect %v", result.String(), result.GetPointer().String(), "") + } +} + +func TestFileScheme(t *testing.T) { + + in1 := "file:///Users/mac/1.json#a" + in2 := "file:///Users/mac/2.json#b" + + r1, _ := NewJsonReference(in1) + r2, _ := NewJsonReference(in2) + + if r1.HasFragmentOnly != false { + t.Errorf("NewJsonReference(%v)::HasFragmentOnly %v expect %v", in1, r1.HasFragmentOnly, false) + } + + if r1.HasFileScheme != true { + t.Errorf("NewJsonReference(%v)::HasFileScheme %v expect %v", in1, r1.HasFileScheme, true) + } + + if r1.HasFullFilePath != true { + t.Errorf("NewJsonReference(%v)::HasFullFilePath %v expect %v", in1, r1.HasFullFilePath, true) + } + + if r1.IsCanonical() != true { + t.Errorf("NewJsonReference(%v)::IsCanonical %v expect %v", in1, r1.IsCanonical, true) + } + + result, err := r1.Inherits(r2) + if err != nil { + t.Errorf("Inherits(%s,%s) should not fail. Error: %s", r1.String(), r2.String(), err.Error()) + } + if result.String() != in2 { + t.Errorf("Inherits(%s,%s) should be %s but is %s", in1, in2, in2, result) + } + + if result.GetPointer().String() != "" { + t.Errorf("result(%v)::GetPointer() %v expect %v", result.String(), result.GetPointer().String(), "") + } +} + +func TestReferenceResolution(t *testing.T) { + + // 5.4. Reference Resolution Examples + // http://tools.ietf.org/html/rfc3986#section-5.4 + + base := "http://a/b/c/d;p?q" + baseRef, err := NewJsonReference(base) + + if err != nil { + t.Errorf("NewJsonReference(%s) failed error: %s", base, err.Error()) + } + if baseRef.String() != base { + t.Errorf("NewJsonReference(%s) %s expected %s", base, baseRef.String(), base) + } + + checks := []string{ + // 5.4.1. Normal Examples + // http://tools.ietf.org/html/rfc3986#section-5.4.1 + + "g:h", "g:h", + "g", "http://a/b/c/g", + "./g", "http://a/b/c/g", + "g/", "http://a/b/c/g/", + "/g", "http://a/g", + "//g", "http://g", + "?y", "http://a/b/c/d;p?y", + "g?y", "http://a/b/c/g?y", + "#s", "http://a/b/c/d;p?q#s", + "g#s", "http://a/b/c/g#s", + "g?y#s", "http://a/b/c/g?y#s", + ";x", "http://a/b/c/;x", + "g;x", "http://a/b/c/g;x", + "g;x?y#s", "http://a/b/c/g;x?y#s", + "", "http://a/b/c/d;p?q", + ".", "http://a/b/c/", + "./", "http://a/b/c/", + "..", "http://a/b/", + "../", "http://a/b/", + "../g", "http://a/b/g", + "../..", "http://a/", + "../../", "http://a/", + "../../g", "http://a/g", + + // 5.4.2. 
Abnormal Examples + // http://tools.ietf.org/html/rfc3986#section-5.4.2 + + "../../../g", "http://a/g", + "../../../../g", "http://a/g", + + "/./g", "http://a/g", + "/../g", "http://a/g", + "g.", "http://a/b/c/g.", + ".g", "http://a/b/c/.g", + "g..", "http://a/b/c/g..", + "..g", "http://a/b/c/..g", + + "./../g", "http://a/b/g", + "./g/.", "http://a/b/c/g/", + "g/./h", "http://a/b/c/g/h", + "g/../h", "http://a/b/c/h", + "g;x=1/./y", "http://a/b/c/g;x=1/y", + "g;x=1/../y", "http://a/b/c/y", + + "g?y/./x", "http://a/b/c/g?y/./x", + "g?y/../x", "http://a/b/c/g?y/../x", + "g#s/./x", "http://a/b/c/g#s/./x", + "g#s/../x", "http://a/b/c/g#s/../x", + + "http:g", "http:g", // for strict parsers + //"http:g", "http://a/b/c/g", // for backward compatibility + + } + for i := 0; i < len(checks); i += 2 { + child := checks[i] + expected := checks[i+1] + // fmt.Printf("%d: %v -> %v\n", i/2, child, expected) + + childRef, e := NewJsonReference(child) + if e != nil { + t.Errorf("%d: NewJsonReference(%s) failed error: %s", i/2, child, e.Error()) + } + + res, e := baseRef.Inherits(childRef) + if res == nil { + t.Errorf("%d: Inherits(%s, %s) nil not expected", i/2, base, child) + } + if e != nil { + t.Errorf("%d: Inherits(%s) failed error: %s", i/2, child, e.Error()) + } + if res.String() != expected { + t.Errorf("%d: Inherits(%s, %s) %s expected %s", i/2, base, child, res.String(), expected) + } + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/.gitignore b/vendor/github.com/xeipuuv/gojsonschema/.gitignore new file mode 100644 index 00000000..c1e0636f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.gitignore @@ -0,0 +1 @@ +*.sw[nop] diff --git a/vendor/github.com/xeipuuv/gojsonschema/.travis.yml b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml new file mode 100644 index 00000000..9cc01e8a --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 +before_install: + - go get github.com/sigu-399/gojsonreference + - go get github.com/sigu-399/gojsonpointer + - go get github.com/stretchr/testify/assert diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md
new file mode 100644
index 00000000..187da61e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/README.md
@@ -0,0 +1,236 @@
+[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema)
+
+# gojsonschema
+
+## Description
+
+An implementation of JSON Schema, based on IETF's draft v4 - Go language
+
+References:
+
+* http://json-schema.org
+* http://json-schema.org/latest/json-schema-core.html
+* http://json-schema.org/latest/json-schema-validation.html
+
+## Installation
+
+```
+go get github.com/xeipuuv/gojsonschema
+```
+
+Dependencies:
+* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer)
+* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference)
+* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package)
+
+## Usage
+
+### Example
+
+```go
+
+package main
+
+import (
+    "fmt"
+    "github.com/xeipuuv/gojsonschema"
+)
+
+func main() {
+
+    schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+    documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json")
+
+    result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+    if err != nil {
+        panic(err.Error())
+    }
+
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. See errors:\n")
+        for _, desc := range result.Errors() {
+            fmt.Printf("- %s\n", desc)
+        }
+    }
+
+}
+
+
+```
+
+#### Loaders
+
+There are various ways to load your JSON data.
+In order to load your schemas and documents,
+first declare an appropriate loader:
+
+* Web / HTTP, using a reference:
+
+```go
+loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json")
+```
+
+* Local file, using a reference:
+
+```go
+loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json")
+```
+
+References use the URI scheme; the prefix (file://) and a full path to the file are required.
+
+* JSON strings:
+
+```go
+loader := gojsonschema.NewStringLoader(`{"type": "string"}`)
+```
+
+* Custom Go types:
+
+```go
+m := map[string]interface{}{"type": "string"}
+loader := gojsonschema.NewGoLoader(m)
+```
+
+And
+
+```go
+type Root struct {
+    Users []User `json:"users"`
+}
+
+type User struct {
+    Name string `json:"name"`
+}
+
+...
+
+data := Root{}
+data.Users = append(data.Users, User{"John"})
+data.Users = append(data.Users, User{"Sophia"})
+data.Users = append(data.Users, User{"Bill"})
+
+loader := gojsonschema.NewGoLoader(data)
+```
+
+#### Validation
+
+Once the loaders are set, validation is easy:
+
+```go
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+```
+
+Alternatively, you might want to load a schema only once and run multiple validations against it:
+
+```go
+schema, err := gojsonschema.NewSchema(schemaLoader)
+...
+result1, err := schema.Validate(documentLoader1)
+...
+result2, err := schema.Validate(documentLoader2)
+...
+// etc ...
+```
+
+To check the result:
+
+```go
+    if result.Valid() {
+        fmt.Printf("The document is valid\n")
+    } else {
+        fmt.Printf("The document is not valid. See errors:\n")
+        for _, err := range result.Errors() {
+            // Err implements the ResultError interface
+            fmt.Printf("- %s\n", err)
+        }
+    }
+```
+
+## Working with Errors
+
+The library handles string error codes, which you can customize by creating your own gojsonschema.locale and setting it:
+```go
+gojsonschema.Locale = YourCustomLocale{}
+```
+
+However, each error contains additional contextual information.
+
+**err.Type()**: *string* Returns the "type" of error that occurred. Note that you can also type-check; see below.
+
+Note: an error of type RequiredError has an err.Type() return value of "required"
+
+    "required": RequiredError
+    "invalid_type": InvalidTypeError
+    "number_any_of": NumberAnyOfError
+    "number_one_of": NumberOneOfError
+    "number_all_of": NumberAllOfError
+    "number_not": NumberNotError
+    "missing_dependency": MissingDependencyError
+    "internal": InternalError
+    "enum": EnumError
+    "array_no_additional_items": ArrayNoAdditionalItemsError
+    "array_min_items": ArrayMinItemsError
+    "array_max_items": ArrayMaxItemsError
+    "unique": ItemsMustBeUniqueError
+    "array_min_properties": ArrayMinPropertiesError
+    "array_max_properties": ArrayMaxPropertiesError
+    "additional_property_not_allowed": AdditionalPropertyNotAllowedError
+    "invalid_property_pattern": InvalidPropertyPatternError
+    "string_gte": StringLengthGTEError
+    "string_lte": StringLengthLTEError
+    "pattern": DoesNotMatchPatternError
+    "format": DoesNotMatchFormatError
+    "multiple_of": MultipleOfError
+    "number_gte": NumberGTEError
+    "number_gt": NumberGTError
+    "number_lte": NumberLTEError
+    "number_lt": NumberLTError
+
+**err.Value()**: *interface{}* Returns the value given
+
+**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName
+
+**err.Field()**: *string* Returns the field name in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix.
+
+**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overriding the locale with a custom implementation.
+
+**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()*
+
+Note that in most cases err.Details() is used to generate replacement strings in your locales rather than being used directly, e.g.:
+```
+%field% must be greater than or equal to %min%
+```
+
+## Formats
+JSON Schema allows for an optional "format" property to validate strings against well-known formats. gojsonschema ships with all of the formats defined in the spec, which you can use like this:
+```json
+{"type": "string", "format": "email"}
+```
+Available formats: date-time, hostname, email, ipv4, ipv6, uri, uuid.
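+
+For example, a minimal sketch of checking a single value against a format-constrained schema, using the string loaders and Validate function shown above (the value and schema here are illustrative):
+
+```go
+schemaLoader := gojsonschema.NewStringLoader(`{"type": "string", "format": "email"}`)
+documentLoader := gojsonschema.NewStringLoader(`"joe@example.com"`)
+
+// The document is valid only if the string satisfies the "email" format.
+result, err := gojsonschema.Validate(schemaLoader, documentLoader)
+if err != nil {
+    panic(err.Error())
+}
+fmt.Printf("valid: %v\n", result.Valid()) // valid: true
+```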
+
+For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this:
+
+```go
+// Define the format checker
+type RoleFormatChecker struct{}
+
+// Ensure it meets the gojsonschema.FormatChecker interface
+func (f RoleFormatChecker) IsFormat(input string) bool {
+    return strings.HasPrefix(input, "ROLE_")
+}
+
+// Add it to the library
+gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{})
+```
+
+Now use it in your JSON schema:
+```json
+{"type": "string", "format": "role"}
+```
+
+## Uses
+
+gojsonschema uses the following test suite:
+
+https://github.com/json-schema/JSON-Schema-Test-Suite
diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go
new file mode 100644
index 00000000..5146cbba
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go
@@ -0,0 +1,242 @@
+package gojsonschema

+import (
+	"fmt"
+	"strings"
+)

+type (
+	// RequiredError. ErrorDetails: property string
+	RequiredError struct {
+		ResultErrorFields
+	}

+	// InvalidTypeError. ErrorDetails: expected, given
+	InvalidTypeError struct {
+		ResultErrorFields
+	}

+	// NumberAnyOfError. ErrorDetails: -
+	NumberAnyOfError struct {
+		ResultErrorFields
+	}

+	// NumberOneOfError. ErrorDetails: -
+	NumberOneOfError struct {
+		ResultErrorFields
+	}

+	// NumberAllOfError. ErrorDetails: -
+	NumberAllOfError struct {
+		ResultErrorFields
+	}

+	// NumberNotError. ErrorDetails: -
+	NumberNotError struct {
+		ResultErrorFields
+	}

+	// MissingDependencyError. ErrorDetails: dependency
+	MissingDependencyError struct {
+		ResultErrorFields
+	}

+	// InternalError. ErrorDetails: error
+	InternalError struct {
+		ResultErrorFields
+	}

+	// EnumError. ErrorDetails: allowed
+	EnumError struct {
+		ResultErrorFields
+	}

+	// ArrayNoAdditionalItemsError. ErrorDetails: -
+	ArrayNoAdditionalItemsError struct {
+		ResultErrorFields
+	}

+	// ArrayMinItemsError. ErrorDetails: min
+	ArrayMinItemsError struct {
+		ResultErrorFields
+	}

+	// ArrayMaxItemsError. ErrorDetails: max
+	ArrayMaxItemsError struct {
+		ResultErrorFields
+	}

+	// ItemsMustBeUniqueError. ErrorDetails: type
+	ItemsMustBeUniqueError struct {
+		ResultErrorFields
+	}

+	// ArrayMinPropertiesError. ErrorDetails: min
+	ArrayMinPropertiesError struct {
+		ResultErrorFields
+	}

+	// ArrayMaxPropertiesError. ErrorDetails: max
+	ArrayMaxPropertiesError struct {
+		ResultErrorFields
+	}

+	// AdditionalPropertyNotAllowedError. ErrorDetails: property
+	AdditionalPropertyNotAllowedError struct {
+		ResultErrorFields
+	}

+	// InvalidPropertyPatternError. ErrorDetails: property, pattern
+	InvalidPropertyPatternError struct {
+		ResultErrorFields
+	}

+	// StringLengthGTEError. ErrorDetails: min
+	StringLengthGTEError struct {
+		ResultErrorFields
+	}

+	// StringLengthLTEError. ErrorDetails: max
+	StringLengthLTEError struct {
+		ResultErrorFields
+	}

+	// DoesNotMatchPatternError. ErrorDetails: pattern
+	DoesNotMatchPatternError struct {
+		ResultErrorFields
+	}

+	// DoesNotMatchFormatError. ErrorDetails: format
+	DoesNotMatchFormatError struct {
+		ResultErrorFields
+	}

+	// MultipleOfError. ErrorDetails: multiple
+	MultipleOfError struct {
+		ResultErrorFields
+	}

+	// NumberGTEError. ErrorDetails: min
+	NumberGTEError struct {
+		ResultErrorFields
+	}

+	// NumberGTError. ErrorDetails: min
+	NumberGTError struct {
+		ResultErrorFields
+	}

+	// NumberLTEError. 
ErrorDetails: max + NumberLTEError struct { + ResultErrorFields + } + + // NumberLTError. ErrorDetails: max + NumberLTError struct { + ResultErrorFields + } +) + +// newError takes a ResultError type and sets the type, context, description, details, value, and field +func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) { + var t string + var d string + switch err.(type) { + case *RequiredError: + t = "required" + d = locale.Required() + case *InvalidTypeError: + t = "invalid_type" + d = locale.InvalidType() + case *NumberAnyOfError: + t = "number_any_of" + d = locale.NumberAnyOf() + case *NumberOneOfError: + t = "number_one_of" + d = locale.NumberOneOf() + case *NumberAllOfError: + t = "number_all_of" + d = locale.NumberAllOf() + case *NumberNotError: + t = "number_not" + d = locale.NumberNot() + case *MissingDependencyError: + t = "missing_dependency" + d = locale.MissingDependency() + case *InternalError: + t = "internal" + d = locale.Internal() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + details["field"] = err.Field() + err.SetDescription(formatErrorDescription(d, details)) +} + +// formatErrorDescription takes a string in this format: %field% is required +// and converts it to a string with replacements. The fields come from +// the ErrorDetails struct and vary for each type of error. 
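+// For example, given the string "%field% is required" and ErrorDetails{"field": "name"},
+// the result is "name is required": each detail key, lower-cased and wrapped in
+// percent signs, is replaced by that key's value from the details map.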
+func formatErrorDescription(s string, details ErrorDetails) string {
+	for name, val := range details {
+		s = strings.Replace(s, "%"+strings.ToLower(name)+"%", fmt.Sprintf("%v", val), -1)
+	}

+	return s
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
new file mode 100644
index 00000000..b91c360a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go
@@ -0,0 +1,178 @@
+package gojsonschema

+import (
+	"net"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)

+type (
+	// FormatChecker is the interface all formatters added to FormatCheckerChain must implement
+	FormatChecker interface {
+		IsFormat(input string) bool
+	}

+	// FormatCheckerChain holds the formatters
+	FormatCheckerChain struct {
+		formatters map[string]FormatChecker
+	}

+	// EmailFormatChecker verifies email address formats
+	EmailFormatChecker struct{}

+	// IPV4FormatChecker verifies IP addresses in the ipv4 format
+	IPV4FormatChecker struct{}

+	// IPV6FormatChecker verifies IP addresses in the ipv6 format
+	IPV6FormatChecker struct{}

+	// DateTimeFormatChecker verifies date/time formats per RFC3339 5.6
+	//
+	// Valid formats:
+	// 		Partial Time: HH:MM:SS
+	//		Full Date: YYYY-MM-DD
+	// 		Full Time: HH:MM:SSZ-07:00
+	// 		Date Time: YYYY-MM-DDTHH:MM:SSZ-0700
+	//
+	// 	Where
+	//		YYYY = 4DIGIT year
+	//		MM = 2DIGIT month ; 01-12
+	//		DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year
+	//		HH = 2DIGIT hour ; 00-23
+	//		MM = 2DIGIT ; 00-59
+	//		SS = 2DIGIT ; 00-58, 00-60 based on leap second rules
+	//		T = Literal
+	//		Z = Literal
+	//
+	//	Note: Nanoseconds are also supported in all formats
+	//
+	//	http://tools.ietf.org/html/rfc3339#section-5.6
+	DateTimeFormatChecker struct{}

+	// URIFormatChecker validates a URI with a valid Scheme per RFC3986
+	URIFormatChecker struct{}

+	// HostnameFormatChecker validates a hostname is in the correct format
+	HostnameFormatChecker struct{}

+	// UUIDFormatChecker validates a UUID is in the correct format
+	UUIDFormatChecker struct{}
+)

+var (
+	// FormatCheckers holds the valid format checkers, and is a public variable
+	// so library users can add custom format checkers
+	FormatCheckers = FormatCheckerChain{
+		formatters: map[string]FormatChecker{
+			"date-time": DateTimeFormatChecker{},
+			"hostname":  HostnameFormatChecker{},
+			"email":     EmailFormatChecker{},
+			"ipv4":      IPV4FormatChecker{},
+			"ipv6":      IPV6FormatChecker{},
+			"uri":       URIFormatChecker{},
+			"uuid":      UUIDFormatChecker{},
+		},
+	}

+	// Regex credit: https://github.com/asaskevich/govalidator
+	rxEmail = 
regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$") + + // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname + rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) + + rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") +) + +// Add adds a FormatChecker to the FormatCheckerChain +// The name used will be the value used for the format key in your json schema +func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { + c.formatters[name] = f + + return c +} + +// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) +func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { + delete(c.formatters, name) + + return c +} + +// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name +func (c *FormatCheckerChain) Has(name string) bool { + _, ok := c.formatters[name] + + return ok +} + +// IsFormat will check an input against a FormatChecker with the given name +// to see if it is the correct format +func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { + f, ok := c.formatters[name] + + if !ok { + return false + } + + if !isKind(input, reflect.String) { + return false + } + + inputString := input.(string) + + return f.IsFormat(inputString) +} + +func (f EmailFormatChecker) IsFormat(input string) bool { + return rxEmail.MatchString(input) +} + +// Credit: https://github.com/asaskevich/govalidator +func (f IPV4FormatChecker) IsFormat(input string) bool { + ip := net.ParseIP(input) + return ip != nil && strings.Contains(input, ".") +} + +// Credit: https://github.com/asaskevich/govalidator +func (f IPV6FormatChecker) IsFormat(input string) bool { + ip := net.ParseIP(input) + return ip != nil && strings.Contains(input, ":") +} + +func (f DateTimeFormatChecker) IsFormat(input string) bool { + formats := []string{ + "15:04:05", + "15:04:05Z07:00", + "2006-01-02", + time.RFC3339, + time.RFC3339Nano, + } + + for _, format := range formats { + if _, err := time.Parse(format, input); err == nil { + return true + } + } + + return false +} + +func (f URIFormatChecker) IsFormat(input string) bool { + u, err := url.Parse(input) + if err != nil || u.Scheme == "" { + return 
false + } + + return true +} + +func (f HostnameFormatChecker) IsFormat(input string) bool { + return rxHostname.MatchString(input) && len(input) < 256 +} + +func (f UUIDFormatChecker) IsFormat(input string) bool { + return rxUUID.MatchString(input) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers_test.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers_test.go new file mode 100644 index 00000000..04db0e7c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers_test.go @@ -0,0 +1,16 @@ +package gojsonschema + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestUUIDFormatCheckerIsFormat(t *testing.T) { + checker := UUIDFormatChecker{} + + assert.True(t, checker.IsFormat("01234567-89ab-cdef-0123-456789abcdef")) + assert.True(t, checker.IsFormat("f1234567-89ab-cdef-0123-456789abcdef")) + + assert.False(t, checker.IsFormat("not-a-uuid")) + assert.False(t, checker.IsFormat("g1234567-89ab-cdef-0123-456789abcdef")) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/glide.yaml b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml new file mode 100644 index 00000000..7aef8c09 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/glide.yaml @@ -0,0 +1,12 @@ +package: github.com/xeipuuv/gojsonschema +license: Apache 2.0 +import: +- package: github.com/xeipuuv/gojsonschema + +- package: github.com/xeipuuv/gojsonpointer + +- package: github.com/xeipuuv/gojsonreference + +- package: github.com/stretchr/testify/assert + version: ^1.1.3 + diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 00000000..4ef7a8d0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 00000000..fcc8d9d6 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,72 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// jsonContext implements a persistent linked-list of strings +type jsonContext struct { + head string + tail *jsonContext +} + +func newJsonContext(head string, tail *jsonContext) *jsonContext { + return &jsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *jsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *jsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 00000000..0b405747 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,314 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. 
+// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSON loader interface + +type JSONLoader interface { + jsonSource() interface{} + loadJSON() (interface{}, error) + loadSchema() (*Schema, error) +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) jsonSource() interface{} { + return l.source +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. +func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) loadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.jsonSource().(string)) + if err != nil { + return nil, err + } + + refToUrl := reference + refToUrl.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.Replace(refToUrl.String(), "file://", "", -1) + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + if strings.HasPrefix(filename, "/") { + filename = filename[1:] + } + filename = filepath.FromSlash(filename) + filename = strings.Replace(filename, "%20", " ", -1) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToUrl.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadSchema() (*Schema, error) { + + var err error + + d := Schema{} + d.pool = newSchemaPool(l.fs) + d.referencePool = newSchemaReferencePool() + + d.documentReference, err = gojsonreference.NewJsonReference(l.jsonSource().(string)) + if err != nil { + return nil, err + } + + spd, err := d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + + err = d.parse(spd.Document) + if err != nil { + return nil, err + } + + return &d, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.httpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) + +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := 
ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) jsonSource() interface{} { + return l.source +} + +func NewStringLoader(source string) *jsonStringLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) loadJSON() (interface{}, error) { + + return decodeJsonUsingNumber(strings.NewReader(l.jsonSource().(string))) + +} + +func (l *jsonStringLoader) loadSchema() (*Schema, error) { + + var err error + + document, err := l.loadJSON() + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = newSchemaPool(osFS) + d.referencePool = newSchemaReferencePool() + d.documentReference, err = gojsonreference.NewJsonReference("#") + d.pool.SetStandaloneDocument(document) + if err != nil { + return nil, err + } + + err = d.parse(document) + if err != nil { + return nil, err + } + + return &d, nil + +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... + +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) jsonSource() interface{} { + return l.source +} + +func NewGoLoader(source interface{}) *jsonGoLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) loadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.jsonSource()) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(jsonBytes)) + +} + +func (l *jsonGoLoader) loadSchema() (*Schema, error) { + + var err error + + document, err := l.loadJSON() + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = newSchemaPool(osFS) + d.referencePool = newSchemaReferencePool() + d.documentReference, err = gojsonreference.NewJsonReference("#") + d.pool.SetStandaloneDocument(document) + if err != nil { + return nil, err + } + + err = d.parse(document) + if err != nil { + return nil, err + } + + return &d, nil + +} + +func decodeJsonUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE new file mode 100644 index 00000000..c28adbad --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2012 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_00.json new file mode 100644 index 00000000..5cb29f91 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_00.json @@ -0,0 +1 @@ +[null,2,3,4] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_01.json new file mode 100644 index 00000000..4eb0d32f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_01.json @@ -0,0 +1 @@ +[null,2,3,"foo"] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_10.json new file mode 100644 index 00000000..e77ca8d9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_10.json @@ -0,0 +1 @@ +[1,2,3,4,5] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_20.json new file mode 100644 index 00000000..3a26a2e5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_20.json @@ -0,0 +1 @@ +[1,2,3] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_21.json new file mode 100644 index 00000000..fde6c1d7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_21.json @@ -0,0 +1 @@ +[1,2,3,4] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_30.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_30.json new file mode 100644 index 00000000..e77ca8d9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_30.json @@ -0,0 +1 @@ +[1,2,3,4,5] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_31.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_31.json new file mode 100644 index 00000000..9f5dd4e3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_31.json @@ -0,0 +1 @@ +{"foo":"bar"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_40.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_40.json new file mode 100644 index 00000000..327fe850 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/data_40.json @@ -0,0 +1 @@ +[1,"foo",false] \ No newline at end of file diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_0.json new file mode 100644 index 00000000..75a4b7e0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_0.json @@ -0,0 +1 @@ +{"additionalItems":{"type":"integer"},"items":[{}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_1.json new file mode 100644 index 00000000..d5046a9c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_1.json @@ -0,0 +1 @@ +{"additionalItems":false,"items":{}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_2.json new file mode 100644 index 00000000..6ab20de2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_2.json @@ -0,0 +1 @@ +{"additionalItems":false,"items":[{},{},{}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_3.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_3.json new file mode 100644 index 00000000..f04aaf84 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_3.json @@ -0,0 +1 @@ +{"additionalItems":false} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_4.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_4.json new file mode 100644 index 00000000..fff26f85 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalItems/schema_4.json @@ -0,0 +1 @@ +{"items":[{"type":"integer"}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_00.json new file mode 100644 index 00000000..2518e687 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_00.json @@ -0,0 +1 @@ +{"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_01.json new file mode 100644 index 00000000..513d61fb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_01.json @@ -0,0 +1 @@ +{"bar":2,"foo":1,"quux":"boom"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_02.json new file mode 100644 index 00000000..3a26a2e5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_02.json @@ -0,0 +1 @@ +[1,2,3] \ No newline at end of file diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_03.json new file mode 100644 index 00000000..65a158df --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_03.json @@ -0,0 +1 @@ +{"foo":1, "vroom": 2} diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_10.json new file mode 100644 index 00000000..2518e687 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_10.json @@ -0,0 +1 @@ +{"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_11.json new file mode 100644 index 00000000..2129e620 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_11.json @@ -0,0 +1 @@ +{"bar":2,"foo":1,"quux":true} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_12.json new file mode 100644 index 00000000..ce7708b1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_12.json @@ -0,0 +1 @@ +{"bar":2,"foo":1,"quux":12} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_20.json new file mode 100644 index 00000000..2129e620 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/data_20.json @@ -0,0 +1 @@ +{"bar":2,"foo":1,"quux":true} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_0.json new file mode 100644 index 00000000..99d0ef4f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_0.json @@ -0,0 +1 @@ +{"additionalProperties":false,"properties":{"bar":{},"foo":{}},"patternProperties": { "^v": {} }} diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_1.json new file mode 100644 index 00000000..b55c0924 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_1.json @@ -0,0 +1 @@ +{"additionalProperties":{"type":"boolean"},"properties":{"bar":{},"foo":{}}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_2.json new file mode 100644 index 00000000..4d0bc3a9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/additionalProperties/schema_2.json @@ -0,0 +1 @@ +{"properties":{"bar":{},"foo":{}}} \ No newline 
at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_00.json new file mode 100644 index 00000000..7973c0ac --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_00.json @@ -0,0 +1 @@ +{"bar":2,"foo":"baz"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_01.json new file mode 100644 index 00000000..62aa0a13 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_01.json @@ -0,0 +1 @@ +{"foo":"baz"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_02.json new file mode 100644 index 00000000..70eef88e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_02.json @@ -0,0 +1 @@ +{"bar":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_03.json new file mode 100644 index 00000000..54c2b0b0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_03.json @@ -0,0 +1 @@ +{"bar":"quux","foo":"baz"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_10.json new file mode 100644 index 00000000..fa13a378 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_10.json @@ -0,0 +1 @@ +{"bar":2,"baz":null,"foo":"quux"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_11.json new file mode 100644 index 00000000..366679e8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_11.json @@ -0,0 +1 @@ +{"baz":null,"foo":"quux"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_12.json new file mode 100644 index 00000000..e8f534ad --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_12.json @@ -0,0 +1 @@ +{"bar":2,"baz":null} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_13.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_13.json new file mode 100644 index 00000000..6fbe1984 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_13.json @@ -0,0 +1 @@ +{"bar":2,"foo":"quux"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_14.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_14.json new file mode 100644 index 00000000..70eef88e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_14.json @@ -0,0 +1 @@ +{"bar":2} \ No newline at end of file diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_20.json new file mode 100644 index 00000000..410b14d2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_20.json @@ -0,0 +1 @@ +25 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_21.json new file mode 100644 index 00000000..597975b4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/data_21.json @@ -0,0 +1 @@ +35 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_0.json new file mode 100644 index 00000000..13b607a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_0.json @@ -0,0 +1 @@ +{"allOf":[{"properties":{"bar":{"type":"integer"}},"required":["bar"]},{"properties":{"foo":{"type":"string"}},"required":["foo"]}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_1.json new file mode 100644 index 00000000..f618c0b2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_1.json @@ -0,0 +1 @@ +{"allOf":[{"properties":{"foo":{"type":"string"}},"required":["foo"]},{"properties":{"baz":{"type":"null"}},"required":["baz"]}],"properties":{"bar":{"type":"integer"}},"required":["bar"]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_2.json new file mode 100644 index 00000000..748ee7ef --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/allOf/schema_2.json @@ -0,0 +1 @@ +{"allOf":[{"maximum":30},{"minimum":20}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_00.json new file mode 100644 index 00000000..56a6051c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_00.json @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_01.json new file mode 100644 index 00000000..68151b2e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_01.json @@ -0,0 +1 @@ +2.5 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_02.json new file mode 100644 index 00000000..e440e5c8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_02.json @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_03.json new file mode 100644 index 00000000..400122e6 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_03.json @@ -0,0 +1 @@ +1.5 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_10.json new file mode 100644 index 00000000..e440e5c8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_10.json @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_11.json new file mode 100644 index 00000000..b2bb988b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_11.json @@ -0,0 +1 @@ +"foobar" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_12.json new file mode 100644 index 00000000..7e9668e7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/data_12.json @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/schema_0.json new file mode 100644 index 00000000..5540cb8f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/schema_0.json @@ -0,0 +1 @@ +{"anyOf":[{"type":"integer"},{"minimum":2}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/schema_1.json new file mode 100644 index 00000000..cbe387a3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/anyOf/schema_1.json @@ -0,0 +1 @@ +{"anyOf":[{"maxLength":2},{"minLength":4}],"type":"string"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/data_00.json new file mode 100644 index 00000000..fc7a98bf --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/data_00.json @@ -0,0 +1 @@ +{"definitions":{"foo":{"type":"integer"}}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/data_10.json new file mode 100644 index 00000000..5270defb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/data_10.json @@ -0,0 +1 @@ +{"definitions":{"foo":{"type":1}}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/schema_0.json new file mode 100644 index 00000000..412c2163 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/schema_0.json @@ -0,0 +1 @@ +{"$ref":"http://json-schema.org/draft-04/schema#"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/schema_1.json 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/schema_1.json new file mode 100644 index 00000000..412c2163 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/definitions/schema_1.json @@ -0,0 +1 @@ +{"$ref":"http://json-schema.org/draft-04/schema#"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_00.json new file mode 100644 index 00000000..9e26dfee --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_00.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_01.json new file mode 100644 index 00000000..2518e687 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_01.json @@ -0,0 +1 @@ +{"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_02.json new file mode 100644 index 00000000..b0fcefdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_02.json @@ -0,0 +1 @@ +{"bar":2,"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_03.json new file mode 100644 index 00000000..70eef88e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_03.json @@ -0,0 +1 @@ +{"bar":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_04.json new file mode 100644 index 00000000..7e9668e7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_04.json @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_10.json new file mode 100644 index 00000000..9e26dfee --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_10.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_11.json new file mode 100644 index 00000000..b0fcefdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_11.json @@ -0,0 +1 @@ +{"bar":2,"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_12.json new file mode 100644 index 00000000..2c48cfbe --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_12.json @@ -0,0 +1 @@ +{"bar":2,"foo":1,"quux":3} \ No newline at end of file diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_13.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_13.json new file mode 100644 index 00000000..6fe283b3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_13.json @@ -0,0 +1 @@ +{"foo":1,"quux":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_14.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_14.json new file mode 100644 index 00000000..7bed583b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_14.json @@ -0,0 +1 @@ +{"bar":1,"quux":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_15.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_15.json new file mode 100644 index 00000000..10d82bf9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_15.json @@ -0,0 +1 @@ +{"quux":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_20.json new file mode 100644 index 00000000..b0fcefdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_20.json @@ -0,0 +1 @@ +{"bar":2,"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_21.json new file mode 100644 index 00000000..707f7cec --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_21.json @@ -0,0 +1 @@ +{"foo":"quux"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_22.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_22.json new file mode 100644 index 00000000..6fbe1984 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_22.json @@ -0,0 +1 @@ +{"bar":2,"foo":"quux"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_23.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_23.json new file mode 100644 index 00000000..a1637389 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_23.json @@ -0,0 +1 @@ +{"bar":"quux","foo":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_24.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_24.json new file mode 100644 index 00000000..2fee1e31 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/data_24.json @@ -0,0 +1 @@ +{"bar":"quux","foo":"quux"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_0.json new file mode 100644 index 00000000..445985c6 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_0.json @@ -0,0 +1 @@ +{"dependencies":{"bar":["foo"]}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_1.json new file mode 100644 index 00000000..1bb5a676 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_1.json @@ -0,0 +1 @@ +{"dependencies":{"quux":["foo","bar"]}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_2.json new file mode 100644 index 00000000..27f97a43 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/dependencies/schema_2.json @@ -0,0 +1 @@ +{"dependencies":{"bar":{"properties":{"bar":{"type":"integer"},"foo":{"type":"integer"}}}}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_00.json new file mode 100644 index 00000000..56a6051c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_00.json @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_01.json new file mode 100644 index 00000000..bf0d87ab --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_01.json @@ -0,0 +1 @@ +4 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_10.json new file mode 100644 index 00000000..0637a088 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_10.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_11.json new file mode 100644 index 00000000..ec747fa4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_11.json @@ -0,0 +1 @@ +null \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_12.json new file mode 100644 index 00000000..1bfa80c1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/data_12.json @@ -0,0 +1 @@ +{"foo":false} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/schema_0.json new file mode 100644 index 00000000..240e702e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/schema_0.json @@ -0,0 +1 @@ +{"enum":[1,2,3]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/schema_1.json new file mode 100644 index 00000000..d6b8c5f6 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/enum/schema_1.json @@ -0,0 +1 @@ +{"enum":[6,"foo",[],true,{"foo":12}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_00.json new file mode 100644 index 00000000..60bc259b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_00.json @@ -0,0 +1 @@ +"test" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_01.json new file mode 100644 index 00000000..6d9ec401 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_01.json @@ -0,0 +1 @@ +"test@" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_02.json new file mode 100644 index 00000000..6b8674d0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_02.json @@ -0,0 +1 @@ +"test@test.com" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_03.json new file mode 100644 index 00000000..970043c7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_03.json @@ -0,0 +1 @@ +"AB-10105" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_04.json new file mode 100644 index 00000000..140843fb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_04.json @@ -0,0 +1 @@ +"ABC10105" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_05.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_05.json new file mode 100644 index 00000000..e87a8acd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_05.json @@ -0,0 +1 @@ +"05:15:37" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_06.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_06.json new file mode 100644 index 00000000..17f91eb8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_06.json @@ -0,0 +1 @@ +"2015-05-13" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_07.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_07.json new file mode 100644 index 00000000..f0218e9a --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_07.json @@ -0,0 +1 @@ +"2015-6-31" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_08.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_08.json new file mode 100644 index 00000000..bf9e258b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_08.json @@ -0,0 +1 @@ 
+"2015-01-30 19:08:06" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_09.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_09.json new file mode 100644 index 00000000..1a57beb5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_09.json @@ -0,0 +1 @@ +"18:31:24-05:00" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_10.json new file mode 100644 index 00000000..00ed4ee1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_10.json @@ -0,0 +1 @@ +"2002-10-02T10:00:00-05:00" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_11.json new file mode 100644 index 00000000..a8f92d50 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_11.json @@ -0,0 +1 @@ +"2002-10-02T15:00:00Z" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_12.json new file mode 100644 index 00000000..0975607f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_12.json @@ -0,0 +1 @@ +"2002-10-02T15:00:00.05Z" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_13.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_13.json new file mode 100644 index 00000000..9f9dacee --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_13.json @@ -0,0 +1 @@ +"example.com" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_14.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_14.json new file mode 100644 index 00000000..15a9a873 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_14.json @@ -0,0 +1 @@ +"sub.example.com" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_15.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_15.json new file mode 100644 index 00000000..5c8e9b7b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_15.json @@ -0,0 +1 @@ +"hello.co.uk" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_16.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_16.json new file mode 100644 index 00000000..f5a11ec2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_16.json @@ -0,0 +1 @@ +"http://example.com" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_17.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_17.json new file mode 100644 index 00000000..062bf8cb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_17.json @@ -0,0 +1 @@ +"example_com" \ No newline at end of 
file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_18.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_18.json new file mode 100644 index 00000000..fd444101 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_18.json @@ -0,0 +1 @@ +"4.2.2.4" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_19.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_19.json new file mode 100644 index 00000000..1e490c63 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_19.json @@ -0,0 +1 @@ +"4.1.1111.45" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_20.json new file mode 100644 index 00000000..28918103 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_20.json @@ -0,0 +1 @@ +"FE80:0000:0000:0000:0202:B3FF:FE1E:8329" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_21.json new file mode 100644 index 00000000..ad20ef3c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_21.json @@ -0,0 +1 @@ +"FE80::0202:B3FF:FE1E:8329" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_22.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_22.json new file mode 100644 index 00000000..97dae8fe --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_22.json @@ -0,0 +1 @@ +"1200::AB00:1234::2552:7777:1313" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_23.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_23.json new file mode 100644 index 00000000..ef38c10e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_23.json @@ -0,0 +1 @@ +"1200:0000:AB00:1234:O000:2552:7777:1313" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_24.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_24.json new file mode 100644 index 00000000..4187b90a --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_24.json @@ -0,0 +1 @@ +"ftp://ftp.is.co.za/rfc/rfc1808.txt" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_25.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_25.json new file mode 100644 index 00000000..78168386 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_25.json @@ -0,0 +1 @@ +"mailto:john.doe@example.com" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_26.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_26.json new file mode 100644 index 00000000..df1cfae9 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_26.json @@ -0,0 +1 @@ 
+"tel:+1-816-555-1212" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_27.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_27.json new file mode 100644 index 00000000..28995ddd --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_27.json @@ -0,0 +1 @@ +"http://www.ietf.org/rfc/rfc2396.txt" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_28.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_28.json new file mode 100644 index 00000000..6b620edf --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/data_28.json @@ -0,0 +1 @@ +"example.com/path/to/file" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_0.json new file mode 100644 index 00000000..b54a202e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_0.json @@ -0,0 +1 @@ +{"type": "string", "format": "email"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_1.json new file mode 100644 index 00000000..7df67118 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_1.json @@ -0,0 +1 @@ +{"type": "string", "format": "invoice"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_2.json new file mode 100644 index 00000000..0dc4b8ff --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_2.json @@ -0,0 +1 @@ +{"type": "string", "format": "date-time"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_3.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_3.json new file mode 100644 index 00000000..d1cf1a05 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_3.json @@ -0,0 +1 @@ +{"type": "string", "format": "hostname"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_4.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_4.json new file mode 100644 index 00000000..8468509f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_4.json @@ -0,0 +1 @@ +{"type": "string", "format": "ipv4"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_5.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_5.json new file mode 100644 index 00000000..ddee8475 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_5.json @@ -0,0 +1 @@ +{"type": "string", "format": "ipv6"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_6.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_6.json new file mode 100644 index 
00000000..493546d0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/format/schema_6.json @@ -0,0 +1 @@ +{"type": "string", "format": "uri"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_00.json new file mode 100644 index 00000000..3a26a2e5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_00.json @@ -0,0 +1 @@ +[1,2,3] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_01.json new file mode 100644 index 00000000..084da97d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_01.json @@ -0,0 +1 @@ +[1,"x"] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_02.json new file mode 100644 index 00000000..9f5dd4e3 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_02.json @@ -0,0 +1 @@ +{"foo":"bar"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_10.json new file mode 100644 index 00000000..f4e52901 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_10.json @@ -0,0 +1 @@ +[1,"foo"] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_11.json new file mode 100644 index 00000000..d68c5004 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/data_11.json @@ -0,0 +1 @@ +["foo",1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/schema_0.json new file mode 100644 index 00000000..e628c9f8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/schema_0.json @@ -0,0 +1 @@ +{"items":{"type":"integer"}} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/schema_1.json new file mode 100644 index 00000000..8b92516d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/items/schema_1.json @@ -0,0 +1 @@ +{"items":[{"type":"integer"},{"type":"string"}]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_00.json new file mode 100644 index 00000000..bace2a0b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_00.json @@ -0,0 +1 @@ +[1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_01.json new file mode 100644 index 00000000..3169929f --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_01.json @@ -0,0 +1 @@ +[1,2] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_02.json new file mode 100644 index 00000000..3a26a2e5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_02.json @@ -0,0 +1 @@ +[1,2,3] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_03.json new file mode 100644 index 00000000..b2bb988b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/data_03.json @@ -0,0 +1 @@ +"foobar" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/schema_0.json new file mode 100644 index 00000000..81cfbcca --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxItems/schema_0.json @@ -0,0 +1 @@ +{"maxItems":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_00.json new file mode 100644 index 00000000..91d1e395 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_00.json @@ -0,0 +1 @@ +"f" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_01.json new file mode 100644 index 00000000..67af23e2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_01.json @@ -0,0 +1 @@ +"fo" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_02.json new file mode 100644 index 00000000..7e9668e7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_02.json @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_03.json new file mode 100644 index 00000000..9a037142 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_03.json @@ -0,0 +1 @@ +10 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_04.json new file mode 100644 index 00000000..7d62b4d2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/data_04.json @@ -0,0 +1 @@ +"世界" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/schema_0.json new file mode 100644 index 00000000..30a07477 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxLength/schema_0.json 
@@ -0,0 +1 @@ +{"maxLength":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_00.json new file mode 100644 index 00000000..2518e687 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_00.json @@ -0,0 +1 @@ +{"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_01.json new file mode 100644 index 00000000..b0fcefdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_01.json @@ -0,0 +1 @@ +{"bar":2,"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_02.json new file mode 100644 index 00000000..bce2e6b6 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_02.json @@ -0,0 +1 @@ +{"bar":2,"baz":3,"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_03.json new file mode 100644 index 00000000..b2bb988b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/data_03.json @@ -0,0 +1 @@ +"foobar" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/schema_0.json new file mode 100644 index 00000000..a9d977b6 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maxProperties/schema_0.json @@ -0,0 +1 @@ +{"maxProperties":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_00.json new file mode 100644 index 00000000..c20c8ac5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_00.json @@ -0,0 +1 @@ +2.6 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_01.json new file mode 100644 index 00000000..56e972a2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_01.json @@ -0,0 +1 @@ +3.5 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_02.json new file mode 100644 index 00000000..3403a0c7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_02.json @@ -0,0 +1 @@ +"x" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_10.json new file mode 100644 index 00000000..61618788 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_10.json @@ -0,0 +1 @@ +2.2 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_11.json new file mode 100644 index 00000000..e440e5c8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/data_11.json @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/schema_0.json new file mode 100644 index 00000000..e88aefdf --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/schema_0.json @@ -0,0 +1 @@ +{"maximum":3} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/schema_1.json new file mode 100644 index 00000000..b96954dc --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/maximum/schema_1.json @@ -0,0 +1 @@ +{"exclusiveMaximum":true,"maximum":3} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_00.json new file mode 100644 index 00000000..3169929f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_00.json @@ -0,0 +1 @@ +[1,2] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_01.json new file mode 100644 index 00000000..bace2a0b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_01.json @@ -0,0 +1 @@ +[1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_02.json new file mode 100644 index 00000000..0637a088 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_02.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_03.json new file mode 100644 index 00000000..3cc762b5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/data_03.json @@ -0,0 +1 @@ +"" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/schema_0.json new file mode 100644 index 00000000..dcfe1c90 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minItems/schema_0.json @@ -0,0 +1 @@ +{"minItems":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_00.json new file mode 100644 index 00000000..7e9668e7 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_00.json @@ -0,0 +1 @@ +"foo" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_01.json new file mode 100644 index 00000000..67af23e2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_01.json @@ -0,0 +1 @@ +"fo" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_02.json new file mode 100644 index 00000000..91d1e395 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_02.json @@ -0,0 +1 @@ +"f" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_03.json new file mode 100644 index 00000000..56a6051c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_03.json @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_04.json new file mode 100644 index 00000000..62db65a1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/data_04.json @@ -0,0 +1 @@ +"世" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/schema_0.json new file mode 100644 index 00000000..efc23046 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minLength/schema_0.json @@ -0,0 +1 @@ +{"minLength":2} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_00.json new file mode 100644 index 00000000..b0fcefdb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_00.json @@ -0,0 +1 @@ +{"bar":2,"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_01.json new file mode 100644 index 00000000..2518e687 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_01.json @@ -0,0 +1 @@ +{"foo":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_02.json new file mode 100644 index 00000000..9e26dfee --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_02.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_03.json new file mode 100644 index 00000000..3cc762b5 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/data_03.json @@ -0,0 +1 @@ +"" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/schema_0.json new file mode 100644 index 00000000..87b25c2b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minProperties/schema_0.json @@ -0,0 +1 @@ +{"minProperties":1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_00.json new file mode 100644 index 00000000..c20c8ac5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_00.json @@ -0,0 +1 @@ +2.6 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_01.json new file mode 100644 index 00000000..490f510f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_01.json @@ -0,0 +1 @@ +0.6 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_02.json new file mode 100644 index 00000000..3403a0c7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_02.json @@ -0,0 +1 @@ +"x" \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_10.json new file mode 100644 index 00000000..ea710abb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_10.json @@ -0,0 +1 @@ +1.2 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_11.json new file mode 100644 index 00000000..b123147e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/data_11.json @@ -0,0 +1 @@ +1.1 \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/schema_0.json new file mode 100644 index 00000000..0acddfc1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/schema_0.json @@ -0,0 +1 @@ +{"minimum":1.1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/schema_1.json new file mode 100644 index 00000000..e0945a43 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/minimum/schema_1.json @@ -0,0 +1 @@ +{"exclusiveMinimum":true,"minimum":1.1} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_00.json new file mode 100644 index 00000000..9a037142 --- /dev/null +++ 
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_00.json
new file mode 100644
index 00000000..9a037142
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_00.json
@@ -0,0 +1 @@
+10
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_01.json
new file mode 100644
index 00000000..c7930257
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_01.json
@@ -0,0 +1 @@
+7
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_02.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_02.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_10.json
new file mode 100644
index 00000000..c2270834
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_10.json
@@ -0,0 +1 @@
+0
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_11.json
new file mode 100644
index 00000000..958d30d8
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_11.json
@@ -0,0 +1 @@
+4.5
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_12.json
new file mode 100644
index 00000000..597975b4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_12.json
@@ -0,0 +1 @@
+35
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_20.json
new file mode 100644
index 00000000..136664fd
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_20.json
@@ -0,0 +1 @@
+0.0075
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_21.json
new file mode 100644
index 00000000..77dfb807
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/data_21.json
@@ -0,0 +1 @@
+0.00751
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_0.json
new file mode 100644
index 00000000..b51609f2
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_0.json
@@ -0,0 +1 @@
+{"multipleOf":2}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_1.json
new file mode 100644
index 00000000..2ee79665
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_1.json
@@ -0,0 +1 @@
+{"multipleOf":1.5}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_2.json
new file mode 100644
index 00000000..de303ef6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/multipleOf/schema_2.json
@@ -0,0 +1 @@
+{"multipleOf":0.0001}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_00.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_00.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_01.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_01.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_10.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_10.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_11.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_11.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_12.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_12.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_20.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_20.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_21.json
new file mode 100644
index 00000000..2518e687
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_21.json
@@ -0,0 +1 @@
+{"foo":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_22.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_22.json
new file mode 100644
index 00000000..9f5dd4e3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/data_22.json
@@ -0,0 +1 @@
+{"foo":"bar"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_0.json
new file mode 100644
index 00000000..fc125d79
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_0.json
@@ -0,0 +1 @@
+{"not":{"type":"integer"}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_1.json
new file mode 100644
index 00000000..7f219476
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_1.json
@@ -0,0 +1 @@
+{"not":{"type":["integer","boolean"]}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_2.json
new file mode 100644
index 00000000..a3f16808
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/not/schema_2.json
@@ -0,0 +1 @@
+{"not":{"properties":{"foo":{"type":"string"}},"type":"object"}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_00.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_00.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_01.json
new file mode 100644
index 00000000..68151b2e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_01.json
@@ -0,0 +1 @@
+2.5
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_02.json
new file mode 100644
index 00000000..e440e5c8
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_02.json
@@ -0,0 +1 @@
+3
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_03.json
new file mode 100644
index 00000000..400122e6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_03.json
@@ -0,0 +1 @@
+1.5
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_10.json
new file mode 100644
index 00000000..e440e5c8
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_10.json
@@ -0,0 +1 @@
+3
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_11.json
new file mode 100644
index 00000000..b2bb988b
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_11.json
@@ -0,0 +1 @@
+"foobar"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_12.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/data_12.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/schema_0.json
new file mode 100644
index 00000000..217c51e6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/schema_0.json
@@ -0,0 +1 @@
+{"oneOf":[{"type":"integer"},{"minimum":2}]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/schema_1.json
new file mode 100644
index 00000000..4dae78a5
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/oneOf/schema_1.json
@@ -0,0 +1 @@
+{"oneOf":[{"minLength":2},{"maxLength":4}],"type":"string"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_00.json
new file mode 100644
index 00000000..a9cf1d43
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_00.json
@@ -0,0 +1 @@
+"aaa"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_01.json
new file mode 100644
index 00000000..4f44a210
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_01.json
@@ -0,0 +1 @@
+"abc"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_02.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/data_02.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/schema_0.json
new file mode 100644
index 00000000..47a0a0e2
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/pattern/schema_0.json
@@ -0,0 +1 @@
+{"pattern":"^a*$"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_00.json
new file mode 100644
index 00000000..2518e687
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_00.json
@@ -0,0 +1 @@
+{"foo":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_01.json
new file mode 100644
index 00000000..40129f47
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_01.json
@@ -0,0 +1 @@
+{"foo":1,"foooooo":2}
\ No newline at end of file
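The `pattern` fixtures above encode two draft-4 details worth noting: `pattern` is a partial match unless the regex is explicitly anchored (hence `^a*$` in schema_0.json), and the keyword constrains only strings, which is why the boolean in data_02.json validates. A quick illustration, again with an assumed gojsonschema driver rather than anything in this diff:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{"pattern":"^a*$"}`)
	// "aaa" matches, "abc" does not, and the boolean is ignored by "pattern".
	for _, doc := range []string{`"aaa"`, `"abc"`, `true`} {
		result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> valid: %v\n", doc, result.Valid()) // true, false, true
	}
}
```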
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_02.json
new file mode 100644
index 00000000..7a2b52f4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_02.json
@@ -0,0 +1 @@
+{"foo":"bar","fooooo":2}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_03.json
new file mode 100644
index 00000000..e7e5b7c9
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_03.json
@@ -0,0 +1 @@
+{"foo":"bar","foooooo":"baz"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_04.json
new file mode 100644
index 00000000..3cacc0b9
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_04.json
@@ -0,0 +1 @@
+12
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_10.json
new file mode 100644
index 00000000..c7b8e693
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_10.json
@@ -0,0 +1 @@
+{"a":21}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_11.json
new file mode 100644
index 00000000..644136dd
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_11.json
@@ -0,0 +1 @@
+{"aaaa":18}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_12.json
new file mode 100644
index 00000000..4af39994
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_12.json
@@ -0,0 +1 @@
+{"a":21,"aaaa":18}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_13.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_13.json
new file mode 100644
index 00000000..5286d3b9
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_13.json
@@ -0,0 +1 @@
+{"a":"bar"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_14.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_14.json
new file mode 100644
index 00000000..b778e0fa
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_14.json
@@ -0,0 +1 @@
+{"aaaa":31}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_15.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_15.json
new file mode 100644
index 00000000..786f9cb1
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_15.json
@@ -0,0 +1 @@
+{"aaa":"foo","aaaa":31}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_20.json
new file mode 100644
index 00000000..b2ef65b7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_20.json
@@ -0,0 +1 @@
+{"answer 1":"42"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_21.json
new file mode 100644
index 00000000..f8f6268d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_21.json
@@ -0,0 +1 @@
+{"a31b":null}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_22.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_22.json
new file mode 100644
index 00000000..bfc72fb3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_22.json
@@ -0,0 +1 @@
+{"a_x_3":3}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_23.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_23.json
new file mode 100644
index 00000000..cd32d32f
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_23.json
@@ -0,0 +1 @@
+{"a_X_3":3}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_24.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_24.json
new file mode 100644
index 00000000..c578f690
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_24.json
@@ -0,0 +1,4 @@
+{
+ "a": "hello",
+ "aaaaaaaaaaaaaaaaaaa": null
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_25.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_25.json
new file mode 100644
index 00000000..dd415e02
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_25.json
@@ -0,0 +1,3 @@
+{
+ "aaaaaaaaaaaaaaaaaaa": null
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_26.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_26.json
new file mode 100644
index 00000000..1b44a9f6
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/data_26.json
@@ -0,0 +1,6 @@
+{
+ "dictionary": {
+ "aaaaaaaaaaaaaaaaaaaa": null,
+ "b": "hello"
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_0.json
new file mode 100644
index 00000000..6f5a8199
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_0.json
@@ -0,0 +1 @@
+{"patternProperties":{"f.*o":{"type":"integer"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_1.json
new file mode 100644
index 00000000..8776f201
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_1.json
@@ -0,0 +1 @@
+{"patternProperties":{"a*":{"type":"integer"},"aaa*":{"maximum":20}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_2.json
new file mode 100644
index 00000000..04a4cbc8
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_2.json
@@ -0,0 +1 @@
+{"patternProperties":{"X_":{"type":"string"},"[0-9]{2,}":{"type":"boolean"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_3.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_3.json
new file mode 100644
index 00000000..03a67b99
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_3.json
@@ -0,0 +1,8 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9]{1,10}$": { "type": "string" }
+ },
+ "additionalProperties": false
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_4.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_4.json
new file mode 100644
index 00000000..4c35adb5
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/patternProperties/schema_4.json
@@ -0,0 +1,14 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "properties": {
+ "dictionary": {
+ "type": "object",
+ "patternProperties": {
+ "^[a-zA-Z0-9]{1,10}$": { "type": "string" }
+ },
+ "additionalProperties": false
+ }
+ },
+ "additionalProperties": false
+}
\ No newline at end of file
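schema_3.json and schema_4.json above combine `patternProperties` with `additionalProperties: false`: a key matching `^[a-zA-Z0-9]{1,10}$` must map to a string, while any key that misses the pattern (for example the 19-character key in data_24.json and data_25.json) falls through to `additionalProperties` and is rejected outright. A sketch of that interplay (illustrative driver, not part of this diff):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
	  "type": "object",
	  "patternProperties": { "^[a-zA-Z0-9]{1,10}$": { "type": "string" } },
	  "additionalProperties": false
	}`)
	// The long key fails the 1-10 character pattern, so it is judged by
	// additionalProperties:false and the document is rejected.
	doc := gojsonschema.NewStringLoader(`{"a": "hello", "aaaaaaaaaaaaaaaaaaa": null}`)
	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", result.Valid()) // valid: false
}
```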
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_00.json
new file mode 100644
index 00000000..022ce236
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_00.json
@@ -0,0 +1 @@
+{"bar":"baz","foo":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_01.json
new file mode 100644
index 00000000..9ed62200
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_01.json
@@ -0,0 +1 @@
+{"bar":{},"foo":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_02.json
new file mode 100644
index 00000000..00062ccb
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_02.json
@@ -0,0 +1 @@
+{"bar":{},"foo":[]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_03.json
new file mode 100644
index 00000000..81ebc367
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_03.json
@@ -0,0 +1 @@
+{"quux":[]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_04.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_04.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_10.json
new file mode 100644
index 00000000..a82c1531
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_10.json
@@ -0,0 +1 @@
+{"foo":[1,2]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_11.json
new file mode 100644
index 00000000..f08510eb
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_11.json
@@ -0,0 +1 @@
+{"foo":[1,2,3,4]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_12.json
new file mode 100644
index 00000000..2ad6a35a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_12.json
@@ -0,0 +1 @@
+{"foo":[]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_13.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_13.json
new file mode 100644
index 00000000..13370fe0
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_13.json
@@ -0,0 +1 @@
+{"fxo":[1,2]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_14.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_14.json
new file mode 100644
index 00000000..f82be5d3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_14.json
@@ -0,0 +1 @@
+{"fxo":[]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_15.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_15.json
new file mode 100644
index 00000000..2ed66803
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_15.json
@@ -0,0 +1 @@
+{"bar":[]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_16.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_16.json
new file mode 100644
index 00000000..935e5254
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_16.json
@@ -0,0 +1 @@
+{"quux":3}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_17.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_17.json
new file mode 100644
index 00000000..531bbc94
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/data_17.json
@@ -0,0 +1 @@
+{"quux":"foo"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/schema_0.json
new file mode 100644
index 00000000..2c73cd58
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/schema_0.json
@@ -0,0 +1 @@
+{"properties":{"bar":{"type":"string"},"foo":{"type":"integer"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/schema_1.json
new file mode 100644
index 00000000..144f96e3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/properties/schema_1.json
@@ -0,0 +1 @@
+{"additionalProperties":{"type":"integer"},"patternProperties":{"f.o":{"minItems":2}},"properties":{"bar":{"type":"array"},"foo":{"maxItems":3,"type":"array"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_00.json
new file mode 100644
index 00000000..1bfa80c1
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_00.json
@@ -0,0 +1 @@
+{"foo":false}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_01.json
new file mode 100644
index 00000000..27768a40
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_01.json
@@ -0,0 +1 @@
+{"foo":{"foo":false}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_02.json
new file mode 100644
index 00000000..a2742720
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_02.json
@@ -0,0 +1 @@
+{"bar":false}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_03.json
new file mode 100644
index 00000000..1de66762
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_03.json
@@ -0,0 +1 @@
+{"foo":{"bar":false}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_10.json
new file mode 100644
index 00000000..cc6193bd
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_10.json
@@ -0,0 +1 @@
+{"bar":3}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_11.json
new file mode 100644
index 00000000..7716a931
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_11.json
@@ -0,0 +1 @@
+{"bar":true}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_20.json
new file mode 100644
index 00000000..3169929f
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_20.json
@@ -0,0 +1 @@
+[1,2]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_21.json
new file mode 100644
index 00000000..f4e52901
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_21.json
@@ -0,0 +1 @@
+[1,"foo"]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_30.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_30.json
new file mode 100644
index 00000000..1606db7f
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_30.json
@@ -0,0 +1 @@
+{"slash":"aoeu"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_31.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_31.json
new file mode 100644
index 00000000..51cdcbc4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_31.json
@@ -0,0 +1 @@
+{"tilda":"aoeu"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_32.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_32.json
new file mode 100644
index 00000000..1aa03c6a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_32.json
@@ -0,0 +1 @@
+{"percent":"aoeu"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_40.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_40.json
new file mode 100644
index 00000000..7813681f
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_40.json
@@ -0,0 +1 @@
+5
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_41.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_41.json
new file mode 100644
index 00000000..075c842d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_41.json
@@ -0,0 +1 @@
+"a"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_50.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_50.json
new file mode 100644
index 00000000..9d936d56
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_50.json
@@ -0,0 +1 @@
+{"minLength":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_51.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_51.json
new file mode 100644
index 00000000..64ba18b3
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/data_51.json
@@ -0,0 +1 @@
+{"minLength":-1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_0.json
new file mode 100644
index 00000000..a243138b
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_0.json
@@ -0,0 +1 @@
+{"additionalProperties":false,"properties":{"foo":{"$ref":"#"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_1.json
new file mode 100644
index 00000000..5475b8ef
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_1.json
@@ -0,0 +1 @@
+{"properties":{"bar":{"$ref":"#/properties/foo"},"foo":{"type":"integer"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_2.json
new file mode 100644
index 00000000..80f5a14a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_2.json
@@ -0,0 +1 @@
+{"items":[{"type":"integer"},{"$ref":"#/items/0"}]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_3.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_3.json
new file mode 100644
index 00000000..5f5c1e25
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_3.json
@@ -0,0 +1 @@
+{"percent%field":{"type":"integer"},"properties":{"percent":{"$ref":"#/percent%25field"},"slash":{"$ref":"#/slash~1field"},"tilda":{"$ref":"#/tilda~0field"}},"slash/field":{"type":"integer"},"tilda~field":{"type":"integer"}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_4.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_4.json
new file mode 100644
index 00000000..ac40ec7a
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_4.json
@@ -0,0 +1 @@
+{"$ref":"#/definitions/c","definitions":{"a":{"type":"integer"},"b":{"$ref":"#/definitions/a"},"c":{"$ref":"#/definitions/b"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_5.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_5.json
new file mode 100644
index 00000000..412c2163
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/ref/schema_5.json
@@ -0,0 +1 @@
+{"$ref":"http://json-schema.org/draft-04/schema#"}
\ No newline at end of file
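ref/schema_3.json above exercises JSON Pointer escaping in `$ref`: `~` is written `~0`, `/` is written `~1`, and `%` must additionally be URI-encoded as `%25` in the fragment, which is how `#/tilda~0field`, `#/slash~1field`, and `#/percent%25field` reach the `tilda~field`, `slash/field`, and `percent%field` members. The `refRemote` fixtures that follow resolve against `http://localhost:1234/`, so a test run presumably serves the `remoteFiles` directory at that address first. A minimal sketch of such a setup (assumed harness, not part of this diff):

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Serve the vendored remoteFiles fixtures at the address the refRemote
	// schemas hard-code. A real harness would synchronize startup properly.
	go http.ListenAndServe("localhost:1234", http.FileServer(http.Dir("remoteFiles")))
	time.Sleep(100 * time.Millisecond) // crude wait for the listener

	// refRemote/schema_2.json below: an indirect remote ref to an integer type.
	schema := gojsonschema.NewReferenceLoader("http://localhost:1234/subSchemas.json#/refToInteger")
	result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(`"a"`))
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", result.Valid()) // valid: false
}
```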
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_00.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_00.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_01.json
new file mode 100644
index 00000000..075c842d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_01.json
@@ -0,0 +1 @@
+"a"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_10.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_10.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_11.json
new file mode 100644
index 00000000..075c842d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_11.json
@@ -0,0 +1 @@
+"a"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_20.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_20.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_21.json
new file mode 100644
index 00000000..075c842d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_21.json
@@ -0,0 +1 @@
+"a"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_30.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_30.json
new file mode 100644
index 00000000..89d00ba2
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_30.json
@@ -0,0 +1 @@
+[[1]]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_31.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_31.json
new file mode 100644
index 00000000..294b06e0
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/data_31.json
@@ -0,0 +1 @@
+[["a"]]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/folder/folderInteger.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/folder/folderInteger.json
new file mode 100644
index 00000000..dbe5c758
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/folder/folderInteger.json
@@ -0,0 +1,3 @@
+{
+ "type": "integer"
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/integer.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/integer.json
new file mode 100644
index 00000000..dbe5c758
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/integer.json
@@ -0,0 +1,3 @@
+{
+ "type": "integer"
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/subSchemas.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/subSchemas.json
new file mode 100644
index 00000000..8b6d8f84
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/remoteFiles/subSchemas.json
@@ -0,0 +1,8 @@
+{
+ "integer": {
+ "type": "integer"
+ },
+ "refToInteger": {
+ "$ref": "#/integer"
+ }
+}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_0.json
new file mode 100644
index 00000000..c9d47df2
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_0.json
@@ -0,0 +1 @@
+{"$ref":"http://localhost:1234/integer.json"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_1.json
new file mode 100644
index 00000000..de9da7fe
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_1.json
@@ -0,0 +1 @@
+{"$ref":"http://localhost:1234/subSchemas.json#/integer"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_2.json
new file mode 100644
index 00000000..40b5213d
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_2.json
@@ -0,0 +1 @@
+{"$ref":"http://localhost:1234/subSchemas.json#/refToInteger"}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_3.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_3.json
new file mode 100644
index 00000000..51bd33d7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/refRemote/schema_3.json
@@ -0,0 +1 @@
+{"id":"http://localhost:1234","items":{"id":"folder/","items":{"$ref":"folderInteger.json"}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_00.json
new file mode 100644
index 00000000..2518e687
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_00.json
@@ -0,0 +1 @@
+{"foo":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_01.json
new file mode 100644
index 00000000..ba06f3f8
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_01.json
@@ -0,0 +1 @@
+{"bar":1}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_10.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/data_10.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
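The `required` fixtures around this point make a detail explicit that is easy to miss in draft 4: declaring a property under `properties` does not make it mandatory; only the `required` array does, which is why the empty object satisfies schema_1.json below. A sketch with the same assumed driver as the earlier examples:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// schema_0.json below: "foo" is required, "bar" is merely described.
	schema := gojsonschema.NewStringLoader(`{"properties":{"bar":{},"foo":{}},"required":["foo"]}`)
	for _, doc := range []string{`{"foo":1}`, `{"bar":1}`} {
		result, err := gojsonschema.Validate(schema, gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> valid: %v\n", doc, result.Valid()) // true, then false
	}
}
```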
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/schema_0.json
new file mode 100644
index 00000000..237ecc42
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/schema_0.json
@@ -0,0 +1 @@
+{"properties":{"bar":{},"foo":{}},"required":["foo"]}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/schema_1.json
new file mode 100644
index 00000000..5891a518
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/required/schema_1.json
@@ -0,0 +1 @@
+{"properties":{"foo":{}}}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_00.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_00.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_01.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_01.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_02.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_02.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_03.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_03.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_04.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_04.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_05.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_05.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_05.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_06.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_06.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_06.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_10.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_10.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_10.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_11.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_11.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_11.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_12.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_12.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_12.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_13.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_13.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_13.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_14.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_14.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_14.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_15.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_15.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_15.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_16.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_16.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_16.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_20.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_20.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_20.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_21.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_21.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_21.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_22.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_22.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_22.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_23.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_23.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_23.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_24.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_24.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_24.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_25.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_25.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_25.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_26.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_26.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_26.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_30.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_30.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_30.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_31.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_31.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_31.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_32.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_32.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_32.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_33.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_33.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_33.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_34.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_34.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_34.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_35.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_35.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_35.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_36.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_36.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_36.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_40.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_40.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_40.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_41.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_41.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_41.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_42.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_42.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_42.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_43.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_43.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_43.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_44.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_44.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_44.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_45.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_45.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_45.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_46.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_46.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_46.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_50.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_50.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_50.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_51.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_51.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_51.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_52.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_52.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_52.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_53.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_53.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_53.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_54.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_54.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_54.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_55.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_55.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_55.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_56.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_56.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_56.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_60.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_60.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_60.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_61.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_61.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_61.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_62.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_62.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_62.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_63.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_63.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_63.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_64.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_64.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_64.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_65.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_65.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_65.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_66.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_66.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_66.json
@@ -0,0 +1 @@
+null
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_70.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_70.json
new file mode 100644
index 00000000..56a6051c
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_70.json
@@ -0,0 +1 @@
+1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_71.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_71.json
new file mode 100644
index 00000000..7e9668e7
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_71.json
@@ -0,0 +1 @@
+"foo"
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_72.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_72.json
new file mode 100644
index 00000000..b123147e
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_72.json
@@ -0,0 +1 @@
+1.1
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_73.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_73.json
new file mode 100644
index 00000000..9e26dfee
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_73.json
@@ -0,0 +1 @@
+{}
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_74.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_74.json
new file mode 100644
index 00000000..0637a088
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_74.json
@@ -0,0 +1 @@
+[]
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_75.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_75.json
new file mode 100644
index 00000000..f32a5804
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_75.json
@@ -0,0 +1 @@
+true
\ No newline at end of file
diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_76.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_76.json
new file mode 100644
index 00000000..ec747fa4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/data_76.json
@@ -0,0 +1 @@
+null \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_0.json new file mode 100644 index 00000000..2be72ba1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_0.json @@ -0,0 +1 @@ +{"type":"integer"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_1.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_1.json new file mode 100644 index 00000000..da4eeec7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_1.json @@ -0,0 +1 @@ +{"type":"number"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_2.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_2.json new file mode 100644 index 00000000..4db187eb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_2.json @@ -0,0 +1 @@ +{"type":"string"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_3.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_3.json new file mode 100644 index 00000000..ffa5c9f8 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_3.json @@ -0,0 +1 @@ +{"type":"object"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_4.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_4.json new file mode 100644 index 00000000..bc88ff4a --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_4.json @@ -0,0 +1 @@ +{"type":"array"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_5.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_5.json new file mode 100644 index 00000000..7763bc51 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_5.json @@ -0,0 +1 @@ +{"type":"boolean"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_6.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_6.json new file mode 100644 index 00000000..fedaf464 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_6.json @@ -0,0 +1 @@ +{"type":"null"} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_7.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_7.json new file mode 100644 index 00000000..c243f0b7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/type/schema_7.json @@ -0,0 +1 @@ +{"type":["integer","string"]} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_00.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_00.json new file mode 100644 index 00000000..3169929f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_00.json @@ -0,0 +1 @@ +[1,2] \ No newline at end of file diff --git 
a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_01.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_01.json new file mode 100644 index 00000000..a7c0e4b1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_01.json @@ -0,0 +1 @@ +[1,1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_010.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_010.json new file mode 100644 index 00000000..7e6261e1 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_010.json @@ -0,0 +1 @@ +[0,false] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_011.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_011.json new file mode 100644 index 00000000..48006ce5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_011.json @@ -0,0 +1 @@ +[{},[1],true,null,1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_012.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_012.json new file mode 100644 index 00000000..067faf53 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_012.json @@ -0,0 +1 @@ +[{},[1],true,null,{},1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_02.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_02.json new file mode 100644 index 00000000..22881161 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_02.json @@ -0,0 +1 @@ +[1,1,1] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_03.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_03.json new file mode 100644 index 00000000..b7734bb0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_03.json @@ -0,0 +1 @@ +[{"foo":"bar"},{"foo":"baz"}] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_04.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_04.json new file mode 100644 index 00000000..67d501bc --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_04.json @@ -0,0 +1 @@ +[{"foo":"bar"},{"foo":"bar"}] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_05.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_05.json new file mode 100644 index 00000000..4e042c2f --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_05.json @@ -0,0 +1 @@ +[{"foo":{"bar":{"baz":true}}},{"foo":{"bar":{"baz":false}}}] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_06.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_06.json new file mode 100644 index 00000000..0f63748a --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_06.json @@ -0,0 +1 @@ +[{"foo":{"bar":{"baz":true}}},{"foo":{"bar":{"baz":true}}}] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_07.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_07.json new file mode 100644 index 00000000..8318b7d7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_07.json @@ -0,0 +1 @@ +[["foo"],["bar"]] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_08.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_08.json new file mode 100644 index 00000000..15caa6c0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_08.json @@ -0,0 +1 @@ +[["foo"],["foo"]] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_09.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_09.json new file mode 100644 index 00000000..1083d092 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/data_09.json @@ -0,0 +1 @@ +[1,true] \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/schema_0.json b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/schema_0.json new file mode 100644 index 00000000..d598d705 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/json_schema_test_suite/uniqueItems/schema_0.json @@ -0,0 +1 @@ +{"uniqueItems":true} \ No newline at end of file diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 00000000..de05d60d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,275 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. 
+// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + Required() string + InvalidType() string + NumberAnyOf() string + NumberOneOf() string + NumberAllOf() string + NumberNot() string + MissingDependency() string + Internal() string + Enum() string + ArrayNoAdditionalItems() string + ArrayMinItems() string + ArrayMaxItems() string + Unique() string + ArrayMinProperties() string + ArrayMaxProperties() string + AdditionalPropertyNotAllowed() string + InvalidPropertyPattern() string + StringGTE() string + StringLTE() string + DoesNotMatchPattern() string + DoesNotMatchFormat() string + MultipleOf() string + NumberGTE() string + NumberGT() string + NumberLTE() string + NumberLT() string + + // Schema validations + RegexPattern() string + GreaterThanZero() string + MustBeOfA() string + MustBeOfAn() string + CannotBeUsedWithout() string + CannotBeGT() string + MustBeOfType() string + MustBeValidRegex() string + MustBeValidFormat() string + MustBeGTEZero() string + KeyCannotBeGreaterThan() string + KeyItemsMustBeOfType() string + KeyItemsMustBeUnique() string + ReferenceMustBeCanonical() string + NotAValidType() string + Duplicated() string + httpBadStatus() string + + // ErrorFormat + ErrorFormat() string + } + + // DefaultLocale is the default locale for this package + DefaultLocale struct{} +) + +func (l DefaultLocale) Required() string { + return `%property% is required` +} + +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: %expected%, given: %given%` +} + +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on %dependency%` +} + +func (l DefaultLocale) Internal() string { + return `Internal Error %error%` +} + +func (l DefaultLocale) Enum() string { + return `%field% must be one of the following: %allowed%` +} + +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least %min% items` +} + +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most %max% items` +} + +func (l DefaultLocale) Unique() string { + return `%type% items must be unique` +} + +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least %min% properties` +} + +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most %max% properties` +} + +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property %property% is not allowed` +} + +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "%property%" does not match pattern %pattern%` +} + +func (l DefaultLocale) StringGTE() string { + return `String length must be greater than or equal to %min%` +} + +func (l DefaultLocale) StringLTE() string { + return `String length must be less than or equal to %max%` +} + +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '%pattern%'` +} + +func (l DefaultLocale) DoesNotMatchFormat() string
{ + return `Does not match format '%format%'` +} + +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of %multiple%` +} + +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to %min%` +} + +func (l DefaultLocale) NumberGT() string { + return `Must be greater than %min%` +} + +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to %max%` +} + +func (l DefaultLocale) NumberLT() string { + return `Must be less than %max%` +} + +// Schema validators +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '%pattern%'` +} + +func (l DefaultLocale) GreaterThanZero() string { + return `%number% must be strictly greater than 0` +} + +func (l DefaultLocale) MustBeOfA() string { + return `%x% must be of a %y%` +} + +func (l DefaultLocale) MustBeOfAn() string { + return `%x% must be of an %y%` +} + +func (l DefaultLocale) CannotBeUsedWithout() string { + return `%x% cannot be used without %y%` +} + +func (l DefaultLocale) CannotBeGT() string { + return `%x% cannot be greater than %y%` +} + +func (l DefaultLocale) MustBeOfType() string { + return `%key% must be of type %type%` +} + +func (l DefaultLocale) MustBeValidRegex() string { + return `%key% must be a valid regex` +} + +func (l DefaultLocale) MustBeValidFormat() string { + return `%key% must be a valid format %given%` +} + +func (l DefaultLocale) MustBeGTEZero() string { + return `%key% must be greater than or equal to 0` +} + +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `%key% cannot be greater than %y%` +} + +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `%key% items must be %type%` +} + +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `%key% items must be unique` +} + +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference %reference% must be canonical` +} + +func (l DefaultLocale) NotAValidType() string { + return `%type% is not a valid type -- ` +} + +func (l DefaultLocale) Duplicated() string { + return `%type% type is duplicated` +} + +func (l DefaultLocale) httpBadStatus() string { + return `Could not read schema from HTTP, response status is %status%` +} + +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `%field%: %description%` +} + +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES = "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go new file mode 100644 index 00000000..c6d33758 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -0,0 +1,171 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
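The vendored `locales.go` above defines every validation message as a template with `%token%` placeholders, looked up through the exported `Locale` variable (declared in `schema.go` further down in this patch). A minimal sketch of swapping in custom wording — not part of this patch; embedding `DefaultLocale` fills in the rest of the unexported `locale` interface (including `httpBadStatus`), and `NewStringLoader`/`Validate` come from files of the package outside this hunk:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

// terseLocale overrides two messages; the embedded DefaultLocale supplies
// every other method the (unexported) locale interface requires.
type terseLocale struct {
	gojsonschema.DefaultLocale
}

func (terseLocale) Required() string {
	return `missing required property %property%`
}

// Replacement options per locales.go: field, description, context, value.
func (terseLocale) ErrorFormat() string {
	return `%context%: %description%`
}

func main() {
	gojsonschema.Locale = terseLocale{} // package-level, process-wide switch

	schema := gojsonschema.NewStringLoader(`{"type":"object","required":["name"]}`)
	doc := gojsonschema.NewStringLoader(`{}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err)
	}
	for _, e := range result.Errors() {
		fmt.Println(e) // e.g. (root): missing required property name
	}
}
```

Because `Locale` is a single package-level variable, the override applies to every schema in the process rather than per validation.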
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + Field() string + SetType(string) + Type() string + SetContext(*jsonContext) + Context() *jsonContext + SetDescription(string) + Description() string + SetValue(interface{}) + Value() interface{} + SetDetails(ErrorDetails) + Details() ErrorDetails + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field outputs the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName +func (v *ResultErrorFields) Field() string { + if p, ok := v.Details()["property"]; ok { + if str, isString := p.(string); isString { + return str + } + } + + return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") +} + +func (v *ResultErrorFields) SetType(errorType string) { + v.errorType = errorType +} + +func (v *ResultErrorFields) Type() string { + return v.errorType +} + +func (v *ResultErrorFields) SetContext(context *jsonContext) { + v.context = context +} + +func (v *ResultErrorFields) Context() *jsonContext { + return v.context +} + +func (v *ResultErrorFields) SetDescription(description string) { + v.description = description +} + +func (v *ResultErrorFields) Description() string { + return v.description +} + +func (v *ResultErrorFields) SetValue(value interface{}) { + v.value = value +} + +func (v *ResultErrorFields) Value() interface{} { + return v.value +} + +func (v *ResultErrorFields) SetDetails(details ErrorDetails) { + v.details = details +} + +func (v *ResultErrorFields) Details() ErrorDetails { + return v.details +} + +func (v ResultErrorFields) String() string { + // as a fallback, the value is displayed go style + valueString := fmt.Sprintf("%v", v.value) + + // marshal the go value value to json + if v.value == nil { + valueString = TYPE_NULL + } else { + if vs, err := marshalToJsonString(v.value); err == nil { + if vs == nil { + valueString = TYPE_NULL + } else { + valueString = *vs + } + } + } + + return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ + "context": v.context.String(), + "description": v.description, + "value": valueString, + "field": v.Field(), + }) +} + +func (v *Result) Valid() bool { + return len(v.errors) == 0 +} + +func (v *Result) Errors() []ResultError { + return v.errors +} + +func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) { + newError(err, context, value, Locale, details) + v.errors = append(v.errors, err) + v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function +} + +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 00000000..577c7863 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,908 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
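The `result.go` hunk above is the whole public error surface: `Result.Valid()`/`Errors()`, plus `Field()`, `Type()`, `Description()`, and `Details()` on each error, with the `score` field serving only as internal bookkeeping for picking better `anyOf`/`oneOf` messages. A short consumer sketch under the same assumptions as the previous one (the loaders and the package-level `Validate` live outside this hunk):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	schema := gojsonschema.NewStringLoader(`{
		"type": "object",
		"properties": {"age": {"type": "integer", "minimum": 0}},
		"required": ["age"]
	}`)
	doc := gojsonschema.NewStringLoader(`{"age": -3}`)

	result, err := gojsonschema.Validate(schema, doc)
	if err != nil {
		panic(err) // the schema or document itself could not be loaded
	}
	if result.Valid() {
		fmt.Println("document is valid")
		return
	}
	for _, e := range result.Errors() {
		// Field() strips the "(root)." prefix, as implemented above, so
		// this prints something like: age (number_gte): Must be greater...
		fmt.Printf("%s (%s): %s\n", e.Field(), e.Type(), e.Description())
	}
}
```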
+// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + // "encoding/json" + "errors" + "reflect" + "regexp" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} +) + +func NewSchema(l JSONLoader) (*Schema, error) { + return l.loadSchema() +} + +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY} + return d.parseSchema(document, d.rootSchema) +} + +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_OBJECT, + "given": STRING_SCHEMA, + }, + )) + } + + m := documentNode.(map[string]interface{}) + + if currentSchema == d.rootSchema { + currentSchema.ref = &d.documentReference + } + + // $subSchema + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_SCHEMA, + }, + )) + } + schemaRef := m[KEY_SCHEMA].(string) + schemaReference, err := gojsonreference.NewJsonReference(schemaRef) + currentSchema.subSchema = &schemaReference + if err != nil { + return err + } + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + if k, ok := m[KEY_REF].(string); ok { + + if sch, ok := d.referencePool.Get(currentSchema.ref.String() + k); ok { + + currentSchema.refSchema = sch + + } else { + + var err error + err = d.parseReference(documentNode, currentSchema, k) + if err != nil { + return err + } + + return nil + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map) { + currentSchema.definitions = make(map[string]*subSchema) + for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map) { + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.definitions[dk] = newSchema + err := d.parseSchema(dv, newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // id + if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) { + return 
errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_ID, + }, + )) + } + if k, ok := m[KEY_ID].(string); ok { + currentSchema.id = &k + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } else { + currentSchema.types.Add(typeInArray.(string)) + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // 
dependencies + if existsMapKey(m, KEY_DEPENDENCIES) { + err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) + if err != nil { + return err + } + } + + // items + if existsMapKey(m, KEY_ITEMS) { + if isKind(m[KEY_ITEMS], reflect.Slice) { + for _, itemElement := range m[KEY_ITEMS].([]interface{}) { + if isKind(itemElement, reflect.Map) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.AddItemsChild(newSchema) + err := d.parseSchema(itemElement, newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + currentSchema.itemsChildrenIsSingleSchema = false + } + } else if isKind(m[KEY_ITEMS], reflect.Map) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.AddItemsChild(newSchema) + err := d.parseSchema(m[KEY_ITEMS], newSchema) + if err != nil { + return err + } + currentSchema.itemsChildrenIsSingleSchema = true + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + } + + // additionalItems + if existsMapKey(m, KEY_ADDITIONAL_ITEMS) { + if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { + currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) + } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalItems = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_ITEMS, + }, + )) + } + } + + // validation : number / integer + + if existsMapKey(m, KEY_MULTIPLE_OF) { + multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) + if multipleOfValue == nil { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_NUMBER, + "given": KEY_MULTIPLE_OF, + }, + )) + } + if *multipleOfValue <= 0 { + return errors.New(formatErrorDescription( + Locale.GreaterThanZero(), + ErrorDetails{"number": KEY_MULTIPLE_OF}, + )) + } + currentSchema.multipleOf = multipleOfValue + } + + if existsMapKey(m, KEY_MINIMUM) { + minimumValue := mustBeNumber(m[KEY_MINIMUM]) + if minimumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.minimum = minimumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool) + currentSchema.exclusiveMinimum = exclusiveMinimumValue + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return 
errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool) + currentSchema.exclusiveMaximum = exclusiveMaximumValue + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER}, + )) + } + } + + if currentSchema.minimum != nil && currentSchema.maximum != nil { + if *currentSchema.minimum > *currentSchema.maximum { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM}, + )) + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if ok && FormatCheckers.Has(formatString) { + currentSchema.format = formatString + } else { + return errors.New(formatErrorDescription( + Locale.MustBeValidFormat(), + ErrorDetails{"key": KEY_FORMAT, "given": m[KEY_FORMAT]}, + )) + } + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if 
existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + err := currentSchema.AddRequired(requiredValue.(string)) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + // validation : all + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + err := currentSchema.AddEnum(v) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddOneOf(newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": 
TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddAnyOf(newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ALL_OF) { + if isKind(m[KEY_ALL_OF], reflect.Slice) { + for _, v := range m[KEY_ALL_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddAllOf(newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_NOT) { + if isKind(m[KEY_NOT], reflect.Map) { + newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} + currentSchema.SetNot(newSchema) + err := d.parseSchema(m[KEY_NOT], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, + )) + } + } + + return nil +} + +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema, reference string) (e error) { + + var err error + + jsonReference, err := gojsonreference.NewJsonReference(reference) + if err != nil { + return err + } + + standaloneDocument := d.pool.GetStandaloneDocument() + + if jsonReference.HasFullUrl { + currentSchema.ref = &jsonReference + } else { + inheritedReference, err := currentSchema.ref.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.ref = inheritedReference + } + + jsonPointer := currentSchema.ref.GetPointer() + + var refdDocumentNode interface{} + + if standaloneDocument != nil { + + var err error + refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument) + if err != nil { + return err + } + + } else { + + var err error + dsp, err := d.pool.GetDocument(*currentSchema.ref) + if err != nil { + return err + } + + refdDocumentNode, _, err = jsonPointer.Get(dsp.Document) + if err != nil { + return err + } + + } + + if !isKind(refdDocumentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, + )) + } + + // returns the loaded referenced subSchema for the caller to update its current subSchema + newSchemaDocument := refdDocumentNode.(map[string]interface{}) + + newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} + d.referencePool.Add(currentSchema.ref.String()+reference, newSchema) + + err = d.parseSchema(newSchemaDocument, newSchema) + if err != nil { + return err + } + + currentSchema.refSchema = newSchema + + return nil + +} + +func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} + 
currentSchema.AddPropertiesChild(newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } else { + valuesToRegister = append(valuesToRegister, value.(string)) + } + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 00000000..70ae7318 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,110 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. 
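That caching is also why it pays to build a `Schema` once and reuse it: `NewSchema` (defined in `schema.go` above) parses and `$ref`-resolves up front, with referenced documents landing in the `schemaPool` so each is fetched at most once. A sketch of the compile-once pattern; note that `(*Schema).Validate` is defined in the package's validation code, which this hunk does not include:

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	loader := gojsonschema.NewStringLoader(`{"type": "array", "uniqueItems": true}`)

	// Parse (and $ref-resolve) once; reuse the *Schema for many documents
	// instead of re-parsing the schema on every call to Validate.
	schema, err := gojsonschema.NewSchema(loader)
	if err != nil {
		panic(err)
	}

	for _, doc := range []string{`[1,2,3]`, `[1,1]`} {
		result, err := schema.Validate(gojsonschema.NewStringLoader(doc))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> valid=%v\n", doc, result.Valid())
	}
}
```

Here `[1,1]` fails for the same reason the `uniqueItems/data_01.json` fixture above is expected to be invalid against `{"uniqueItems":true}`.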
+// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "net/http" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + standaloneDocument interface{} + fs http.FileSystem +} + +func newSchemaPool(fs http.FileSystem) *schemaPool { + + p := &schemaPool{} + p.schemaPoolDocuments = make(map[string]*schemaPoolDocument) + p.standaloneDocument = nil + p.fs = fs + + return p +} + +func (p *schemaPool) SetStandaloneDocument(document interface{}) { + p.standaloneDocument = document +} + +func (p *schemaPool) GetStandaloneDocument() (document interface{}) { + return p.standaloneDocument +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + var err error + + // It is not possible to load anything that is not canonical... + if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference}, + )) + } + + refToUrl := reference + refToUrl.GetUrl().Fragment = "" + + var spd *schemaPoolDocument + + // Try to find the requested document in the pool + for k := range p.schemaPoolDocuments { + if k == refToUrl.String() { + spd = p.schemaPoolDocuments[k] + } + } + + if spd != nil { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + jsonReferenceLoader := NewReferenceLoaderFileSystem(reference.String(), p.fs) + document, err := jsonReferenceLoader.loadJSON() + if err != nil { + return nil, err + } + + spd = &schemaPoolDocument{Document: document} + // add the document to the pool for potential later use + p.schemaPoolDocuments[refToUrl.String()] = spd + + return spd, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 00000000..294e36a7 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,67 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. 
+// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + + p.documents[ref] = sch +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 00000000..e13a0fb0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// Is the schema typed ? that is containing at least one type +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"type": etype})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] 
+ if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema_test.go b/vendor/github.com/xeipuuv/gojsonschema/schema_test.go new file mode 100644 index 00000000..2a1832c5 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema_test.go @@ -0,0 +1,416 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description (Unit) Tests for schema validation. +// +// created 16-06-2013 + +package gojsonschema + +import ( + "fmt" + "net/http" + "os" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "testing" +) + +const displayErrorMessages = false + +var rxInvoice = regexp.MustCompile("^[A-Z]{2}-[0-9]{5}") +var rxSplitErrors = regexp.MustCompile(", ?") + +// Used for remote schema in ref/schema_5.json that defines "uri" and "regex" types +type alwaysTrueFormatChecker struct{} +type invoiceFormatChecker struct{} + +func (a alwaysTrueFormatChecker) IsFormat(input string) bool { + return true +} + +func (a invoiceFormatChecker) IsFormat(input string) bool { + return rxInvoice.MatchString(input) +} + +func TestJsonSchemaTestSuite(t *testing.T) { + + JsonSchemaTestSuiteMap := []map[string]string{ + + map[string]string{"phase": "integer type matches integers", "test": "an integer is an integer", "schema": "type/schema_0.json", "data": "type/data_00.json", "valid": "true"}, + map[string]string{"phase": "integer type matches integers", "test": "a float is not an integer", "schema": "type/schema_0.json", "data": "type/data_01.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "integer type matches integers", "test": "a string is not an integer", "schema": "type/schema_0.json", "data": "type/data_02.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "integer type matches integers", "test": "an object is not an integer", "schema": "type/schema_0.json", "data": "type/data_03.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "integer type matches integers", "test": "an array is not an integer", "schema": "type/schema_0.json", "data": "type/data_04.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "integer type matches integers", "test": "a boolean is not an integer", "schema": "type/schema_0.json", "data": "type/data_05.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "integer type matches integers", "test": "null is not an integer", "schema": "type/schema_0.json", "data": "type/data_06.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "number type matches numbers", "test": "an 
integer is a number", "schema": "type/schema_1.json", "data": "type/data_10.json", "valid": "true"}, + map[string]string{"phase": "number type matches numbers", "test": "a float is a number", "schema": "type/schema_1.json", "data": "type/data_11.json", "valid": "true"}, + map[string]string{"phase": "number type matches numbers", "test": "a string is not a number", "schema": "type/schema_1.json", "data": "type/data_12.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "number type matches numbers", "test": "an object is not a number", "schema": "type/schema_1.json", "data": "type/data_13.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "number type matches numbers", "test": "an array is not a number", "schema": "type/schema_1.json", "data": "type/data_14.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "number type matches numbers", "test": "a boolean is not a number", "schema": "type/schema_1.json", "data": "type/data_15.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "number type matches numbers", "test": "null is not a number", "schema": "type/schema_1.json", "data": "type/data_16.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "string type matches strings", "test": "1 is not a string", "schema": "type/schema_2.json", "data": "type/data_20.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "string type matches strings", "test": "a float is not a string", "schema": "type/schema_2.json", "data": "type/data_21.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "string type matches strings", "test": "a string is a string", "schema": "type/schema_2.json", "data": "type/data_22.json", "valid": "true"}, + map[string]string{"phase": "string type matches strings", "test": "an object is not a string", "schema": "type/schema_2.json", "data": "type/data_23.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "string type matches strings", "test": "an array is not a string", "schema": "type/schema_2.json", "data": "type/data_24.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "string type matches strings", "test": "a boolean is not a string", "schema": "type/schema_2.json", "data": "type/data_25.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "string type matches strings", "test": "null is not a string", "schema": "type/schema_2.json", "data": "type/data_26.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object type matches objects", "test": "an integer is not an object", "schema": "type/schema_3.json", "data": "type/data_30.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object type matches objects", "test": "a float is not an object", "schema": "type/schema_3.json", "data": "type/data_31.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object type matches objects", "test": "a string is not an object", "schema": "type/schema_3.json", "data": "type/data_32.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object type matches objects", "test": "an object is an object", "schema": "type/schema_3.json", "data": "type/data_33.json", "valid": "true"}, + map[string]string{"phase": "object type matches objects", "test": "an array is not an object", "schema": 
"type/schema_3.json", "data": "type/data_34.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object type matches objects", "test": "a boolean is not an object", "schema": "type/schema_3.json", "data": "type/data_35.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object type matches objects", "test": "null is not an object", "schema": "type/schema_3.json", "data": "type/data_36.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "array type matches arrays", "test": "an integer is not an array", "schema": "type/schema_4.json", "data": "type/data_40.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "array type matches arrays", "test": "a float is not an array", "schema": "type/schema_4.json", "data": "type/data_41.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "array type matches arrays", "test": "a string is not an array", "schema": "type/schema_4.json", "data": "type/data_42.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "array type matches arrays", "test": "an object is not an array", "schema": "type/schema_4.json", "data": "type/data_43.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "array type matches arrays", "test": "an array is not an array", "schema": "type/schema_4.json", "data": "type/data_44.json", "valid": "true"}, + map[string]string{"phase": "array type matches arrays", "test": "a boolean is not an array", "schema": "type/schema_4.json", "data": "type/data_45.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "array type matches arrays", "test": "null is not an array", "schema": "type/schema_4.json", "data": "type/data_46.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "an integer is not a boolean", "schema": "type/schema_5.json", "data": "type/data_50.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "a float is not a boolean", "schema": "type/schema_5.json", "data": "type/data_51.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "a string is not a boolean", "schema": "type/schema_5.json", "data": "type/data_52.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "an object is not a boolean", "schema": "type/schema_5.json", "data": "type/data_53.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "an array is not a boolean", "schema": "type/schema_5.json", "data": "type/data_54.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "a boolean is not a boolean", "schema": "type/schema_5.json", "data": "type/data_55.json", "valid": "true", "errors": "invalid_type"}, + map[string]string{"phase": "boolean type matches booleans", "test": "null is not a boolean", "schema": "type/schema_5.json", "data": "type/data_56.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type matches only the null object", "test": "an integer is not null", "schema": "type/schema_6.json", "data": "type/data_60.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type 
matches only the null object", "test": "a float is not null", "schema": "type/schema_6.json", "data": "type/data_61.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type matches only the null object", "test": "a string is not null", "schema": "type/schema_6.json", "data": "type/data_62.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type matches only the null object", "test": "an object is not null", "schema": "type/schema_6.json", "data": "type/data_63.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type matches only the null object", "test": "an array is not null", "schema": "type/schema_6.json", "data": "type/data_64.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type matches only the null object", "test": "a boolean is not null", "schema": "type/schema_6.json", "data": "type/data_65.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "null type matches only the null object", "test": "null is null", "schema": "type/schema_6.json", "data": "type/data_66.json", "valid": "true"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "an integer is valid", "schema": "type/schema_7.json", "data": "type/data_70.json", "valid": "true"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "a string is valid", "schema": "type/schema_7.json", "data": "type/data_71.json", "valid": "true"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "a float is invalid", "schema": "type/schema_7.json", "data": "type/data_72.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "an object is invalid", "schema": "type/schema_7.json", "data": "type/data_73.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "an array is invalid", "schema": "type/schema_7.json", "data": "type/data_74.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "a boolean is invalid", "schema": "type/schema_7.json", "data": "type/data_75.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "multiple types can be specified in an array", "test": "null is invalid", "schema": "type/schema_7.json", "data": "type/data_76.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "required validation", "test": "present required property is valid", "schema": "required/schema_0.json", "data": "required/data_00.json", "valid": "true"}, + map[string]string{"phase": "required validation", "test": "non-present required property is invalid", "schema": "required/schema_0.json", "data": "required/data_01.json", "valid": "false", "errors": "required"}, + map[string]string{"phase": "required default validation", "test": "not required by default", "schema": "required/schema_1.json", "data": "required/data_10.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "unique array of integers is valid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_00.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "non-unique array of integers is invalid", "schema": "uniqueItems/schema_0.json", "data": 
"uniqueItems/data_01.json", "valid": "false", "errors": "unique"}, + map[string]string{"phase": "uniqueItems validation", "test": "numbers are unique if mathematically unequal", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_02.json", "valid": "false", "errors": "unique, unique"}, + map[string]string{"phase": "uniqueItems validation", "test": "unique array of objects is valid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_03.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "non-unique array of objects is invalid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_04.json", "valid": "false", "errors": "unique"}, + map[string]string{"phase": "uniqueItems validation", "test": "unique array of nested objects is valid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_05.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "non-unique array of nested objects is invalid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_06.json", "valid": "false", "errors": "unique"}, + map[string]string{"phase": "uniqueItems validation", "test": "unique array of arrays is valid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_07.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "non-unique array of arrays is invalid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_08.json", "valid": "false", "errors": "unique"}, + map[string]string{"phase": "uniqueItems validation", "test": "1 and true are unique", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_09.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "0 and false are unique", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_010.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "unique heterogeneous types are valid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_011.json", "valid": "true"}, + map[string]string{"phase": "uniqueItems validation", "test": "non-unique heterogeneous types are invalid", "schema": "uniqueItems/schema_0.json", "data": "uniqueItems/data_012.json", "valid": "false", "errors": "unique"}, + map[string]string{"phase": "pattern validation", "test": "a matching pattern is valid", "schema": "pattern/schema_0.json", "data": "pattern/data_00.json", "valid": "true"}, + map[string]string{"phase": "pattern validation", "test": "a non-matching pattern is invalid", "schema": "pattern/schema_0.json", "data": "pattern/data_01.json", "valid": "false", "errors": "pattern"}, + map[string]string{"phase": "pattern validation", "test": "ignores non-strings", "schema": "pattern/schema_0.json", "data": "pattern/data_02.json", "valid": "true"}, + map[string]string{"phase": "simple enum validation", "test": "one of the enum is valid", "schema": "enum/schema_0.json", "data": "enum/data_00.json", "valid": "true"}, + map[string]string{"phase": "simple enum validation", "test": "something else is invalid", "schema": "enum/schema_0.json", "data": "enum/data_01.json", "valid": "false", "errors": "enum"}, + map[string]string{"phase": "heterogeneous enum validation", "test": "one of the enum is valid", "schema": "enum/schema_1.json", "data": "enum/data_10.json", "valid": "true"}, + map[string]string{"phase": "heterogeneous enum validation", "test": "something else is invalid", "schema": "enum/schema_1.json", "data": 
"enum/data_11.json", "valid": "false", "errors": "enum"}, + map[string]string{"phase": "heterogeneous enum validation", "test": "objects are deep compared", "schema": "enum/schema_1.json", "data": "enum/data_12.json", "valid": "false", "errors": "enum"}, + map[string]string{"phase": "minLength validation", "test": "longer is valid", "schema": "minLength/schema_0.json", "data": "minLength/data_00.json", "valid": "true"}, + map[string]string{"phase": "minLength validation", "test": "exact length is valid", "schema": "minLength/schema_0.json", "data": "minLength/data_01.json", "valid": "true"}, + map[string]string{"phase": "minLength validation", "test": "too short is invalid", "schema": "minLength/schema_0.json", "data": "minLength/data_02.json", "valid": "false", "errors": "string_gte"}, + map[string]string{"phase": "minLength validation", "test": "ignores non-strings", "schema": "minLength/schema_0.json", "data": "minLength/data_03.json", "valid": "true"}, + map[string]string{"phase": "minLength validation", "test": "counts utf8 length correctly", "schema": "minLength/schema_0.json", "data": "minLength/data_04.json", "valid": "false", "errors": "string_gte"}, + map[string]string{"phase": "maxLength validation", "test": "shorter is valid", "schema": "maxLength/schema_0.json", "data": "maxLength/data_00.json", "valid": "true"}, + map[string]string{"phase": "maxLength validation", "test": "exact length is valid", "schema": "maxLength/schema_0.json", "data": "maxLength/data_01.json", "valid": "true"}, + map[string]string{"phase": "maxLength validation", "test": "too long is invalid", "schema": "maxLength/schema_0.json", "data": "maxLength/data_02.json", "valid": "false", "errors": "string_lte"}, + map[string]string{"phase": "maxLength validation", "test": "ignores non-strings", "schema": "maxLength/schema_0.json", "data": "maxLength/data_03.json", "valid": "true"}, + map[string]string{"phase": "maxLength validation", "test": "counts utf8 length correctly", "schema": "maxLength/schema_0.json", "data": "maxLength/data_04.json", "valid": "true"}, + map[string]string{"phase": "minimum validation", "test": "above the minimum is valid", "schema": "minimum/schema_0.json", "data": "minimum/data_00.json", "valid": "true"}, + map[string]string{"phase": "minimum validation", "test": "below the minimum is invalid", "schema": "minimum/schema_0.json", "data": "minimum/data_01.json", "valid": "false", "errors": "number_gte"}, + map[string]string{"phase": "minimum validation", "test": "ignores non-numbers", "schema": "minimum/schema_0.json", "data": "minimum/data_02.json", "valid": "true"}, + map[string]string{"phase": "exclusiveMinimum validation", "test": "above the minimum is still valid", "schema": "minimum/schema_1.json", "data": "minimum/data_10.json", "valid": "true"}, + map[string]string{"phase": "exclusiveMinimum validation", "test": "boundary point is invalid", "schema": "minimum/schema_1.json", "data": "minimum/data_11.json", "valid": "false", "errors": "number_gt"}, + map[string]string{"phase": "maximum validation", "test": "below the maximum is valid", "schema": "maximum/schema_0.json", "data": "maximum/data_00.json", "valid": "true"}, + map[string]string{"phase": "maximum validation", "test": "above the maximum is invalid", "schema": "maximum/schema_0.json", "data": "maximum/data_01.json", "valid": "false", "errors": "number_lte"}, + map[string]string{"phase": "maximum validation", "test": "ignores non-numbers", "schema": "maximum/schema_0.json", "data": "maximum/data_02.json", "valid": "true"}, 
+ map[string]string{"phase": "exclusiveMaximum validation", "test": "below the maximum is still valid", "schema": "maximum/schema_1.json", "data": "maximum/data_10.json", "valid": "true"}, + map[string]string{"phase": "exclusiveMaximum validation", "test": "boundary point is invalid", "schema": "maximum/schema_1.json", "data": "maximum/data_11.json", "valid": "false", "errors": "number_lt"}, + map[string]string{"phase": "allOf", "test": "allOf", "schema": "allOf/schema_0.json", "data": "allOf/data_00.json", "valid": "true"}, + map[string]string{"phase": "allOf", "test": "mismatch second", "schema": "allOf/schema_0.json", "data": "allOf/data_01.json", "valid": "false", "errors": "number_all_of, required"}, + map[string]string{"phase": "allOf", "test": "mismatch first", "schema": "allOf/schema_0.json", "data": "allOf/data_02.json", "valid": "false", "errors": "number_all_of, required"}, + map[string]string{"phase": "allOf", "test": "wrong type", "schema": "allOf/schema_0.json", "data": "allOf/data_03.json", "valid": "false", "errors": "number_all_of, invalid_type"}, + map[string]string{"phase": "allOf with base schema", "test": "valid", "schema": "allOf/schema_1.json", "data": "allOf/data_10.json", "valid": "true"}, + map[string]string{"phase": "allOf with base schema", "test": "mismatch base schema", "schema": "allOf/schema_1.json", "data": "allOf/data_11.json", "valid": "false", "errors": "required"}, + map[string]string{"phase": "allOf with base schema", "test": "mismatch first allOf", "schema": "allOf/schema_1.json", "data": "allOf/data_12.json", "valid": "false", "errors": "number_all_of, required"}, + map[string]string{"phase": "allOf with base schema", "test": "mismatch second allOf", "schema": "allOf/schema_1.json", "data": "allOf/data_13.json", "valid": "false", "errors": "number_all_of, required"}, + map[string]string{"phase": "allOf with base schema", "test": "mismatch both", "schema": "allOf/schema_1.json", "data": "allOf/data_14.json", "valid": "false", "errors": "number_all_of, required, required"}, + map[string]string{"phase": "allOf simple types", "test": "valid", "schema": "allOf/schema_2.json", "data": "allOf/data_20.json", "valid": "true"}, + map[string]string{"phase": "allOf simple types", "test": "mismatch one", "schema": "allOf/schema_2.json", "data": "allOf/data_21.json", "valid": "false", "errors": "number_all_of, number_lte"}, + map[string]string{"phase": "oneOf", "test": "first oneOf valid", "schema": "oneOf/schema_0.json", "data": "oneOf/data_00.json", "valid": "true"}, + map[string]string{"phase": "oneOf", "test": "second oneOf valid", "schema": "oneOf/schema_0.json", "data": "oneOf/data_01.json", "valid": "true"}, + map[string]string{"phase": "oneOf", "test": "both oneOf valid", "schema": "oneOf/schema_0.json", "data": "oneOf/data_02.json", "valid": "false", "errors": "number_one_of"}, + map[string]string{"phase": "oneOf", "test": "neither oneOf valid", "schema": "oneOf/schema_0.json", "data": "oneOf/data_03.json", "valid": "false"}, + map[string]string{"phase": "oneOf with base schema", "test": "mismatch base schema", "schema": "oneOf/schema_1.json", "data": "oneOf/data_10.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "oneOf with base schema", "test": "one oneOf valid", "schema": "oneOf/schema_1.json", "data": "oneOf/data_11.json", "valid": "true"}, + map[string]string{"phase": "oneOf with base schema", "test": "both oneOf valid", "schema": "oneOf/schema_1.json", "data": "oneOf/data_12.json", "valid": "false", "errors": 
"number_one_of"}, + map[string]string{"phase": "anyOf", "test": "first anyOf valid", "schema": "anyOf/schema_0.json", "data": "anyOf/data_00.json", "valid": "true"}, + map[string]string{"phase": "anyOf", "test": "second anyOf valid", "schema": "anyOf/schema_0.json", "data": "anyOf/data_01.json", "valid": "true"}, + map[string]string{"phase": "anyOf", "test": "both anyOf valid", "schema": "anyOf/schema_0.json", "data": "anyOf/data_02.json", "valid": "true"}, + map[string]string{"phase": "anyOf", "test": "neither anyOf valid", "schema": "anyOf/schema_0.json", "data": "anyOf/data_03.json", "valid": "false", "errors": "number_any_of, number_gte"}, + map[string]string{"phase": "anyOf with base schema", "test": "mismatch base schema", "schema": "anyOf/schema_1.json", "data": "anyOf/data_10.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "anyOf with base schema", "test": "one anyOf valid", "schema": "anyOf/schema_1.json", "data": "anyOf/data_11.json", "valid": "true"}, + map[string]string{"phase": "anyOf with base schema", "test": "both anyOf invalid", "schema": "anyOf/schema_1.json", "data": "anyOf/data_12.json", "valid": "false", "errors": "number_any_of, string_lte"}, + map[string]string{"phase": "not", "test": "allowed", "schema": "not/schema_0.json", "data": "not/data_00.json", "valid": "true"}, + map[string]string{"phase": "not", "test": "disallowed", "schema": "not/schema_0.json", "data": "not/data_01.json", "valid": "false", "errors": "number_not"}, + map[string]string{"phase": "not multiple types", "test": "valid", "schema": "not/schema_1.json", "data": "not/data_10.json", "valid": "true"}, + map[string]string{"phase": "not multiple types", "test": "mismatch", "schema": "not/schema_1.json", "data": "not/data_11.json", "valid": "false", "errors": "number_not"}, + map[string]string{"phase": "not multiple types", "test": "other mismatch", "schema": "not/schema_1.json", "data": "not/data_12.json", "valid": "false", "errors": "number_not"}, + map[string]string{"phase": "not more complex schema", "test": "match", "schema": "not/schema_2.json", "data": "not/data_20.json", "valid": "true"}, + map[string]string{"phase": "not more complex schema", "test": "other match", "schema": "not/schema_2.json", "data": "not/data_21.json", "valid": "true"}, + map[string]string{"phase": "not more complex schema", "test": "mismatch", "schema": "not/schema_2.json", "data": "not/data_22.json", "valid": "false", "errors": "number_not"}, + map[string]string{"phase": "minProperties validation", "test": "longer is valid", "schema": "minProperties/schema_0.json", "data": "minProperties/data_00.json", "valid": "true"}, + map[string]string{"phase": "minProperties validation", "test": "exact length is valid", "schema": "minProperties/schema_0.json", "data": "minProperties/data_01.json", "valid": "true"}, + map[string]string{"phase": "minProperties validation", "test": "too short is invalid", "schema": "minProperties/schema_0.json", "data": "minProperties/data_02.json", "valid": "false", "errors": "array_min_properties"}, + map[string]string{"phase": "minProperties validation", "test": "ignores non-objects", "schema": "minProperties/schema_0.json", "data": "minProperties/data_03.json", "valid": "true"}, + map[string]string{"phase": "maxProperties validation", "test": "shorter is valid", "schema": "maxProperties/schema_0.json", "data": "maxProperties/data_00.json", "valid": "true"}, + map[string]string{"phase": "maxProperties validation", "test": "exact length is valid", "schema": 
"maxProperties/schema_0.json", "data": "maxProperties/data_01.json", "valid": "true"}, + map[string]string{"phase": "maxProperties validation", "test": "too long is invalid", "schema": "maxProperties/schema_0.json", "data": "maxProperties/data_02.json", "valid": "false", "errors": "array_max_properties"}, + map[string]string{"phase": "maxProperties validation", "test": "ignores non-objects", "schema": "maxProperties/schema_0.json", "data": "maxProperties/data_03.json", "valid": "true"}, + map[string]string{"phase": "by int", "test": "int by int", "schema": "multipleOf/schema_0.json", "data": "multipleOf/data_00.json", "valid": "true"}, + map[string]string{"phase": "by int", "test": "int by int fail", "schema": "multipleOf/schema_0.json", "data": "multipleOf/data_01.json", "valid": "false", "errors": "multiple_of"}, + map[string]string{"phase": "by int", "test": "ignores non-numbers", "schema": "multipleOf/schema_0.json", "data": "multipleOf/data_02.json", "valid": "true"}, + map[string]string{"phase": "by number", "test": "zero is multiple of anything", "schema": "multipleOf/schema_1.json", "data": "multipleOf/data_10.json", "valid": "true"}, + map[string]string{"phase": "by number", "test": "4.5 is multiple of 1.5", "schema": "multipleOf/schema_1.json", "data": "multipleOf/data_11.json", "valid": "true"}, + map[string]string{"phase": "by number", "test": "35 is not multiple of 1.5", "schema": "multipleOf/schema_1.json", "data": "multipleOf/data_12.json", "valid": "false", "errors": "multiple_of"}, + map[string]string{"phase": "by small number", "test": "0.0075 is multiple of 0.0001", "schema": "multipleOf/schema_2.json", "data": "multipleOf/data_20.json", "valid": "true"}, + map[string]string{"phase": "by small number", "test": "0.00751 is not multiple of 0.0001", "schema": "multipleOf/schema_2.json", "data": "multipleOf/data_21.json", "valid": "false", "errors": "multiple_of"}, + map[string]string{"phase": "minItems validation", "test": "longer is valid", "schema": "minItems/schema_0.json", "data": "minItems/data_00.json", "valid": "true"}, + map[string]string{"phase": "minItems validation", "test": "exact length is valid", "schema": "minItems/schema_0.json", "data": "minItems/data_01.json", "valid": "true"}, + map[string]string{"phase": "minItems validation", "test": "too short is invalid", "schema": "minItems/schema_0.json", "data": "minItems/data_02.json", "valid": "false", "errors": "array_min_items"}, + map[string]string{"phase": "minItems validation", "test": "ignores non-arrays", "schema": "minItems/schema_0.json", "data": "minItems/data_03.json", "valid": "true"}, + map[string]string{"phase": "maxItems validation", "test": "shorter is valid", "schema": "maxItems/schema_0.json", "data": "maxItems/data_00.json", "valid": "true"}, + map[string]string{"phase": "maxItems validation", "test": "exact length is valid", "schema": "maxItems/schema_0.json", "data": "maxItems/data_01.json", "valid": "true"}, + map[string]string{"phase": "maxItems validation", "test": "too long is invalid", "schema": "maxItems/schema_0.json", "data": "maxItems/data_02.json", "valid": "false", "errors": "array_max_items"}, + map[string]string{"phase": "maxItems validation", "test": "ignores non-arrays", "schema": "maxItems/schema_0.json", "data": "maxItems/data_03.json", "valid": "true"}, + map[string]string{"phase": "object properties validation", "test": "both properties present and valid is valid", "schema": "properties/schema_0.json", "data": "properties/data_00.json", "valid": "true"}, + 
map[string]string{"phase": "object properties validation", "test": "one property invalid is invalid", "schema": "properties/schema_0.json", "data": "properties/data_01.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "object properties validation", "test": "both properties invalid is invalid", "schema": "properties/schema_0.json", "data": "properties/data_02.json", "valid": "false", "errors": "invalid_type, invalid_type"}, + map[string]string{"phase": "object properties validation", "test": "doesn't invalidate other properties", "schema": "properties/schema_0.json", "data": "properties/data_03.json", "valid": "true"}, + map[string]string{"phase": "object properties validation", "test": "ignores non-objects", "schema": "properties/schema_0.json", "data": "properties/data_04.json", "valid": "true"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "property validates property", "schema": "properties/schema_1.json", "data": "properties/data_10.json", "valid": "true"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "property invalidates property", "schema": "properties/schema_1.json", "data": "properties/data_11.json", "valid": "false", "errors": "array_max_items"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "patternProperty invalidates property", "schema": "properties/schema_1.json", "data": "properties/data_12.json", "valid": "false", "errors": "array_min_items, invalid_type"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "patternProperty validates nonproperty", "schema": "properties/schema_1.json", "data": "properties/data_13.json", "valid": "true"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "patternProperty invalidates nonproperty", "schema": "properties/schema_1.json", "data": "properties/data_14.json", "valid": "false", "errors": "array_min_items, invalid_type"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "additionalProperty ignores property", "schema": "properties/schema_1.json", "data": "properties/data_15.json", "valid": "true"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "additionalProperty validates others", "schema": "properties/schema_1.json", "data": "properties/data_16.json", "valid": "true"}, + map[string]string{"phase": "properties, patternProperties, additionalProperties interaction", "test": "additionalProperty invalidates others", "schema": "properties/schema_1.json", "data": "properties/data_17.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "root pointer ref", "test": "match", "schema": "ref/schema_0.json", "data": "ref/data_00.json", "valid": "true"}, + map[string]string{"phase": "root pointer ref", "test": "recursive match", "schema": "ref/schema_0.json", "data": "ref/data_01.json", "valid": "true"}, + map[string]string{"phase": "root pointer ref", "test": "mismatch", "schema": "ref/schema_0.json", "data": "ref/data_02.json", "valid": "false", "errors": "additional_property_not_allowed"}, + map[string]string{"phase": "root pointer ref", "test": "recursive mismatch", "schema": "ref/schema_0.json", "data": "ref/data_03.json", "valid": "false", "errors": "additional_property_not_allowed"}, + 
map[string]string{"phase": "relative pointer ref to object", "test": "match", "schema": "ref/schema_1.json", "data": "ref/data_10.json", "valid": "true"}, + map[string]string{"phase": "relative pointer ref to object", "test": "mismatch", "schema": "ref/schema_1.json", "data": "ref/data_11.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "relative pointer ref to array", "test": "match array", "schema": "ref/schema_2.json", "data": "ref/data_20.json", "valid": "true"}, + map[string]string{"phase": "relative pointer ref to array", "test": "mismatch array", "schema": "ref/schema_2.json", "data": "ref/data_21.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "escaped pointer ref", "test": "slash", "schema": "ref/schema_3.json", "data": "ref/data_30.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "escaped pointer ref", "test": "tilda", "schema": "ref/schema_3.json", "data": "ref/data_31.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "escaped pointer ref", "test": "percent", "schema": "ref/schema_3.json", "data": "ref/data_32.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "nested refs", "test": "nested ref valid", "schema": "ref/schema_4.json", "data": "ref/data_40.json", "valid": "true"}, + map[string]string{"phase": "nested refs", "test": "nested ref invalid", "schema": "ref/schema_4.json", "data": "ref/data_41.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "remote ref, containing refs itself", "test": "remote ref valid", "schema": "ref/schema_5.json", "data": "ref/data_50.json", "valid": "true"}, + map[string]string{"phase": "remote ref, containing refs itself", "test": "remote ref invalid", "schema": "ref/schema_5.json", "data": "ref/data_51.json", "valid": "false", "errors": "number_all_of, number_gte"}, + map[string]string{"phase": "a schema given for items", "test": "valid items", "schema": "items/schema_0.json", "data": "items/data_00.json", "valid": "true"}, + map[string]string{"phase": "a schema given for items", "test": "wrong type of items", "schema": "items/schema_0.json", "data": "items/data_01.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "a schema given for items", "test": "ignores non-arrays", "schema": "items/schema_0.json", "data": "items/data_02.json", "valid": "true"}, + map[string]string{"phase": "an array of schemas for items", "test": "correct types", "schema": "items/schema_1.json", "data": "items/data_10.json", "valid": "true"}, + map[string]string{"phase": "an array of schemas for items", "test": "wrong types", "schema": "items/schema_1.json", "data": "items/data_11.json", "valid": "false", "errors": "invalid_type, invalid_type"}, + map[string]string{"phase": "valid definition", "test": "valid definition schema", "schema": "definitions/schema_0.json", "data": "definitions/data_00.json", "valid": "true"}, + map[string]string{"phase": "invalid definition", "test": "invalid definition schema", "schema": "definitions/schema_1.json", "data": "definitions/data_10.json", "valid": "false", "errors": "number_any_of, enum"}, + map[string]string{"phase": "additionalItems as schema", "test": "additional items match schema", "schema": "additionalItems/schema_0.json", "data": "additionalItems/data_00.json", "valid": "true"}, + map[string]string{"phase": "additionalItems as schema", "test": "additional items do not match schema", "schema": 
"additionalItems/schema_0.json", "data": "additionalItems/data_01.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "items is schema, no additionalItems", "test": "all items match schema", "schema": "additionalItems/schema_1.json", "data": "additionalItems/data_10.json", "valid": "true"}, + map[string]string{"phase": "array of items with no additionalItems", "test": "no additional items present", "schema": "additionalItems/schema_2.json", "data": "additionalItems/data_20.json", "valid": "true"}, + map[string]string{"phase": "array of items with no additionalItems", "test": "additional items are not permitted", "schema": "additionalItems/schema_2.json", "data": "additionalItems/data_21.json", "valid": "false", "errors": "array_no_additional_items"}, + map[string]string{"phase": "additionalItems as false without items", "test": "items defaults to empty schema so everything is valid", "schema": "additionalItems/schema_3.json", "data": "additionalItems/data_30.json", "valid": "true"}, + map[string]string{"phase": "additionalItems as false without items", "test": "ignores non-arrays", "schema": "additionalItems/schema_3.json", "data": "additionalItems/data_31.json", "valid": "true"}, + map[string]string{"phase": "additionalItems are allowed by default", "test": "only the first item is validated", "schema": "additionalItems/schema_4.json", "data": "additionalItems/data_40.json", "valid": "true"}, + map[string]string{"phase": "additionalProperties being false does not allow other properties", "test": "no additional properties is valid", "schema": "additionalProperties/schema_0.json", "data": "additionalProperties/data_00.json", "valid": "true"}, + map[string]string{"phase": "additionalProperties being false does not allow other properties", "test": "an additional property is invalid", "schema": "additionalProperties/schema_0.json", "data": "additionalProperties/data_01.json", "valid": "false", "errors": "additional_property_not_allowed"}, + map[string]string{"phase": "additionalProperties being false does not allow other properties", "test": "ignores non-objects", "schema": "additionalProperties/schema_0.json", "data": "additionalProperties/data_02.json", "valid": "true"}, + map[string]string{"phase": "additionalProperties being false does not allow other properties", "test": "patternProperties are not additional properties", "schema": "additionalProperties/schema_0.json", "data": "additionalProperties/data_03.json", "valid": "true"}, + map[string]string{"phase": "additionalProperties allows a schema which should validate", "test": "no additional properties is valid", "schema": "additionalProperties/schema_1.json", "data": "additionalProperties/data_10.json", "valid": "true"}, + map[string]string{"phase": "additionalProperties allows a schema which should validate", "test": "an additional valid property is valid", "schema": "additionalProperties/schema_1.json", "data": "additionalProperties/data_11.json", "valid": "true"}, + map[string]string{"phase": "additionalProperties allows a schema which should validate", "test": "an additional invalid property is invalid", "schema": "additionalProperties/schema_1.json", "data": "additionalProperties/data_12.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "additionalProperties are allowed by default", "test": "additional properties are allowed", "schema": "additionalProperties/schema_2.json", "data": "additionalProperties/data_20.json", "valid": "true"}, + map[string]string{"phase": 
"dependencies", "test": "neither", "schema": "dependencies/schema_0.json", "data": "dependencies/data_00.json", "valid": "true"}, + map[string]string{"phase": "dependencies", "test": "nondependant", "schema": "dependencies/schema_0.json", "data": "dependencies/data_01.json", "valid": "true"}, + map[string]string{"phase": "dependencies", "test": "with dependency", "schema": "dependencies/schema_0.json", "data": "dependencies/data_02.json", "valid": "true"}, + map[string]string{"phase": "dependencies", "test": "missing dependency", "schema": "dependencies/schema_0.json", "data": "dependencies/data_03.json", "valid": "false", "errors": "missing_dependency"}, + map[string]string{"phase": "dependencies", "test": "ignores non-objects", "schema": "dependencies/schema_0.json", "data": "dependencies/data_04.json", "valid": "true"}, + map[string]string{"phase": "multiple dependencies", "test": "neither", "schema": "dependencies/schema_1.json", "data": "dependencies/data_10.json", "valid": "true"}, + map[string]string{"phase": "multiple dependencies", "test": "nondependants", "schema": "dependencies/schema_1.json", "data": "dependencies/data_11.json", "valid": "true"}, + map[string]string{"phase": "multiple dependencies", "test": "with dependencies", "schema": "dependencies/schema_1.json", "data": "dependencies/data_12.json", "valid": "true"}, + map[string]string{"phase": "multiple dependencies", "test": "missing dependency", "schema": "dependencies/schema_1.json", "data": "dependencies/data_13.json", "valid": "false", "errors": "missing_dependency"}, + map[string]string{"phase": "multiple dependencies", "test": "missing other dependency", "schema": "dependencies/schema_1.json", "data": "dependencies/data_14.json", "valid": "false", "errors": "missing_dependency"}, + map[string]string{"phase": "multiple dependencies", "test": "missing both dependencies", "schema": "dependencies/schema_1.json", "data": "dependencies/data_15.json", "valid": "false", "errors": "missing_dependency, missing_dependency"}, + map[string]string{"phase": "multiple dependencies subschema", "test": "valid", "schema": "dependencies/schema_2.json", "data": "dependencies/data_20.json", "valid": "true"}, + map[string]string{"phase": "multiple dependencies subschema", "test": "no dependency", "schema": "dependencies/schema_2.json", "data": "dependencies/data_21.json", "valid": "true"}, + map[string]string{"phase": "multiple dependencies subschema", "test": "wrong type", "schema": "dependencies/schema_2.json", "data": "dependencies/data_22.json", "valid": "false"}, + map[string]string{"phase": "multiple dependencies subschema", "test": "wrong type other", "schema": "dependencies/schema_2.json", "data": "dependencies/data_23.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "multiple dependencies subschema", "test": "wrong type both", "schema": "dependencies/schema_2.json", "data": "dependencies/data_24.json", "valid": "false", "errors": "invalid_type, invalid_type"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "a single valid match is valid", "schema": "patternProperties/schema_0.json", "data": "patternProperties/data_00.json", "valid": "true"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "multiple valid matches is valid", "schema": "patternProperties/schema_0.json", "data": "patternProperties/data_01.json", "valid": "true"}, + map[string]string{"phase": "patternProperties validates properties matching 
a regex", "test": "a single invalid match is invalid", "schema": "patternProperties/schema_0.json", "data": "patternProperties/data_02.json", "valid": "false", "errors": "invalid_property_pattern, invalid_type"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "multiple invalid matches is invalid", "schema": "patternProperties/schema_0.json", "data": "patternProperties/data_03.json", "valid": "false", "errors": "invalid_property_pattern, invalid_property_pattern, invalid_type, invalid_type"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "ignores non-objects", "schema": "patternProperties/schema_0.json", "data": "patternProperties/data_04.json", "valid": "true"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "with additionalProperties combination", "schema": "patternProperties/schema_3.json", "data": "patternProperties/data_24.json", "valid": "false", "errors": "additional_property_not_allowed"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "with additionalProperties combination", "schema": "patternProperties/schema_3.json", "data": "patternProperties/data_25.json", "valid": "false", "errors": "additional_property_not_allowed"}, + map[string]string{"phase": "patternProperties validates properties matching a regex", "test": "with additionalProperties combination", "schema": "patternProperties/schema_4.json", "data": "patternProperties/data_26.json", "valid": "false", "errors": "additional_property_not_allowed"}, + map[string]string{"phase": "multiple simultaneous patternProperties are validated", "test": "a single valid match is valid", "schema": "patternProperties/schema_1.json", "data": "patternProperties/data_10.json", "valid": "true"}, + map[string]string{"phase": "multiple simultaneous patternProperties are validated", "test": "a simultaneous match is valid", "schema": "patternProperties/schema_1.json", "data": "patternProperties/data_11.json", "valid": "true"}, + map[string]string{"phase": "multiple simultaneous patternProperties are validated", "test": "multiple matches is valid", "schema": "patternProperties/schema_1.json", "data": "patternProperties/data_12.json", "valid": "true"}, + map[string]string{"phase": "multiple simultaneous patternProperties are validated", "test": "an invalid due to one is invalid", "schema": "patternProperties/schema_1.json", "data": "patternProperties/data_13.json", "valid": "false", "errors": "invalid_property_pattern, invalid_type"}, + map[string]string{"phase": "multiple simultaneous patternProperties are validated", "test": "an invalid due to the other is invalid", "schema": "patternProperties/schema_1.json", "data": "patternProperties/data_14.json", "valid": "false", "errors": "number_lte"}, + map[string]string{"phase": "multiple simultaneous patternProperties are validated", "test": "an invalid due to both is invalid", "schema": "patternProperties/schema_1.json", "data": "patternProperties/data_15.json", "valid": "false", "errors": "invalid_type, number_lte"}, + map[string]string{"phase": "regexes are not anchored by default and are case sensitive", "test": "non recognized members are ignored", "schema": "patternProperties/schema_2.json", "data": "patternProperties/data_20.json", "valid": "true"}, + map[string]string{"phase": "regexes are not anchored by default and are case sensitive", "test": "recognized members are accounted for", "schema": 
"patternProperties/schema_2.json", "data": "patternProperties/data_21.json", "valid": "false", "errors": "invalid_property_pattern, invalid_type"}, + map[string]string{"phase": "regexes are not anchored by default and are case sensitive", "test": "regexes are case sensitive", "schema": "patternProperties/schema_2.json", "data": "patternProperties/data_22.json", "valid": "true"}, + map[string]string{"phase": "regexes are not anchored by default and are case sensitive", "test": "regexes are case sensitive, 2", "schema": "patternProperties/schema_2.json", "data": "patternProperties/data_23.json", "valid": "false", "errors": "invalid_property_pattern, invalid_type"}, + map[string]string{"phase": "remote ref", "test": "remote ref valid", "schema": "refRemote/schema_0.json", "data": "refRemote/data_00.json", "valid": "true"}, + map[string]string{"phase": "remote ref", "test": "remote ref invalid", "schema": "refRemote/schema_0.json", "data": "refRemote/data_01.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "fragment within remote ref", "test": "remote fragment valid", "schema": "refRemote/schema_1.json", "data": "refRemote/data_10.json", "valid": "true"}, + map[string]string{"phase": "fragment within remote ref", "test": "remote fragment invalid", "schema": "refRemote/schema_1.json", "data": "refRemote/data_11.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "ref within remote ref", "test": "ref within ref valid", "schema": "refRemote/schema_2.json", "data": "refRemote/data_20.json", "valid": "true"}, + map[string]string{"phase": "ref within remote ref", "test": "ref within ref invalid", "schema": "refRemote/schema_2.json", "data": "refRemote/data_21.json", "valid": "false", "errors": "invalid_type"}, + map[string]string{"phase": "format validation", "test": "email format is invalid", "schema": "format/schema_0.json", "data": "format/data_00.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "email format is invalid", "schema": "format/schema_0.json", "data": "format/data_01.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "email format valid", "schema": "format/schema_0.json", "data": "format/data_02.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "invoice format valid", "schema": "format/schema_1.json", "data": "format/data_03.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "invoice format is invalid", "schema": "format/schema_1.json", "data": "format/data_04.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": "format/data_05.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": "format/data_06.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "date-time format is invalid", "schema": "format/schema_2.json", "data": "format/data_07.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": "format/data_08.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": 
"format/data_09.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": "format/data_10.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": "format/data_11.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "date-time format is valid", "schema": "format/schema_2.json", "data": "format/data_12.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "hostname format is valid", "schema": "format/schema_3.json", "data": "format/data_13.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "hostname format is valid", "schema": "format/schema_3.json", "data": "format/data_14.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "hostname format is valid", "schema": "format/schema_3.json", "data": "format/data_15.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "hostname format is invalid", "schema": "format/schema_3.json", "data": "format/data_16.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "hostname format is invalid", "schema": "format/schema_3.json", "data": "format/data_17.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "ipv4 format is valid", "schema": "format/schema_4.json", "data": "format/data_18.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "ipv4 format is invalid", "schema": "format/schema_4.json", "data": "format/data_19.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "ipv6 format is valid", "schema": "format/schema_5.json", "data": "format/data_20.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "ipv6 format is valid", "schema": "format/schema_5.json", "data": "format/data_21.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "ipv6 format is invalid", "schema": "format/schema_5.json", "data": "format/data_22.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "ipv6 format is invalid", "schema": "format/schema_5.json", "data": "format/data_23.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "uri format is valid", "schema": "format/schema_6.json", "data": "format/data_24.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "uri format is valid", "schema": "format/schema_6.json", "data": "format/data_25.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "uri format is valid", "schema": "format/schema_6.json", "data": "format/data_26.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "uri format is valid", "schema": "format/schema_6.json", "data": "format/data_27.json", "valid": "true"}, + map[string]string{"phase": "format validation", "test": "uri format is invalid", "schema": "format/schema_6.json", "data": "format/data_28.json", "valid": "false", "errors": "format"}, + map[string]string{"phase": "format validation", "test": "uri format is invalid", "schema": "format/schema_6.json", "data": "format/data_13.json", "valid": "false", "errors": "format"}, + } + + //TODO Pass 
these currently failing tests once id(s) as scope for references is implemented
+	//map[string]string{"phase": "change resolution scope", "test": "changed scope ref valid", "schema": "refRemote/schema_3.json", "data": "refRemote/data_30.json", "valid": "true"},
+	//map[string]string{"phase": "change resolution scope", "test": "changed scope ref invalid", "schema": "refRemote/schema_3.json", "data": "refRemote/data_31.json", "valid": "false"}}
+
+	// Set up a small http server on localhost:1234 for testing purposes
+
+	wd, err := os.Getwd()
+	if err != nil {
+		panic(err.Error())
+	}
+
+	testwd := wd + "/json_schema_test_suite"
+
+	go func() {
+		err := http.ListenAndServe(":1234", http.FileServer(http.Dir(testwd+"/refRemote/remoteFiles")))
+		if err != nil {
+			panic(err.Error())
+		}
+	}()
+
+	// Used for remote schema in ref/schema_5.json that defines "regex" type
+	FormatCheckers.Add("regex", alwaysTrueFormatChecker{})
+
+	// Custom Formatter
+	FormatCheckers.Add("invoice", invoiceFormatChecker{})
+
+	// Launch tests
+
+	for testJsonIndex, testJson := range JsonSchemaTestSuiteMap {
+
+		fmt.Printf("Test (%d) | %s :: %s\n", testJsonIndex, testJson["phase"], testJson["test"])
+
+		schemaLoader := NewReferenceLoader("file://" + testwd + "/" + testJson["schema"])
+		documentLoader := NewReferenceLoader("file://" + testwd + "/" + testJson["data"])
+
+		// validate
+		result, err := Validate(schemaLoader, documentLoader)
+		if err != nil {
+			t.Errorf("Error (%s)\n", err.Error())
+		}
+		givenValid := result.Valid()
+
+		if displayErrorMessages {
+			for vErrI, vErr := range result.Errors() {
+				fmt.Printf(" Error (%d) | %s\n", vErrI, vErr)
+			}
+		}
+
+		expectedValid, _ := strconv.ParseBool(testJson["valid"])
+		if givenValid != expectedValid {
+			t.Errorf("Test failed : %s :: %s, expects %t, given %t\n", testJson["phase"], testJson["test"], expectedValid, givenValid)
+		}
+
+		if !givenValid && testJson["errors"] != "" {
+			expectedErrors := rxSplitErrors.Split(testJson["errors"], -1)
+			errors := result.Errors()
+			if len(errors) != len(expectedErrors) {
+				t.Errorf("Test failed : %s :: %s, expects %d errors, given %d errors\n", testJson["phase"], testJson["test"], len(expectedErrors), len(errors))
+			}
+
+			actualErrors := make([]string, 0)
+			for _, e := range result.Errors() {
+				actualErrors = append(actualErrors, e.Type())
+			}
+
+			sort.Strings(actualErrors)
+			sort.Strings(expectedErrors)
+			if !reflect.DeepEqual(actualErrors, expectedErrors) {
+				t.Errorf("Test failed : %s :: %s, expected '%s' errors, given '%s' errors\n", testJson["phase"], testJson["test"], strings.Join(expectedErrors, ", "), strings.Join(actualErrors, ", "))
+			}
+		}
+
+	}
+
+	fmt.Printf("\n%d tests performed / %d total tests to perform ( %.2f %% )\n", len(JsonSchemaTestSuiteMap), 248, float32(len(JsonSchemaTestSuiteMap))/248.0*100.0)
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
new file mode 100644
index 00000000..b249b7e5
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go
@@ -0,0 +1,227 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. +// +// created 27-02-2013 + +package gojsonschema + +import ( + "errors" + "regexp" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +const ( + KEY_SCHEMA = "$subSchema" + KEY_ID = "$id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" +) + +type subSchema struct { + + // basic subSchema meta properties + id *string + title *string + description *string + + property string + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + // Json reference + subSchema *gojsonreference.JsonReference + + // hierarchy + parent *subSchema + definitions map[string]*subSchema + definitionsChildren []*subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *float64 + maximum *float64 + exclusiveMaximum bool + minimum *float64 + exclusiveMinimum bool + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties map[string]*subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + + additionalItems interface{} + + // validation : all + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema +} + +func (s *subSchema) AddEnum(i interface{}) error { + + is, err := marshalToJsonString(i) + if err != nil { + return err + } + + if isStringInSlice(s.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + + s.enum = append(s.enum, *is) + + return nil +} + 
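+// NOTE (editorial sketch, not part of the upstream source): AddEnum above
+// and ContainsEnum below store each enum value as its canonical JSON
+// encoding, so set membership reduces to a string comparison. The same
+// idea in isolation, using only the standard library (all names here are
+// illustrative):
+//
+//	seen := []string{}
+//	add := func(v interface{}) error {
+//		b, err := json.Marshal(v) // canonical JSON string as the set key
+//		if err != nil {
+//			return err
+//		}
+//		if isStringInSlice(seen, string(b)) {
+//			return errors.New("enum values must be unique")
+//		}
+//		seen = append(seen, string(b))
+//		return nil
+//	}
+//
+// Marshaling first means numbers, strings, and structurally equal objects
+// compare the way JSON Schema expects, rather than by Go value identity.
+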
+func (s *subSchema) ContainsEnum(i interface{}) (bool, error) { + + is, err := marshalToJsonString(i) + if err != nil { + return false, err + } + + return isStringInSlice(s.enum, *is), nil +} + +func (s *subSchema) AddOneOf(subSchema *subSchema) { + s.oneOf = append(s.oneOf, subSchema) +} + +func (s *subSchema) AddAllOf(subSchema *subSchema) { + s.allOf = append(s.allOf, subSchema) +} + +func (s *subSchema) AddAnyOf(subSchema *subSchema) { + s.anyOf = append(s.anyOf, subSchema) +} + +func (s *subSchema) SetNot(subSchema *subSchema) { + s.not = subSchema +} + +func (s *subSchema) AddRequired(value string) error { + + if isStringInSlice(s.required, value) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + + s.required = append(s.required, value) + + return nil +} + +func (s *subSchema) AddDefinitionChild(child *subSchema) { + s.definitionsChildren = append(s.definitionsChildren, child) +} + +func (s *subSchema) AddItemsChild(child *subSchema) { + s.itemsChildren = append(s.itemsChildren, child) +} + +func (s *subSchema) AddPropertiesChild(child *subSchema) { + s.propertiesChildren = append(s.propertiesChildren, child) +} + +func (s *subSchema) PatternPropertiesString() string { + + if s.patternProperties == nil || len(s.patternProperties) == 0 { + return STRING_UNDEFINED // should never happen + } + + patternPropertiesKeySlice := []string{} + for pk, _ := range s.patternProperties { + patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`) + } + + if len(patternPropertiesKeySlice) == 1 { + return patternPropertiesKeySlice[0] + } + + return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]" + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 00000000..952d22ef --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,58 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. 
+// +// created 28-02-2013 + +package gojsonschema + +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +var JSON_TYPES []string +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 00000000..4cbe0dcc --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,203 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" +) + +func isKind(what interface{}, kind reflect.Kind) bool { + return reflect.ValueOf(what).Kind() == kind +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +func marshalToJsonString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func isJsonNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) { + + jsonNumber := what.(json.Number) + + f64, errFloat64 := jsonNumber.Float64() + s64 := strconv.FormatFloat(f64, 'f', -1, 64) + _, errInt64 := strconv.ParseInt(s64, 10, 64) + + isValidFloat64 = errFloat64 == nil + isValidInt64 = errInt64 == nil + + _, errInt32 := strconv.ParseInt(s64, 10, 32) + isValidInt32 = isValidInt64 && errInt32 == nil + + return + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + max_json_float = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + min_json_float = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +func isFloat64AnInteger(f float64) bool { + + if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float { + return false + } + + return f == float64(int64(f)) || f == float64(uint64(f)) +} + +func mustBeInteger(what interface{}) *int { + + if isJsonNumber(what) { + + number := what.(json.Number) + + _, _, isValidInt32 := 
checkJsonNumber(number)
+
+ if isValidInt32 {
+
+ int64Value, err := number.Int64()
+ if err != nil {
+ return nil
+ }
+
+ int32Value := int(int64Value)
+ return &int32Value
+
+ } else {
+ return nil
+ }
+
+ }
+
+ return nil
+}
+
+func mustBeNumber(what interface{}) *float64 {
+
+ if isJsonNumber(what) {
+
+ number := what.(json.Number)
+ float64Value, err := number.Float64()
+
+ if err == nil {
+ return &float64Value
+ } else {
+ return nil
+ }
+
+ }
+
+ return nil
+
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatJsonNumber(n json.Number) string {
+
+ if int64Value, err := n.Int64(); err == nil {
+ return fmt.Sprintf("%d", int64Value)
+ }
+
+ float64Value, _ := n.Float64()
+
+ return fmt.Sprintf("%g", float64Value)
+}
+
+// formats a number so that it is displayed as the smallest string possible
+func resultErrorFormatNumber(n float64) string {
+
+ if isFloat64AnInteger(n) {
+ return fmt.Sprintf("%d", int64(n))
+ }
+
+ return fmt.Sprintf("%g", n)
+}
+
+func convertDocumentNode(val interface{}) interface{} {
+
+ if lval, ok := val.([]interface{}); ok {
+
+ res := []interface{}{}
+ for _, v := range lval {
+ res = append(res, convertDocumentNode(v))
+ }
+
+ return res
+
+ }
+
+ if mval, ok := val.(map[interface{}]interface{}); ok {
+
+ res := map[string]interface{}{}
+
+ for k, v := range mval {
+ res[k.(string)] = convertDocumentNode(v)
+ }
+
+ return res
+
+ }
+
+ return val
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils_test.go b/vendor/github.com/xeipuuv/gojsonschema/utils_test.go
new file mode 100644
index 00000000..489577d4
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/utils_test.go
@@ -0,0 +1,64 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author janmentzel
+// author-github https://github.com/janmentzel
+// author-mail ? ( forward to xeipuuv@gmail.com )
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description (Unit) Tests for utils ( Float / Integer conversion ).
+//
+// created 08-08-2013
+
+package gojsonschema
+
+import (
+ "github.com/stretchr/testify/assert"
+ "math"
+ "testing"
+)
+
+func TestResultErrorFormatNumber(t *testing.T) {
+
+ assert.Equal(t, "1", resultErrorFormatNumber(1))
+ assert.Equal(t, "-1", resultErrorFormatNumber(-1))
+ assert.Equal(t, "0", resultErrorFormatNumber(0))
+ // unfortunately, 0.0 cannot be recognized as a float here
+ assert.Equal(t, "0", resultErrorFormatNumber(0.0))
+
+ assert.Equal(t, "1.001", resultErrorFormatNumber(1.001))
+ assert.Equal(t, "-1.001", resultErrorFormatNumber(-1.001))
+ assert.Equal(t, "0.0001", resultErrorFormatNumber(0.0001))
+
+ // casting math.MaxInt64 (1<<63 -1) to float and back to int64
+ // becomes negative, obviously because of bit misinterpretation;
+ // so simply test a slightly smaller "large" integer here
+ assert.Equal(t, "4.611686018427388e+18", resultErrorFormatNumber(1<<62))
+ // with negative values even int64 min works
+ assert.Equal(t, "-9.223372036854776e+18", resultErrorFormatNumber(math.MinInt64))
+ assert.Equal(t, "-4.611686018427388e+18", resultErrorFormatNumber(-1<<62))
+
+ assert.Equal(t, "10000000000", resultErrorFormatNumber(1e10))
+ assert.Equal(t, "-10000000000", resultErrorFormatNumber(-1e10))
+
+ assert.Equal(t, "1.000000000001", resultErrorFormatNumber(1.000000000001))
+ assert.Equal(t, "-1.000000000001", resultErrorFormatNumber(-1.000000000001))
+ assert.Equal(t, "1e-10", resultErrorFormatNumber(1e-10))
+ assert.Equal(t, "-1e-10", resultErrorFormatNumber(-1e-10))
+ assert.Equal(t, "4.6116860184273876e+07", resultErrorFormatNumber(4.611686018427387904e7))
+ assert.Equal(t, "-4.6116860184273876e+07", resultErrorFormatNumber(-4.611686018427387904e7))
+
+}
diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go
new file mode 100644
index 00000000..2dc0df22
--- /dev/null
+++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go
@@ -0,0 +1,829 @@
+// Copyright 2015 xeipuuv ( https://github.com/xeipuuv )
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// author xeipuuv
+// author-github https://github.com/xeipuuv
+// author-mail xeipuuv@gmail.com
+//
+// repository-name gojsonschema
+// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language.
+//
+// description Extends Schema and subSchema, implements the validation phase.
+//
+// created 28-02-2013

+package gojsonschema
+
+import (
+ "encoding/json"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) {
+
+ var err error
+
+ // load schema
+
+ schema, err := NewSchema(ls)
+ if err != nil {
+ return nil, err
+ }
+
+ // begin validation
+
+ return schema.Validate(ld)
+
+}
+
+func (v *Schema) Validate(l JSONLoader) (*Result, error) {
+
+ // load document
+
+ root, err := l.loadJSON()
+ if err != nil {
+ return nil, err
+ }
+
+ // begin validation
+
+ result := &Result{}
+ context := newJsonContext(STRING_CONTEXT_ROOT, nil)
+ v.rootSchema.validateRecursive(v.rootSchema, root, result, context)
+
+ return result, nil
+
+}
+
+func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result {
+ result := &Result{}
+ v.validateRecursive(v, document, result, context)
+ return result
+}
+
+// Walker function to validate the JSON document recursively against the subSchema
+func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) {
+
+ if internalLogEnabled {
+ internalLog("validateRecursive %s", context.String())
+ internalLog(" %v", currentNode)
+ }
+
+ // Handle referenced schemas; returns directly when a $ref is found
+ if currentSubSchema.refSchema != nil {
+ v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context)
+ return
+ }
+
+ // Check for null value
+ if currentNode == nil {
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_NULL,
+ },
+ )
+ return
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context)
+ v.validateCommon(currentSubSchema, currentNode, result, context)
+
+ } else { // Not a null value
+
+ if isJsonNumber(currentNode) {
+
+ value := currentNode.(json.Number)
+
+ _, isValidInt64, _ := checkJsonNumber(value)
+
+ validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER))
+
+ if currentSubSchema.types.IsTyped() && !validType {
+
+ givenType := TYPE_INTEGER
+ if !isValidInt64 {
+ givenType = TYPE_NUMBER
+ }
+
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": givenType,
+ },
+ )
+ return
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ } else {
+
+ rValue := reflect.ValueOf(currentNode)
+ rKind := rValue.Kind()
+
+ switch rKind {
+
+ // Slice => JSON array
+
+ case reflect.Slice:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_ARRAY,
+ },
+ )
+ return
+ }
+
+ castCurrentNode := currentNode.([]interface{})
+
+ currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+ v.validateArray(currentSubSchema, castCurrentNode, result, context)
+ v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+ // Map => JSON object
+
+ case reflect.Map:
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_OBJECT,
+ },
+ )
+ return
+ }
+
+ castCurrentNode, ok := currentNode.(map[string]interface{})
+ if !ok {
+ castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{})
+ }
+
+ currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context)
+
+ v.validateObject(currentSubSchema, castCurrentNode, result, context)
+ v.validateCommon(currentSubSchema, castCurrentNode, result, context)
+
+ for _, pSchema := range currentSubSchema.propertiesChildren {
+ nextNode, ok := castCurrentNode[pSchema.property]
+ if ok {
+ subContext := newJsonContext(pSchema.property, context)
+ v.validateRecursive(pSchema, nextNode, result, subContext)
+ }
+ }
+
+ // Simple JSON values : string, number, boolean
+
+ case reflect.Bool:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_BOOLEAN,
+ },
+ )
+ return
+ }
+
+ value := currentNode.(bool)
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ case reflect.String:
+
+ if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) {
+ result.addError(
+ new(InvalidTypeError),
+ context,
+ currentNode,
+ ErrorDetails{
+ "expected": currentSubSchema.types.String(),
+ "given": TYPE_STRING,
+ },
+ )
+ return
+ }
+
+ value := currentNode.(string)
+
+ currentSubSchema.validateSchema(currentSubSchema, value, result, context)
+ v.validateNumber(currentSubSchema, value, result, context)
+ v.validateCommon(currentSubSchema, value, result, context)
+ v.validateString(currentSubSchema, value, result, context)
+
+ }
+
+ }
+
+ }
+
+ result.incrementScore()
+}
+
+// Different kinds of validation here: subSchema / common / array / object / string...
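+
+// Editorial note (not upstream code): validateRecursive above checks
+// isJsonNumber before falling back to the reflect.Kind switch, which
+// assumes documents were decoded with json.Decoder.UseNumber() so that
+// numbers arrive as json.Number and the integer/number distinction
+// survives. A minimal sketch of that decoding side, with illustrative
+// values:
+//
+//	dec := json.NewDecoder(strings.NewReader(`{"n": 42, "b": true}`))
+//	dec.UseNumber()
+//	var doc interface{}
+//	if err := dec.Decode(&doc); err != nil {
+//		// handle the decode error
+//	}
+//	m := doc.(map[string]interface{}) // dispatched via the reflect.Map case
+//	n := m["n"].(json.Number)         // caught before the Kind switch
+//	if _, err := n.Int64(); err == nil {
+//		// would satisfy TYPE_INTEGER as well as TYPE_NUMBER
+//	}
+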
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + + } + } + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // enum: + if len(currentSubSchema.enum) > 0 { + has, err := currentSubSchema.ContainsEnum(value) + 
if err != nil { + result.addError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !has { + result.addError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbItems := len(value) + + // TODO explain + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + nbValues := len(value) + + if nbItems == nbValues { + for i := 0; i != nbItems; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else if nbItems < nbValues { + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbItems < int(*currentSubSchema.minItems) { + result.addError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbItems > int(*currentSubSchema.maxItems) { + result.addError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems []string + for _, v := range value { + vString, err := marshalToJsonString(v) + if err != nil { + result.addError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if isStringInSlice(stringifiedItems, *vString) { + result.addError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY}, + ) + } + stringifiedItems = append(stringifiedItems, *vString) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addError( + new(ArrayMaxPropertiesError), + context, + value, + 
ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + if currentSubSchema.additionalProperties != nil { + + switch currentSubSchema.additionalProperties.(type) { + case bool: + + if !currentSubSchema.additionalProperties.(bool) { + + for pk := range value { + + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if found { + + if pp_has && !pp_match { + result.addError( + new(AdditionalPropertyNotAllowedError), + context, + value, + ErrorDetails{"property": pk}, + ) + } + + } else { + + if !pp_has || !pp_match { + result.addError( + new(AdditionalPropertyNotAllowedError), + context, + value, + ErrorDetails{"property": pk}, + ) + } + + } + } + } + + case *subSchema: + + additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema) + for pk := range value { + + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if found { + + if pp_has && !pp_match { + validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) + result.mergeErrors(validationResult) + } + + } else { + + if !pp_has || !pp_match { + validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) + result.mergeErrors(validationResult) + } + + } + + } + } + } else { + + for pk := range value { + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if pp_has && !pp_match { + + result.addError( + new(InvalidPropertyPatternError), + context, + value, + ErrorDetails{ + "property": pk, + "pattern": currentSubSchema.PatternPropertiesString(), + }, + ) + } + + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + has = false + + validatedkey := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + has = true + subContext := newJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + if validationResult.Valid() { + validatedkey = true + } + } + } + + if !validatedkey { + return has, false + } + + result.incrementScore() + + return has, true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + // Ignore JSON numbers + if isJsonNumber(value) { + return + } + + // Ignore non strings + if !isKind(value, reflect.String) { + return + } + + if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if 
currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + // Ignore non numbers + if !isJsonNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := number.Float64() + + // multipleOf: + if currentSubSchema.multipleOf != nil { + + if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) { + result.addError( + new(MultipleOfError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{"multiple": *currentSubSchema.multipleOf}, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if currentSubSchema.exclusiveMaximum { + if float64Value >= *currentSubSchema.maximum { + result.addError( + new(NumberLTError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "max": resultErrorFormatNumber(*currentSubSchema.maximum), + }, + ) + } + } else { + if float64Value > *currentSubSchema.maximum { + result.addError( + new(NumberLTEError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "max": resultErrorFormatNumber(*currentSubSchema.maximum), + }, + ) + } + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if currentSubSchema.exclusiveMinimum { + if float64Value <= *currentSubSchema.minimum { + result.addError( + new(NumberGTError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "min": resultErrorFormatNumber(*currentSubSchema.minimum), + }, + ) + } + } else { + if float64Value < *currentSubSchema.minimum { + result.addError( + new(NumberGTEError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "min": resultErrorFormatNumber(*currentSubSchema.minimum), + }, + ) + } + } + } + + result.incrementScore() +}
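
Taken together, the exported surface of this vendored package as exercised above is small: build a JSONLoader for the schema and one for the document, call Validate, then inspect the Result. A minimal end-to-end sketch of that flow, mirroring the calls the vendored test harness makes (the file paths are illustrative, not real fixtures):

```go
package main

import (
	"fmt"

	"github.com/xeipuuv/gojsonschema"
)

func main() {
	// Illustrative paths; any JSONLoader works in either position.
	schema := gojsonschema.NewReferenceLoader("file:///tmp/schema.json")
	document := gojsonschema.NewReferenceLoader("file:///tmp/data.json")

	result, err := gojsonschema.Validate(schema, document)
	if err != nil {
		panic(err) // the schema or document could not be loaded
	}

	if result.Valid() {
		fmt.Println("document is valid")
		return
	}
	for _, e := range result.Errors() {
		// Type() is the machine-readable error kind the test table matches on.
		fmt.Printf("- %s: %s\n", e.Type(), e)
	}
}
```

Note that, as the validation walkers above show, errors are accumulated via mergeErrors rather than failing fast, which is why Errors() can be compared against a complete sorted list of expected error types in the test suite.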