*: replace git/odb with vendored copy

Taylor Blau 2018-07-05 11:49:10 -05:00
parent 22b9113e25
commit b060df60f2
165 changed files with 22888 additions and 224 deletions
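The change is mechanical throughout: call sites swap the in-tree `github.com/git-lfs/git-lfs/git/odb` import for the vendored `github.com/git-lfs/gitobj` module, and every `odb.*` identifier becomes `gitobj.*`. A minimal sketch of the rename pattern, using only the `Blob` fields (`Contents`, `Size`) that appear in the hunks below:

```go
package main

import (
	"strings"

	"github.com/git-lfs/gitobj" // was: "github.com/git-lfs/git-lfs/git/odb"
)

func main() {
	// Types keep their shape; only the package qualifier changes,
	// e.g. *odb.Blob becomes *gitobj.Blob.
	contents := "extra\n"
	_ = &gitobj.Blob{
		Contents: strings.NewReader(contents),
		Size:     int64(len(contents)),
	}
}
```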

@ -8,8 +8,8 @@ import (
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/git/githistory"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/git-lfs/tasklog"
"github.com/git-lfs/gitobj"
"github.com/spf13/cobra"
)
@ -49,7 +49,7 @@ var (
exportRemote string
)
// migrate takes the given command and arguments, *odb.ObjectDatabase, as well
// migrate takes the given command and arguments, *gitobj.ObjectDatabase, as well
// as a BlobRewriteFn to apply, and performs a migration.
func migrate(args []string, r *githistory.Rewriter, l *tasklog.Logger, opts *githistory.RewriteOptions) {
requireInRepo()
@ -67,12 +67,12 @@ func migrate(args []string, r *githistory.Rewriter, l *tasklog.Logger, opts *git
// getObjectDatabase creates a *git.ObjectDatabase from the filesystem pointed
// at the .git directory of the currently checked-out repository.
func getObjectDatabase() (*odb.ObjectDatabase, error) {
func getObjectDatabase() (*gitobj.ObjectDatabase, error) {
dir, err := git.GitDir()
if err != nil {
return nil, errors.Wrap(err, "cannot open root")
}
return odb.FromFilesystem(filepath.Join(dir, "objects"), cfg.TempDir())
return gitobj.FromFilesystem(filepath.Join(dir, "objects"), cfg.TempDir())
}
// rewriteOptions returns *githistory.RewriteOptions able to be passed to a
@ -281,7 +281,7 @@ func currentRefToMigrate() (*git.Ref, error) {
// getHistoryRewriter returns a history rewriter that includes the filepath
// filter given by the --include and --exclude arguments.
func getHistoryRewriter(cmd *cobra.Command, db *odb.ObjectDatabase, l *tasklog.Logger) *githistory.Rewriter {
func getHistoryRewriter(cmd *cobra.Command, db *gitobj.ObjectDatabase, l *tasklog.Logger) *githistory.Rewriter {
include, exclude := getIncludeExcludeArgs(cmd)
filter := buildFilepathFilter(cfg, include, exclude)

@ -9,10 +9,10 @@ import (
"github.com/git-lfs/git-lfs/filepathfilter"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/git/githistory"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/git-lfs/lfs"
"github.com/git-lfs/git-lfs/tasklog"
"github.com/git-lfs/git-lfs/tools"
"github.com/git-lfs/gitobj"
"github.com/spf13/cobra"
)
@ -39,7 +39,7 @@ func migrateExportCommand(cmd *cobra.Command, args []string) {
opts := &githistory.RewriteOptions{
Verbose: migrateVerbose,
ObjectMapFilePath: objectMapFilePath,
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
if filepath.Base(path) == ".gitattributes" {
return b, nil
}
@ -57,10 +57,10 @@ func migrateExportCommand(cmd *cobra.Command, args []string) {
return nil, err
}
return odb.NewBlobFromFile(downloadPath)
return gitobj.NewBlobFromFile(downloadPath)
},
TreeCallbackFn: func(path string, t *odb.Tree) (*odb.Tree, error) {
TreeCallbackFn: func(path string, t *gitobj.Tree) (*gitobj.Tree, error) {
if path != "/" {
// Ignore non-root trees.
return t, nil
@ -88,7 +88,7 @@ func migrateExportCommand(cmd *cobra.Command, args []string) {
// Finally, return a copy of the tree "t" that has the
// new .gitattributes file included/replaced.
return t.Merge(&odb.TreeEntry{
return t.Merge(&gitobj.TreeEntry{
Name: ".gitattributes",
Filemode: 0100644,
Oid: blob,

@ -13,10 +13,10 @@ import (
"github.com/git-lfs/git-lfs/filepathfilter"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/git/githistory"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/git-lfs/lfs"
"github.com/git-lfs/git-lfs/tasklog"
"github.com/git-lfs/git-lfs/tools"
"github.com/git-lfs/gitobj"
"github.com/spf13/cobra"
)
@ -71,7 +71,7 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
name, email := cfg.CurrentCommitter()
author := fmt.Sprintf("%s <%s>", name, email)
oid, err := db.WriteCommit(&odb.Commit{
oid, err := db.WriteCommit(&gitobj.Commit{
Author: author,
Committer: author,
ParentIDs: [][]byte{sha},
@ -103,7 +103,7 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
migrate(args, rewriter, l, &githistory.RewriteOptions{
Verbose: migrateVerbose,
ObjectMapFilePath: objectMapFilePath,
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
if filepath.Base(path) == ".gitattributes" {
return b, nil
}
@ -118,12 +118,12 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
exts.Add(fmt.Sprintf("*%s filter=lfs diff=lfs merge=lfs -text", ext))
}
return &odb.Blob{
return &gitobj.Blob{
Contents: &buf, Size: int64(buf.Len()),
}, nil
},
TreeCallbackFn: func(path string, t *odb.Tree) (*odb.Tree, error) {
TreeCallbackFn: func(path string, t *gitobj.Tree) (*gitobj.Tree, error) {
if path != "/" {
// Ignore non-root trees.
return t, nil
@ -171,7 +171,7 @@ func migrateImportCommand(cmd *cobra.Command, args []string) {
// Finally, return a copy of the tree "t" that has the
// new .gitattributes file included/replaced.
return t.Merge(&odb.TreeEntry{
return t.Merge(&gitobj.TreeEntry{
Name: ".gitattributes",
Filemode: 0100644,
Oid: blob,
@ -238,11 +238,11 @@ var (
//
// It returns an empty set if no attributes file could be found, or an error if
// it could not otherwise be opened.
func trackedFromAttrs(db *odb.ObjectDatabase, t *odb.Tree) (*tools.OrderedSet, error) {
func trackedFromAttrs(db *gitobj.ObjectDatabase, t *gitobj.Tree) (*tools.OrderedSet, error) {
var oid []byte
for _, e := range t.Entries {
if strings.ToLower(e.Name) == ".gitattributes" && e.Type() == odb.BlobObjectType {
if strings.ToLower(e.Name) == ".gitattributes" && e.Type() == gitobj.BlobObjectType {
oid = e.Oid
break
}
@ -283,14 +283,14 @@ func trackedFromAttrs(db *odb.ObjectDatabase, t *odb.Tree) (*tools.OrderedSet, e
// trackedToBlob writes and returns the OID of a .gitattributes blob based on
// the patterns given in the ordered set of patterns, "patterns".
func trackedToBlob(db *odb.ObjectDatabase, patterns *tools.OrderedSet) ([]byte, error) {
func trackedToBlob(db *gitobj.ObjectDatabase, patterns *tools.OrderedSet) ([]byte, error) {
var attrs bytes.Buffer
for pattern := range patterns.Iter() {
fmt.Fprintf(&attrs, "%s\n", pattern)
}
return db.WriteBlob(&odb.Blob{
return db.WriteBlob(&gitobj.Blob{
Contents: &attrs,
Size: int64(attrs.Len()),
})
@ -299,7 +299,7 @@ func trackedToBlob(db *odb.ObjectDatabase, patterns *tools.OrderedSet) ([]byte,
// rewriteTree replaces the blob at the provided path within the given tree with
// a git lfs pointer. It will recursively rewrite any subtrees along the path to the
// blob.
func rewriteTree(gf *lfs.GitFilter, db *odb.ObjectDatabase, root []byte, path string) ([]byte, error) {
func rewriteTree(gf *lfs.GitFilter, db *gitobj.ObjectDatabase, root []byte, path string) ([]byte, error) {
tree, err := db.Tree(root)
if err != nil {
return nil, err
@ -328,7 +328,7 @@ func rewriteTree(gf *lfs.GitFilter, db *odb.ObjectDatabase, root []byte, path st
return nil, err
}
newOid, err := db.WriteBlob(&odb.Blob{
newOid, err := db.WriteBlob(&gitobj.Blob{
Contents: &buf,
Size: int64(buf.Len()),
})
@ -337,7 +337,7 @@ func rewriteTree(gf *lfs.GitFilter, db *odb.ObjectDatabase, root []byte, path st
return nil, err
}
tree = tree.Merge(&odb.TreeEntry{
tree = tree.Merge(&gitobj.TreeEntry{
Name: splits[0],
Filemode: blobEntry.Filemode,
Oid: newOid,
@ -355,7 +355,7 @@ func rewriteTree(gf *lfs.GitFilter, db *odb.ObjectDatabase, root []byte, path st
}
subtreeEntry := tree.Entries[index]
if subtreeEntry.Type() != odb.TreeObjectType {
if subtreeEntry.Type() != gitobj.TreeObjectType {
return nil, errors.Errorf("migrate: expected %s to be a tree, got %s", head, subtreeEntry.Type())
}
@ -364,7 +364,7 @@ func rewriteTree(gf *lfs.GitFilter, db *odb.ObjectDatabase, root []byte, path st
return nil, err
}
tree = tree.Merge(&odb.TreeEntry{
tree = tree.Merge(&gitobj.TreeEntry{
Filemode: subtreeEntry.Filemode,
Name: subtreeEntry.Name,
Oid: rewrittenSubtree,
@ -379,7 +379,7 @@ func rewriteTree(gf *lfs.GitFilter, db *odb.ObjectDatabase, root []byte, path st
// findEntry searches a tree for the desired entry, and returns the index of that
// entry within the tree's Entries array
func findEntry(t *odb.Tree, name string) int {
func findEntry(t *gitobj.Tree, name string) int {
for i, entry := range t.Entries {
if entry.Name == name {
return i

@ -10,10 +10,10 @@ import (
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/git/githistory"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/git-lfs/tasklog"
"github.com/git-lfs/git-lfs/tools"
"github.com/git-lfs/git-lfs/tools/humanize"
"github.com/git-lfs/gitobj"
"github.com/spf13/cobra"
)
@ -69,7 +69,7 @@ func migrateInfoCommand(cmd *cobra.Command, args []string) {
migrateInfoAbove = above
migrate(args, rewriter, l, &githistory.RewriteOptions{
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
ext := fmt.Sprintf("*%s", filepath.Ext(path))
if len(ext) > 1 {

@ -11,35 +11,35 @@ import (
"strings"
"testing"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/gitobj"
"github.com/stretchr/testify/assert"
)
// DatabaseFromFixture returns a *git/odb.ObjectDatabase instance that is safely
// DatabaseFromFixture returns a *gitobj.ObjectDatabase instance that is safely
// mutable and created from a template equivalent to the fixture that you
// provided it.
//
// If any error was encountered, it will call t.Fatalf() immediately.
func DatabaseFromFixture(t *testing.T, name string) *odb.ObjectDatabase {
func DatabaseFromFixture(t *testing.T, name string) *gitobj.ObjectDatabase {
path, err := copyToTmp(filepath.Join("fixtures", name))
if err != nil {
t.Fatalf("git/odb: could not copy fixture %s: %v", name, err)
t.Fatalf("gitobj: could not copy fixture %s: %v", name, err)
}
db, err := odb.FromFilesystem(filepath.Join(path, "objects"), "")
db, err := gitobj.FromFilesystem(filepath.Join(path, "objects"), "")
if err != nil {
t.Fatalf("git/odb: could not create object database: %v", err)
t.Fatalf("gitobj: could not create object database: %v", err)
}
return db
}
// AssertBlobContents asserts that the blob contents given by loading the path
// starting from the root tree "tree" has the given "contents".
func AssertBlobContents(t *testing.T, db *odb.ObjectDatabase, tree, path, contents string) {
func AssertBlobContents(t *testing.T, db *gitobj.ObjectDatabase, tree, path, contents string) {
// First, load the root tree.
root, err := db.Tree(HexDecode(t, tree))
if err != nil {
t.Fatalf("git/odb: cannot load tree: %s: %s", tree, err)
t.Fatalf("gitobj: cannot load tree: %s: %s", tree, err)
}
// Then, iterating through each part of the filepath (i.e., a/b/c.txt ->
@ -49,7 +49,7 @@ func AssertBlobContents(t *testing.T, db *odb.ObjectDatabase, tree, path, conten
part := parts[i]
// Load the subtree given by that name.
var subtree *odb.Tree
var subtree *gitobj.Tree
for _, entry := range root.Entries {
if entry.Name != part {
continue
@ -57,13 +57,13 @@ func AssertBlobContents(t *testing.T, db *odb.ObjectDatabase, tree, path, conten
subtree, err = db.Tree(entry.Oid)
if err != nil {
t.Fatalf("git/odb: cannot load subtree %s: %s", filepath.Join(parts[:i]...), err)
t.Fatalf("gitobj: cannot load subtree %s: %s", filepath.Join(parts[:i]...), err)
}
break
}
if subtree == nil {
t.Fatalf("git/odb: subtree %s does not exist", path)
t.Fatalf("gitobj: subtree %s does not exist", path)
}
// And re-assign it to root, creating a sort of pseudo-recursion.
@ -73,25 +73,25 @@ func AssertBlobContents(t *testing.T, db *odb.ObjectDatabase, tree, path, conten
filename := parts[len(parts)-1]
// Find the blob given by the last entry in parts (the filename).
var blob *odb.Blob
var blob *gitobj.Blob
for _, entry := range root.Entries {
if entry.Name == filename {
blob, err = db.Blob(entry.Oid)
if err != nil {
t.Fatalf("git/odb: cannot load blob %x: %s", entry.Oid, err)
t.Fatalf("gitobj: cannot load blob %x: %s", entry.Oid, err)
}
}
}
// If we couldn't find the blob, fail immediately.
if blob == nil {
t.Fatalf("git/odb: blob at %s in %s does not exist", path, tree)
t.Fatalf("gitobj: blob at %s in %s does not exist", path, tree)
}
// Perform an assertion on the blob's contents.
got, err := ioutil.ReadAll(blob.Contents)
if err != nil {
t.Fatalf("git/odb: cannot read contents from blob %s: %s", path, err)
t.Fatalf("gitobj: cannot read contents from blob %s: %s", path, err)
}
assert.Equal(t, contents, string(got))
@ -99,41 +99,41 @@ func AssertBlobContents(t *testing.T, db *odb.ObjectDatabase, tree, path, conten
// AssertCommitParent asserts that the given commit has a parent equivalent to
// the one provided.
func AssertCommitParent(t *testing.T, db *odb.ObjectDatabase, sha, parent string) {
func AssertCommitParent(t *testing.T, db *gitobj.ObjectDatabase, sha, parent string) {
commit, err := db.Commit(HexDecode(t, sha))
if err != nil {
t.Fatalf("git/odb: expected to read commit: %s, couldn't: %v", sha, err)
t.Fatalf("gitobj: expected to read commit: %s, couldn't: %v", sha, err)
}
decoded, err := hex.DecodeString(parent)
if err != nil {
t.Fatalf("git/odb: expected to decode parent SHA: %s, couldn't: %v", parent, err)
t.Fatalf("gitobj: expected to decode parent SHA: %s, couldn't: %v", parent, err)
}
assert.Contains(t, commit.ParentIDs, decoded,
"git/odb: expected parents of commit: %s to contain: %s", sha, parent)
"gitobj: expected parents of commit: %s to contain: %s", sha, parent)
}
// AssertCommitTree asserts that the given commit has a tree equivalent to the
// one provided.
func AssertCommitTree(t *testing.T, db *odb.ObjectDatabase, sha, tree string) {
func AssertCommitTree(t *testing.T, db *gitobj.ObjectDatabase, sha, tree string) {
commit, err := db.Commit(HexDecode(t, sha))
if err != nil {
t.Fatalf("git/odb: expected to read commit: %s, couldn't: %v", sha, err)
t.Fatalf("gitobj: expected to read commit: %s, couldn't: %v", sha, err)
}
decoded, err := hex.DecodeString(tree)
if err != nil {
t.Fatalf("git/odb: expected to decode tree SHA: %s, couldn't: %v", tree, err)
t.Fatalf("gitobj: expected to decode tree SHA: %s, couldn't: %v", tree, err)
}
assert.Equal(t, decoded, commit.TreeID, "git/odb: expected tree ID: %s (got: %x)", tree, commit.TreeID)
assert.Equal(t, decoded, commit.TreeID, "gitobj: expected tree ID: %s (got: %x)", tree, commit.TreeID)
}
// AssertRef asserts that a given refname points at the expected commit.
func AssertRef(t *testing.T, db *odb.ObjectDatabase, ref string, expected []byte) {
func AssertRef(t *testing.T, db *gitobj.ObjectDatabase, ref string, expected []byte) {
root, ok := db.Root()
assert.True(t, ok, "git/odb: expected *odb.ObjectDatabase to have Root()")
assert.True(t, ok, "gitobj: expected *odb.ObjectDatabase to have Root()")
cmd := exec.Command("git", "rev-parse", ref)
cmd.Dir = root
@ -149,7 +149,7 @@ func AssertRef(t *testing.T, db *odb.ObjectDatabase, ref string, expected []byte
func HexDecode(t *testing.T, sha string) []byte {
b, err := hex.DecodeString(sha)
if err != nil {
t.Fatalf("git/odb: could not decode string: %q, %v", sha, err)
t.Fatalf("gitobj: could not decode string: %q, %v", sha, err)
}
return b

@ -7,9 +7,9 @@ import (
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/git-lfs/tasklog"
"github.com/git-lfs/git-lfs/tools"
"github.com/git-lfs/gitobj"
)
// refUpdater is a type responsible for moving references from one point in the
@ -27,7 +27,7 @@ type refUpdater struct {
// located.
Root string
db *odb.ObjectDatabase
db *gitobj.ObjectDatabase
}
// UpdateRefs performs the reference update(s) from existing locations (see:
@ -57,7 +57,7 @@ func (r *refUpdater) UpdateRefs() error {
if ref.Type == git.RefTypeLocalTag {
tag, _ := r.db.Tag(sha1)
if tag != nil && tag.ObjectType == odb.CommitObjectType {
if tag != nil && tag.ObjectType == gitobj.CommitObjectType {
// Assume that a non-nil error is an indication
// that the tag is bare (without annotation).
@ -66,7 +66,7 @@ func (r *refUpdater) UpdateRefs() error {
continue
}
newTag, err := r.db.WriteTag(&odb.Tag{
newTag, err := r.db.WriteTag(&gitobj.Tag{
Object: toObj,
ObjectType: tag.ObjectType,
Name: tag.Name,

@ -11,8 +11,8 @@ import (
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/filepathfilter"
"github.com/git-lfs/git-lfs/git"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/git-lfs/tasklog"
"github.com/git-lfs/gitobj"
)
// Rewriter allows rewriting topologically equivalent Git histories
@ -23,7 +23,7 @@ type Rewriter struct {
// entries is a mapping of old tree entries to new (rewritten) ones.
// Since TreeEntry contains a []byte (and is therefore not a key-able
// type), a unique TreeEntry -> string function is used for map keys.
entries map[string]*odb.TreeEntry
entries map[string]*gitobj.TreeEntry
// commits is a mapping of old commit SHAs to new ones, where the ASCII
// hex encoding of the SHA1 values are used as map keys.
commits map[string][]byte
@ -33,7 +33,7 @@ type Rewriter struct {
filter *filepathfilter.Filter
// db is the *ObjectDatabase from which blobs, commits, and trees are
// loaded.
db *odb.ObjectDatabase
db *gitobj.ObjectDatabase
// l is the *tasklog.Logger to which updates are written.
l *tasklog.Logger
}
@ -96,7 +96,7 @@ func (r *RewriteOptions) treeFn() TreeCallbackFn {
// and instead the error will be returned from the Rewrite() function.
//
// Invocations of an instance of BlobRewriteFn are not expected to store the
// returned blobs in the *git/odb.ObjectDatabase.
// returned blobs in the *git/gitobj.ObjectDatabase.
//
// The path argument is given to be an absolute path to the tree entry being
// rewritten, where the repository root is the root of the path given. For
@ -105,7 +105,7 @@ func (r *RewriteOptions) treeFn() TreeCallbackFn {
//
// As above, the path separators are OS specific, and equivalent to the result
// of filepath.Join(...) or os.PathSeparator.
type BlobRewriteFn func(path string, b *odb.Blob) (*odb.Blob, error)
type BlobRewriteFn func(path string, b *gitobj.Blob) (*gitobj.Blob, error)
// TreeCallbackFn specifies a function to call before writing a re-written tree
// to the object database. The TreeCallbackFn can return a modified tree to be
@ -120,7 +120,7 @@ type BlobRewriteFn func(path string, b *odb.Blob) (*odb.Blob, error)
//
// If the TreeCallbackFn returns an error, it will be returned from the
// Rewrite() invocation.
type TreeCallbackFn func(path string, t *odb.Tree) (*odb.Tree, error)
type TreeCallbackFn func(path string, t *gitobj.Tree) (*gitobj.Tree, error)
type rewriterOption func(*Rewriter)
@ -150,17 +150,17 @@ var (
// noopBlobFn is a no-op implementation of the BlobRewriteFn. It returns
// the blob that it was given, and returns no error.
noopBlobFn = func(path string, b *odb.Blob) (*odb.Blob, error) { return b, nil }
noopBlobFn = func(path string, b *gitobj.Blob) (*gitobj.Blob, error) { return b, nil }
// noopTreeFn is a no-op implementation of the TreeCallbackFn. It returns
// the tree that it was given, and returns no error.
noopTreeFn = func(path string, t *odb.Tree) (*odb.Tree, error) { return t, nil }
noopTreeFn = func(path string, t *gitobj.Tree) (*gitobj.Tree, error) { return t, nil }
)
// NewRewriter constructs a *Rewriter from the given *ObjectDatabase instance.
func NewRewriter(db *odb.ObjectDatabase, opts ...rewriterOption) *Rewriter {
func NewRewriter(db *gitobj.ObjectDatabase, opts ...rewriterOption) *Rewriter {
rewriter := &Rewriter{
mu: new(sync.Mutex),
entries: make(map[string]*odb.TreeEntry),
entries: make(map[string]*gitobj.TreeEntry),
commits: make(map[string][]byte),
db: db,
@ -247,7 +247,7 @@ func (r *Rewriter) Rewrite(opt *RewriteOptions) ([]byte, error) {
// Construct a new commit using the original header information,
// but the rewritten set of parents as well as root tree.
rewrittenCommit := &odb.Commit{
rewrittenCommit := &gitobj.Commit{
Author: original.Author,
Committer: original.Committer,
ExtraHeaders: original.ExtraHeaders,
@ -327,7 +327,7 @@ func (r *Rewriter) rewriteTree(commitOID []byte, treeOID []byte, path string, fn
return nil, err
}
entries := make([]*odb.TreeEntry, 0, len(tree.Entries))
entries := make([]*gitobj.TreeEntry, 0, len(tree.Entries))
for _, entry := range tree.Entries {
var fullpath string
if len(path) > 0 {
@ -355,9 +355,9 @@ func (r *Rewriter) rewriteTree(commitOID []byte, treeOID []byte, path string, fn
var oid []byte
switch entry.Type() {
case odb.BlobObjectType:
case gitobj.BlobObjectType:
oid, err = r.rewriteBlob(commitOID, entry.Oid, fullpath, fn, perc)
case odb.TreeObjectType:
case gitobj.TreeObjectType:
oid, err = r.rewriteTree(commitOID, entry.Oid, fullpath, fn, tfn, perc)
default:
oid = entry.Oid
@ -367,14 +367,14 @@ func (r *Rewriter) rewriteTree(commitOID []byte, treeOID []byte, path string, fn
return nil, err
}
entries = append(entries, r.cacheEntry(entry, &odb.TreeEntry{
entries = append(entries, r.cacheEntry(entry, &gitobj.TreeEntry{
Filemode: entry.Filemode,
Name: entry.Name,
Oid: oid,
}))
}
rewritten, err := tfn("/"+path, &odb.Tree{Entries: entries})
rewritten, err := tfn("/"+path, &gitobj.Tree{Entries: entries})
if err != nil {
return nil, err
}
@ -385,7 +385,7 @@ func (r *Rewriter) rewriteTree(commitOID []byte, treeOID []byte, path string, fn
return r.db.WriteTree(rewritten)
}
func copyEntry(e *odb.TreeEntry) *odb.TreeEntry {
func copyEntry(e *gitobj.TreeEntry) *gitobj.TreeEntry {
if e == nil {
return nil
}
@ -393,18 +393,18 @@ func copyEntry(e *odb.TreeEntry) *odb.TreeEntry {
oid := make([]byte, len(e.Oid))
copy(oid, e.Oid)
return &odb.TreeEntry{
return &gitobj.TreeEntry{
Filemode: e.Filemode,
Name: e.Name,
Oid: oid,
}
}
func (r *Rewriter) allows(typ odb.ObjectType, abs string) bool {
func (r *Rewriter) allows(typ gitobj.ObjectType, abs string) bool {
switch typ {
case odb.BlobObjectType:
case gitobj.BlobObjectType:
return r.Filter().Allows(strings.TrimPrefix(abs, "/"))
case odb.CommitObjectType, odb.TreeObjectType:
case gitobj.CommitObjectType, gitobj.TreeObjectType:
return true
default:
panic(fmt.Sprintf("git/githistory: unknown entry type: %s", typ))
@ -546,7 +546,7 @@ func (r *Rewriter) Filter() *filepathfilter.Filter {
// cacheEntry caches then given "from" entry so that it is always rewritten as
// a *TreeEntry equivalent to "to".
func (r *Rewriter) cacheEntry(from, to *odb.TreeEntry) *odb.TreeEntry {
func (r *Rewriter) cacheEntry(from, to *gitobj.TreeEntry) *gitobj.TreeEntry {
r.mu.Lock()
defer r.mu.Unlock()
@ -558,7 +558,7 @@ func (r *Rewriter) cacheEntry(from, to *odb.TreeEntry) *odb.TreeEntry {
// uncacheEntry returns a *TreeEntry that is cached from the given *TreeEntry
// "from". That is to say, it returns the *TreeEntry that "from" should be
// rewritten to, or nil if none could be found.
func (r *Rewriter) uncacheEntry(from *odb.TreeEntry) *odb.TreeEntry {
func (r *Rewriter) uncacheEntry(from *gitobj.TreeEntry) *gitobj.TreeEntry {
r.mu.Lock()
defer r.mu.Unlock()
@ -566,12 +566,12 @@ func (r *Rewriter) uncacheEntry(from *odb.TreeEntry) *odb.TreeEntry {
}
// entryKey returns a unique key for a given *TreeEntry "e".
func (r *Rewriter) entryKey(e *odb.TreeEntry) string {
func (r *Rewriter) entryKey(e *gitobj.TreeEntry) string {
return fmt.Sprintf("%s:%x", e.Name, e.Oid)
}
// cacheEntry caches then given "from" commit so that it is always rewritten as
// a *git/odb.Commit equivalent to "to".
// a *git/gitobj.Commit equivalent to "to".
func (r *Rewriter) cacheCommit(from, to []byte) {
r.mu.Lock()
defer r.mu.Unlock()
@ -579,8 +579,8 @@ func (r *Rewriter) cacheCommit(from, to []byte) {
r.commits[hex.EncodeToString(from)] = to
}
// uncacheCommit returns a *git/odb.Commit that is cached from the given
// *git/odb.Commit "from". That is to say, it returns the *git/odb.Commit that
// uncacheCommit returns a *git/gitobj.Commit that is cached from the given
// *git/gitobj.Commit "from". That is to say, it returns the *git/gitobj.Commit that
// "from" should be rewritten to and true, or nil and false if none could be
// found.
func (r *Rewriter) uncacheCommit(from []byte) ([]byte, bool) {

@ -11,7 +11,7 @@ import (
"testing"
"github.com/git-lfs/git-lfs/filepathfilter"
"github.com/git-lfs/git-lfs/git/odb"
"github.com/git-lfs/gitobj"
"github.com/stretchr/testify/assert"
)
@ -20,7 +20,7 @@ func TestRewriterRewritesHistory(t *testing.T) {
r := NewRewriter(db)
tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
contents, err := ioutil.ReadAll(b.Contents)
if err != nil {
return nil, err
@ -33,7 +33,7 @@ func TestRewriterRewritesHistory(t *testing.T) {
rewritten := strconv.Itoa(n + 1)
return &odb.Blob{
return &gitobj.Blob{
Contents: strings.NewReader(rewritten),
Size: int64(len(rewritten)),
}, nil
@ -81,8 +81,8 @@ func TestRewriterRewritesOctopusMerges(t *testing.T) {
r := NewRewriter(db)
tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
return &odb.Blob{
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
return &gitobj.Blob{
Contents: io.MultiReader(b.Contents, strings.NewReader("_new")),
Size: b.Size + int64(len("_new")),
}, nil
@ -128,7 +128,7 @@ func TestRewriterVisitsPackedObjects(t *testing.T) {
var contents []byte
_, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
var err error
contents, err = ioutil.ReadAll(b.Contents)
@ -136,7 +136,7 @@ func TestRewriterVisitsPackedObjects(t *testing.T) {
return nil, err
}
return &odb.Blob{
return &gitobj.Blob{
Contents: bytes.NewReader(contents),
Size: int64(len(contents)),
}, nil
@ -154,7 +154,7 @@ func TestRewriterDoesntVisitUnchangedSubtrees(t *testing.T) {
seen := make(map[string]int)
_, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
seen[path] = seen[path] + 1
return b, nil
@ -172,12 +172,12 @@ func TestRewriterVisitsUniqueEntriesWithIdenticalContents(t *testing.T) {
r := NewRewriter(db)
tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
if path == "b.txt" {
return b, nil
}
return &odb.Blob{
return &gitobj.Blob{
Contents: strings.NewReader("changed"),
Size: int64(len("changed")),
}, nil
@ -212,7 +212,7 @@ func TestRewriterIgnoresPathsThatDontMatchFilter(t *testing.T) {
seen := make(map[string]int)
_, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
seen[path] = seen[path] + 1
return b, nil
@ -228,20 +228,20 @@ func TestRewriterAllowsAdditionalTreeEntries(t *testing.T) {
db := DatabaseFromFixture(t, "linear-history.git")
r := NewRewriter(db)
extra, err := db.WriteBlob(&odb.Blob{
extra, err := db.WriteBlob(&gitobj.Blob{
Contents: strings.NewReader("extra\n"),
Size: int64(len("extra\n")),
})
assert.Nil(t, err)
tip, err := r.Rewrite(&RewriteOptions{Include: []string{"refs/heads/master"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
return b, nil
},
TreeCallbackFn: func(path string, tr *odb.Tree) (*odb.Tree, error) {
return &odb.Tree{
Entries: append(tr.Entries, &odb.TreeEntry{
TreeCallbackFn: func(path string, tr *gitobj.Tree) (*gitobj.Tree, error) {
return &gitobj.Tree{
Entries: append(tr.Entries, &gitobj.TreeEntry{
Name: "extra.txt",
Filemode: 0100644,
Oid: extra,
@ -300,7 +300,7 @@ func TestHistoryRewriterUseOriginalParentsForPartialMigration(t *testing.T) {
Include: []string{"refs/heads/master"},
Exclude: []string{"refs/tags/middle"},
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
return b, nil
},
})
@ -337,10 +337,10 @@ func TestHistoryRewriterUpdatesRefs(t *testing.T) {
UpdateRefs: true,
BlobFn: func(path string, b *odb.Blob) (*odb.Blob, error) {
BlobFn: func(path string, b *gitobj.Blob) (*gitobj.Blob, error) {
suffix := strings.NewReader("_suffix")
return &odb.Blob{
return &gitobj.Blob{
Contents: io.MultiReader(b.Contents, suffix),
Size: b.Size + int64(suffix.Len()),
}, nil
@ -375,7 +375,7 @@ func TestHistoryRewriterReturnsFilter(t *testing.T) {
//
// Callers are expected to call it immediately after calling the Rewrite()
// function.
func debug(t *testing.T, db *odb.ObjectDatabase, tip []byte, err error) {
func debug(t *testing.T, db *gitobj.ObjectDatabase, tip []byte, err error) {
root, ok := db.Root()
t.Log(strings.Repeat("*", 80))

8
glide.lock generated

@ -1,5 +1,5 @@
hash: 5d2fbd8be4931b982d29c6ac8df833f139b28ffdb44ca062948a2386e2096a4d
updated: 2018-05-25T13:01:03.220513-07:00
hash: 28cfce2b07c2f75e13f9347f903b1f5b73379c8c84c5a66c85b6328df7d3540f
updated: 2018-07-05T11:50:15.501713165-05:00
imports:
- name: github.com/alexbrainman/sspi
version: 4729b3d4d8581b2db83864d1018926e4154f9406
@ -9,6 +9,10 @@ imports:
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
subpackages:
- spew
- name: github.com/git-lfs/gitobj
version: 0fcb9c3796fa00bd974c86dc25b1919da1479007
subpackages:
- pack
- name: github.com/git-lfs/go-netrc
version: e0e9ca483a183481412e6f5a700ff20a36177503
subpackages:

@ -32,3 +32,5 @@ import:
version: 4729b3d4d8581b2db83864d1018926e4154f9406
subpackages:
- ntlm
- package: github.com/git-lfs/gitobj
version: 0fcb9c3796fa00bd974c86dc25b1919da1479007

21
vendor/github.com/git-lfs/gitobj/LICENSE.md generated vendored Normal file

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2017- GitHub, Inc. and Git LFS contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

9
vendor/github.com/git-lfs/gitobj/README.md generated vendored Normal file

@ -0,0 +1,9 @@
# gitobj
Package `gitobj` reads and writes loose and packed Git objects.
For more: godoc.org/github.com/git-lfs/gitobj.
## License
MIT.
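A minimal usage sketch, based only on the calls that appear elsewhere in this diff (`FromFilesystem`, `Blob`, `Close`); the paths and OID below are placeholders:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"path/filepath"

	"github.com/git-lfs/gitobj"
)

func main() {
	// Placeholder paths; the test helpers in this diff pass "" for the tmp dir.
	db, err := gitobj.FromFilesystem(filepath.Join(".git", "objects"), "")
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer db.Close()

	// Look up a blob (loose or packed) by its binary SHA-1.
	sha := make([]byte, 20) // placeholder OID
	if blob, err := db.Blob(sha); err == nil {
		contents, _ := ioutil.ReadAll(blob.Contents)
		fmt.Printf("%d bytes: %s\n", blob.Size, contents)
	}
}
```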

@ -1,11 +1,10 @@
package odb
package gitobj
import (
"bytes"
"fmt"
"io"
"os"
"github.com/git-lfs/git-lfs/errors"
)
// Blob represents a Git object of type "blob".
@ -42,14 +41,14 @@ func NewBlobFromBytes(contents []byte) *Blob {
func NewBlobFromFile(path string) (*Blob, error) {
f, err := os.Open(path)
if err != nil {
return nil, errors.Wrapf(err, "git/odb: could not open: %s",
path)
return nil, fmt.Errorf("gitobj: could not open: %s: %s", path,
err)
}
stat, err := f.Stat()
if err != nil {
return nil, errors.Wrapf(err, "git/odb: could not stat %s",
path)
return nil, fmt.Errorf("gitobj: could not stat %s: %s", path,
err)
}
return &Blob{
@ -58,8 +57,9 @@ func NewBlobFromFile(path string) (*Blob, error) {
closeFn: func() error {
if err := f.Close(); err != nil {
return errors.Wrapf(err,
"git/odb: could not close %s", path)
return fmt.Errorf(
"gitobj: could not close %s: %s",
path, err)
}
return nil
},

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bufio"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"

@ -1,4 +1,4 @@
package odb
package gitobj
import "fmt"
@ -14,5 +14,5 @@ type UnexpectedObjectType struct {
// Error implements the error.Error() function.
func (e *UnexpectedObjectType) Error() string {
return fmt.Sprintf("git/odb: unexpected object type, got: %q, wanted: %q", e.Got, e.Wanted)
return fmt.Sprintf("gitobj: unexpected object type, got: %q, wanted: %q", e.Got, e.Wanted)
}

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"testing"
@ -11,5 +11,5 @@ func TestUnexpectedObjectTypeErrFormatting(t *testing.T) {
Got: TreeObjectType, Wanted: BlobObjectType,
}
assert.Equal(t, "git/odb: unexpected object type, got: \"tree\", wanted: \"blob\"", err.Error())
assert.Equal(t, "gitobj: unexpected object type, got: \"tree\", wanted: \"blob\"", err.Error())
}

@ -1,13 +1,12 @@
package odb
package gitobj
import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/git-lfs/git-lfs/errors"
)
// fileStorer implements the storer interface by writing to the .git/objects
@ -53,7 +52,7 @@ func (fs *fileStorer) Store(sha []byte, r io.Reader) (n int64, err error) {
// collision).
_, err = io.Copy(ioutil.Discard, r)
if err != nil {
return 0, errors.Wrap(err, "discard pre-existing object data")
return 0, fmt.Errorf("discard pre-existing object data: %s", err)
}
return 0, nil

17
vendor/github.com/git-lfs/gitobj/glide.lock generated vendored Normal file

@ -0,0 +1,17 @@
hash: 7ac25f90bf24ea1f92a5b1153932af0d26e35dc6ea4ecb50fc48c4138429d6a0
updated: 2018-07-05T11:01:37.530865814-05:00
imports: []
testImports:
- name: github.com/davecgh/go-spew
version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d
subpackages:
- spew
- name: github.com/pmezard/go-difflib
version: d8ed2627bdf02c080bf22230dbb337003b7aba2d
subpackages:
- difflib
- name: github.com/stretchr/testify
version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d
subpackages:
- assert
- require

8
vendor/github.com/git-lfs/gitobj/glide.yaml generated vendored Normal file

@ -0,0 +1,8 @@
package: github.com/git-lfs/go-gitobj
import: []
testImport:
- package: github.com/stretchr/testify
version: 6cb3b85ef5a0efef77caef88363ec4d4b5c0976d
subpackages:
- assert
- require

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"

@ -1,4 +1,4 @@
package odb
package gitobj
import "io"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"
@ -9,8 +9,7 @@ import (
"strings"
"sync/atomic"
"github.com/git-lfs/git-lfs/errors"
"github.com/git-lfs/git-lfs/git/odb/pack"
"github.com/git-lfs/gitobj/pack"
)
// ObjectDatabase enables the reading and writing of objects against a storage
@ -58,7 +57,7 @@ func FromFilesystem(root, tmp string) (*ObjectDatabase, error) {
// If Close() has already been called, this function will return an error.
func (o *ObjectDatabase) Close() error {
if !atomic.CompareAndSwapUint32(&o.closed, 0, 1) {
return errors.New("git/odb: *ObjectDatabase already closed")
return fmt.Errorf("gitobj: *ObjectDatabase already closed")
}
if err := o.packs.Close(); err != nil {
@ -246,7 +245,7 @@ func (o *ObjectDatabase) open(sha []byte) (*ObjectReader, error) {
// load its contents from the *git.ObjectScanner by leveraging
// `git-cat-file --batch`.
if atomic.LoadUint32(&o.closed) == 1 {
return nil, errors.New("git/odb: cannot use closed *pack.Set")
return nil, fmt.Errorf("gitobj: cannot use closed *pack.Set")
}
packed, err := o.packs.Object(sha)
@ -280,7 +279,7 @@ func (o *ObjectDatabase) open(sha []byte) (*ObjectReader, error) {
// the `io.Closer` interface), but skips this if the "into" Object is of type
// BlobObjectType. Blobs don't exhaust the buffer completely (they instead
// maintain a handle on the blob's contents via an io.LimitedReader) and
// therefore cannot be closed until signaled explicitly by git/odb.Blob.Close().
// therefore cannot be closed until signaled explicitly by gitobj.Blob.Close().
func (o *ObjectDatabase) decode(sha []byte, into Object) error {
r, err := o.open(sha)
if err != nil {

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"
@ -226,7 +226,7 @@ func TestReadingAMissingObjectAfterClose(t *testing.T) {
}
blob, err := db.Blob(sha)
assert.EqualError(t, err, "git/odb: cannot use closed *pack.Set")
assert.EqualError(t, err, "gitobj: cannot use closed *pack.Set")
assert.Nil(t, blob)
}
@ -235,7 +235,7 @@ func TestClosingAnObjectDatabaseMoreThanOnce(t *testing.T) {
assert.Nil(t, err)
assert.Nil(t, db.Close())
assert.EqualError(t, db.Close(), "git/odb: *ObjectDatabase already closed")
assert.EqualError(t, db.Close(), "gitobj: *ObjectDatabase already closed")
}
func TestObjectDatabaseRootWithRoot(t *testing.T) {

@ -1,14 +1,13 @@
package odb
package gitobj
import (
"bufio"
"compress/zlib"
"fmt"
"io"
"io/ioutil"
"strconv"
"strings"
"github.com/pkg/errors"
)
// ObjectReader provides an io.Reader implementation that can read Git object
@ -106,8 +105,8 @@ func (r *ObjectReader) Header() (typ ObjectType, size int64, err error) {
return UnknownObjectType, 0, err
}
if len(typs) == 0 {
return UnknownObjectType, 0, errors.Errorf(
"git/odb: object type must not be empty",
return UnknownObjectType, 0, fmt.Errorf(
"gitobj: object type must not be empty",
)
}
typs = strings.TrimSuffix(typs, " ")

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"

@ -1,4 +1,4 @@
package odb
package gitobj
import "strings"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"math"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"compress/zlib"
@ -85,7 +85,7 @@ func NewObjectWriteCloser(w io.WriteCloser) *ObjectWriter {
// WriteHeader MUST be called only once, or a panic() will occur.
func (w *ObjectWriter) WriteHeader(typ ObjectType, len int64) (n int, err error) {
if !atomic.CompareAndSwapUint32(&w.wroteHeader, 0, 1) {
panic("git/odb: cannot write headers more than once")
panic("gitobj: cannot write headers more than once")
}
return fmt.Fprintf(w, "%s %d\x00", typ, len)
}
@ -98,7 +98,7 @@ func (w *ObjectWriter) WriteHeader(typ ObjectType, len int64) (n int, err error)
// occur.
func (w *ObjectWriter) Write(p []byte) (n int, err error) {
if atomic.LoadUint32(&w.wroteHeader) != 1 {
panic("git/odb: cannot write data without header")
panic("gitobj: cannot write data without header")
}
return w.w.Write(p)
}

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"
@ -61,7 +61,7 @@ func TestObjectWriterPanicsOnWritesWithoutHeader(t *testing.T) {
err := recover()
assert.NotNil(t, err)
assert.Equal(t, "git/odb: cannot write data without header", err)
assert.Equal(t, "gitobj: cannot write data without header", err)
}()
w := NewObjectWriter(new(bytes.Buffer))
@ -73,7 +73,7 @@ func TestObjectWriterPanicsOnMultipleHeaderWrites(t *testing.T) {
err := recover()
assert.NotNil(t, err)
assert.Equal(t, "git/odb: cannot write headers more than once", err)
assert.Equal(t, "gitobj: cannot write headers more than once", err)
}()
w := NewObjectWriter(new(bytes.Buffer))

@ -9,7 +9,7 @@ import "fmt"
// inclusive or exclusive. *bounds makes no assumptions on the inclusivity of
// those values.
//
// See: *git/odb/pack.Index for more.
// See: *gitobj/pack.Index for more.
type bounds struct {
// left is the left or lower bound of the bounds.
left int64

@ -1,8 +1,6 @@
package pack
import (
"github.com/git-lfs/git-lfs/errors"
)
import "fmt"
// ChainDelta represents a "delta" component of a delta-base chain.
type ChainDelta struct {
@ -49,7 +47,7 @@ func patch(base, delta []byte) ([]byte, error) {
//
// If this does not match with the srcSize, return an error
// early so as to avoid a possible bounds error below.
return nil, errors.New("git/odb/pack: invalid delta data")
return nil, fmt.Errorf("gitobj/pack: invalid delta data")
}
// The remainder of the delta header contains the destination size, and
@ -136,8 +134,8 @@ func patch(base, delta []byte) ([]byte, error) {
// instruction.
//
// Return immediately.
return nil, errors.New(
"git/odb/pack: invalid delta data")
return nil, fmt.Errorf(
"gitobj/pack: invalid delta data")
}
}
@ -147,7 +145,7 @@ func patch(base, delta []byte) ([]byte, error) {
// an invalid set of patch instructions.
//
// Return immediately.
return nil, errors.New("git/odb/pack: invalid delta data")
return nil, fmt.Errorf("gitobj/pack: invalid delta data")
}
return dest, nil
}
@ -161,7 +159,7 @@ func patchDeltaHeader(delta []byte, pos int) (size int64, end int) {
for shift == 0 || c&0x80 != 0 {
if len(delta) <= pos {
panic("git/odb/pack: invalid delta header")
panic("gitobj/pack: invalid delta header")
}
c = int64(delta[pos])

@ -87,7 +87,7 @@ func TestChainDeltaWithInvalidDeltaInstruction(t *testing.T) {
}
data, err := c.Unpack()
assert.EqualError(t, err, "git/odb/pack: invalid delta data")
assert.EqualError(t, err, "gitobj/pack: invalid delta data")
assert.Nil(t, data)
}
@ -107,6 +107,6 @@ func TestChainDeltaWithExtraInstructions(t *testing.T) {
}
data, err := c.Unpack()
assert.EqualError(t, err, "git/odb/pack: invalid delta data")
assert.EqualError(t, err, "gitobj/pack: invalid delta data")
assert.Nil(t, data)
}

@ -11,5 +11,5 @@ type UnsupportedVersionErr struct {
// Error implements 'error.Error()'.
func (u *UnsupportedVersionErr) Error() string {
return fmt.Sprintf("git/odb/pack: unsupported version: %d", u.Got)
return fmt.Sprintf("gitobj/pack: unsupported version: %d", u.Got)
}

@ -9,5 +9,5 @@ import (
func TestUnsupportedVersionErr(t *testing.T) {
u := &UnsupportedVersionErr{Got: 3}
assert.Error(t, u, "git/odb/pack: unsupported version: 3")
assert.Error(t, u, "gitobj/pack: unsupported version: 3")
}

@ -2,9 +2,8 @@ package pack
import (
"bytes"
"fmt"
"io"
"github.com/git-lfs/git-lfs/errors"
)
// Index stores information about the location of objects in a corresponding
@ -43,7 +42,7 @@ func (i *Index) Close() error {
var (
// errNotFound is an error returned by Index.Entry() (see: below) when
// an object cannot be found in the index.
errNotFound = errors.New("git/odb/pack: object not found in index")
errNotFound = fmt.Errorf("gitobj/pack: object not found in index")
)
// IsNotFound returns whether a given error represents a missing object in the

@ -3,9 +3,8 @@ package pack
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"github.com/git-lfs/git-lfs/errors"
)
const (
@ -57,7 +56,7 @@ const (
var (
// ErrShortFanout is an error representing situations where the entire
// fanout table could not be read, and is thus too short.
ErrShortFanout = errors.New("git/odb/pack: too short fanout table")
ErrShortFanout = fmt.Errorf("gitobj/pack: too short fanout table")
// indexHeader is the first four "magic" bytes of index files version 2
// or newer.
@ -109,7 +108,6 @@ func decodeIndexHeader(r io.ReaderAt) (IndexVersion, error) {
case 2:
return new(V2), nil
}
return nil, &UnsupportedVersionErr{uint32(version)}
}
return new(V1), nil

@ -9,13 +9,6 @@ import (
"github.com/stretchr/testify/assert"
)
func TestDecodeIndexV1InvalidFanout(t *testing.T) {
idx, err := DecodeIndex(bytes.NewReader(make([]byte, indexFanoutWidth-1)))
assert.Equal(t, ErrShortFanout, err)
assert.Nil(t, idx)
}
func TestDecodeIndexV2(t *testing.T) {
buf := make([]byte, 0, indexV2Width+indexFanoutWidth)
buf = append(buf, 0xff, 0x74, 0x4f, 0x63)
@ -42,8 +35,8 @@ func TestDecodeIndexV2InvalidFanout(t *testing.T) {
idx, err := DecodeIndex(bytes.NewReader(buf))
assert.Nil(t, idx)
assert.Equal(t, ErrShortFanout, err)
assert.Nil(t, idx)
}
func TestDecodeIndexV1(t *testing.T) {
@ -53,6 +46,13 @@ func TestDecodeIndexV1(t *testing.T) {
assert.EqualValues(t, 0, idx.Count())
}
func TestDecodeIndexV1InvalidFanout(t *testing.T) {
idx, err := DecodeIndex(bytes.NewReader(make([]byte, indexFanoutWidth-1)))
assert.Equal(t, ErrShortFanout, err)
assert.Nil(t, idx)
}
func TestDecodeIndexUnsupportedVersion(t *testing.T) {
buf := make([]byte, 0, 4+4)
buf = append(buf, 0xff, 0x74, 0x4f, 0x63)
@ -60,7 +60,7 @@ func TestDecodeIndexUnsupportedVersion(t *testing.T) {
idx, err := DecodeIndex(bytes.NewReader(buf))
assert.EqualError(t, err, "git/odb/pack: unsupported version: 3")
assert.EqualError(t, err, "gitobj/pack: unsupported version: 3")
assert.Nil(t, idx)
}

@ -3,10 +3,9 @@ package pack
import (
"bytes"
"encoding/binary"
"fmt"
"testing"
"github.com/git-lfs/git-lfs/errors"
"github.com/stretchr/testify/assert"
)
@ -81,7 +80,7 @@ func TestIndexIsNotFound(t *testing.T) {
}
func TestIndexIsNotFoundForOtherErrors(t *testing.T) {
assert.False(t, IsNotFound(errors.New("git/odb/pack: misc")),
assert.False(t, IsNotFound(fmt.Errorf("gitobj/pack: misc")),
"expected 'err' not to satisfy 'IsNotFound()'")
}

@ -1,7 +1,5 @@
package pack
// IndexVersion is a constant type that represents the version of encoding used
// by a particular index version.
type IndexVersion interface {
// Name returns the name of the object located at the given offset "at",
// in the Index file "idx".

@ -2,10 +2,9 @@ package pack
import (
"bytes"
"fmt"
"testing"
"github.com/git-lfs/git-lfs/errors"
"github.com/stretchr/testify/assert"
)
@ -32,7 +31,7 @@ func TestOffsetReaderAtReadsAtOffset(t *testing.T) {
}
func TestOffsetReaderPropogatesErrors(t *testing.T) {
expected := errors.New("git/odb/pack: testing")
expected := fmt.Errorf("gitobj/pack: testing")
bo := &OffsetReaderAt{
r: &ErrReaderAt{Err: expected},
o: 1,

@ -1,10 +1,9 @@
package pack
import (
"fmt"
"testing"
"github.com/git-lfs/git-lfs/errors"
"github.com/stretchr/testify/assert"
)
@ -32,7 +31,7 @@ func TestObjectUnpackUnpacksData(t *testing.T) {
}
func TestObjectUnpackPropogatesErrors(t *testing.T) {
expected := errors.New("git/odb/pack: testing")
expected := fmt.Errorf("gitobj/pack: testing")
o := &Object{
data: &ChainSimple{

@ -2,10 +2,9 @@ package pack
import (
"compress/zlib"
"fmt"
"io"
"io/ioutil"
"github.com/git-lfs/git-lfs/errors"
)
// Packfile encapsulates the behavior of accessing an unpacked representation of
@ -57,8 +56,7 @@ func (p *Packfile) Object(name []byte) (*Object, error) {
if !IsNotFound(err) {
// If the error was not an errNotFound, re-wrap it with
// additional context.
err = errors.Wrap(err,
"git/odb/pack: could not load index")
err = fmt.Errorf("gitobj/pack: could not load index: %s", err)
}
return nil, err
}
@ -225,8 +223,8 @@ func (p *Packfile) findBase(typ PackedObjectType, offset, objOffset int64) (Chai
default:
// If we did not receive an OBJ_OFS_DELTA, or OBJ_REF_DELTA, the
// type given is not a delta-fied type. Return an error.
return nil, baseOffset, errors.Errorf(
"git/odb/pack: type %s is not deltafied", typ)
return nil, baseOffset, fmt.Errorf(
"gitobj/pack: type %s is not deltafied", typ)
}
// Once we have determined the base offset of the object's chain base,

@ -13,7 +13,7 @@ var (
// errBadPackHeader is a sentinel error value returned when the given
// pack header does not match the expected one.
errBadPackHeader = errors.New("git/odb/pack: bad pack header")
errBadPackHeader = errors.New("gitobj/pack: bad pack header")
)
// DecodePackfile opens the packfile given by the io.ReaderAt "r" for reading.

@ -4,13 +4,12 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"sort"
"strings"
"sync/atomic"
"testing"
"github.com/git-lfs/git-lfs/errors"
"github.com/stretchr/testify/assert"
)
@ -184,7 +183,7 @@ func TestPackfileClosesReadClosers(t *testing.T) {
}
func TestPackfileClosePropogatesCloseErrors(t *testing.T) {
e := errors.New("git/odb/pack: testing")
e := fmt.Errorf("gitobj/pack: testing")
p := &Packfile{
r: &ReaderAtCloser{E: e},
}
@ -269,7 +268,7 @@ func IndexWith(offsets map[string]uint32) *Index {
func DecodeHex(t *testing.T, str string) []byte {
b, err := hex.DecodeString(str)
if err != nil {
t.Fatalf("git/odb/pack: unexpected hex.DecodeString error: %s", err)
t.Fatalf("gitobj/pack: unexpected hex.DecodeString error: %s", err)
}
return b

@ -29,7 +29,7 @@ const (
)
// String implements fmt.Stringer and returns an encoding of the type valid for
// use in the loose object format protocol (see: package 'git/odb' for more).
// use in the loose object format protocol (see: package 'gitobj' for more).
//
// If the receiving instance is not defined, String() will panic().
func (t PackedObjectType) String() string {
@ -50,9 +50,9 @@ func (t PackedObjectType) String() string {
return "obj_ref_delta"
}
panic(fmt.Sprintf("git/odb/pack: unknown object type: %d", t))
panic(fmt.Sprintf("gitobj/pack: unknown object type: %d", t))
}
var (
errUnrecognizedObjectType = errors.New("git/odb/pack: unrecognized object type")
errUnrecognizedObjectType = errors.New("gitobj/pack: unrecognized object type")
)

@ -20,7 +20,7 @@ func (c *PackedObjectStringTestCase) Assert(t *testing.T) {
err := recover()
if err == nil {
t.Fatalf("git/odb/pack: expected panic()")
t.Fatalf("gitobj/pack: expected panic()")
}
assert.Equal(t, c.Expected, fmt.Sprintf("%s", err))
@ -45,7 +45,7 @@ func TestPackedObjectTypeString(t *testing.T) {
Expected: "obj_ref_delta"},
"unknown type": {T: PackedObjectType(5), Panic: true,
Expected: "git/odb/pack: unknown object type: 5"},
Expected: "gitobj/pack: unknown object type: 5"},
} {
t.Run(desc, c.Assert)
}

@ -1,4 +1,4 @@
package odb
package gitobj
import "io"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bufio"
@ -7,8 +7,6 @@ import (
"fmt"
"io"
"strings"
"github.com/git-lfs/git-lfs/errors"
)
type Tag struct {
@ -45,14 +43,14 @@ func (t *Tag) Decode(r io.Reader, size int64) (int, error) {
parts := strings.SplitN(scanner.Text(), " ", 2)
if len(parts) < 2 {
return 0, errors.Errorf("git/odb: invalid tag header: %s", scanner.Text())
return 0, fmt.Errorf("gitobj: invalid tag header: %s", scanner.Text())
}
switch parts[0] {
case "object":
sha, err := hex.DecodeString(parts[1])
if err != nil {
return 0, errors.Wrap(err, "git/odb: unable to decode SHA-1")
return 0, fmt.Errorf("gitobj: unable to decode SHA-1: %s", err)
}
t.Object = sha
@ -63,7 +61,7 @@ func (t *Tag) Decode(r io.Reader, size int64) (int, error) {
case "tagger":
t.Tagger = parts[1]
default:
return 0, errors.Errorf("git/odb: unknown tag header: %s", parts[0])
return 0, fmt.Errorf("gitobj: unknown tag header: %s", parts[0])
}
}
}

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bytes"

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bufio"
@ -220,7 +220,7 @@ func (e *TreeEntry) Type() ObjectType {
// Safeguard that catch here, or otherwise panic.
return CommitObjectType
} else {
panic(fmt.Sprintf("git/odb: unknown object type: %o",
panic(fmt.Sprintf("gitobj: unknown object type: %o",
e.Filemode))
}
}
@ -231,7 +231,7 @@ func (e *TreeEntry) Type() ObjectType {
// write trees in a correct, readable format to the Git object database.
//
// The format is as follows: entries are sorted lexicographically in byte-order,
// with subtrees (entries of Type() == git/odb.TreeObjectType) being sorted as
// with subtrees (entries of Type() == gitobj.TreeObjectType) being sorted as
// if their `Name` fields ended in a "/".
//
// See: https://github.com/git/git/blob/v2.13.0/fsck.c#L492-L525 for more

@ -1,4 +1,4 @@
package odb
package gitobj
import (
"bufio"
@ -173,7 +173,7 @@ func (c *TreeEntryTypeTestCase) Assert(t *testing.T) {
got := e.Type()
assert.Equal(t, c.Expected, got,
"git/odb: expected type: %s, got: %s", c.Expected, got)
"gitobj: expected type: %s, got: %s", c.Expected, got)
}
func TestTreeEntryTypeResolution(t *testing.T) {
@ -192,9 +192,9 @@ func TestTreeEntryTypeResolutionUnknown(t *testing.T) {
defer func() {
if err := recover(); err == nil {
t.Fatal("git/odb: expected panic(), got none")
t.Fatal("gitobj: expected panic(), got none")
} else {
assert.Equal(t, "git/odb: unknown object type: -1", err)
assert.Equal(t, "gitobj: unknown object type: -1", err)
}
}()

@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

@ -0,0 +1,11 @@
language: go
go: 1.2
install:
- go get -v code.google.com/p/go.tools/cmd/cover
script:
- go test -v -tags=disableunsafe ./spew
- go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
after_success:
- go get -v github.com/mattn/goveralls
- export PATH=$PATH:$HOME/gopath/bin
- goveralls -coverprofile=profile.cov -service=travis-ci

@ -0,0 +1,13 @@
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

@ -0,0 +1,194 @@
go-spew
=======
[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)]
(https://travis-ci.org/davecgh/go-spew) [![Coverage Status]
(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)]
(https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
report. Go-spew is licensed under the liberal ISC license, so it may be used in
open source or commercial projects.
If you're interested in reading about how this package came to life and some
of the challenges involved in providing a deep pretty printer, there is a blog
post about it
[here](https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
## Documentation
[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)]
(http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
http://godoc.org/github.com/davecgh/go-spew/spew
You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
## Installation
```bash
$ go get -u github.com/davecgh/go-spew/spew
```
## Quick Start
Add this import line to the file you're working in:
```Go
import "github.com/davecgh/go-spew/spew"
```
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
```Go
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
```
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
and pointer addresses):
```Go
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
```
## Debugging a Web Application Example
Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production.
```Go
package main
import (
"fmt"
"html"
"net/http"
"github.com/davecgh/go-spew/spew"
)
func handler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "text/html")
fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:])
fmt.Fprintf(w, "<!--\n" + html.EscapeString(spew.Sdump(w)) + "\n-->")
}
func main() {
http.HandleFunc("/", handler)
http.ListenAndServe(":8080", nil)
}
```
## Sample Dump Output
```
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) {
(string) "one": (bool) true
}
}
([]uint8) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
```
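The types behind this output are not shown in this README; the actual definitions live in the package's Dump example. A hedged reconstruction that produces output of roughly this shape (pointer addresses will of course differ on every run) is:
```Go
package main

import "github.com/davecgh/go-spew/spew"

// Flag, Bar, and Foo are illustrative reconstructions of the types behind the
// sample output above; the real definitions live in the package's Dump example.
type Flag int

const (
	flagOne Flag = iota
	flagTwo
)

// String makes Flag satisfy fmt.Stringer so the dump shows "flagTwo" rather
// than the raw integer value.
func (f Flag) String() string {
	if f == flagTwo {
		return "flagTwo"
	}
	return "flagOne"
}

type Bar struct {
	flag Flag
	data uintptr
}

type Foo struct {
	unexportedField *Bar
	ExportedField   map[interface{}]interface{}
}

func main() {
	f := Foo{
		unexportedField: &Bar{flag: flagTwo},
		ExportedField:   map[interface{}]interface{}{"one": true},
	}
	spew.Dump(f)

	// Byte slices are rendered in hexdump -C style.
	spew.Dump([]byte{0x11, 0x12, 0x13, 0x14})
}
```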
## Sample Formatter Output
Double pointer to a uint8:
```
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
```
Pointer to circular struct with a uint8 field and a pointer to itself:
```
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
```
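The `circular` struct itself is not defined in this README; a minimal sketch that produces output of this shape, with field names taken from the output above, is:
```Go
package main

import "github.com/davecgh/go-spew/spew"

// circular is a hypothetical self-referential struct matching the field names
// (ui8, c) visible in the formatter output above.
type circular struct {
	ui8 uint8
	c   *circular
}

func main() {
	v := circular{ui8: 1}
	v.c = &v // point the struct back at itself

	spew.Printf("%v\n", &v)   // e.g. <*>{1 <*><shown>}
	spew.Printf("%+v\n", &v)  // adds pointer addresses
	spew.Printf("%#+v\n", &v) // adds types and pointer addresses
}
```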
## Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available via the
spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows multiple configurations to be
used concurrently without touching the global state; a short sketch follows the
option list below. See the ConfigState documentation for more details.
```
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables. This option
relies on access to the unsafe package, so it will not have any effect when
running in environments without access to the unsafe package such as Google
App Engine or with the "disableunsafe" build tag specified.
Pointer method invocation is enabled by default.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are supported,
with other types sorted according to the reflect.Value.String() output
which guarantees display stability. Natural map order is used by
default.
* SpewKeys
SpewKeys specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only considered
if SortKeys is true.
```
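As a short sketch of both approaches, the program below tweaks the global `spew.Config` and then uses an independent `ConfigState` (the option values chosen are only illustrative):
```Go
package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// Adjust the global configuration used by the top-level functions.
	spew.Config.Indent = "\t"
	spew.Config.SortKeys = true
	spew.Dump(map[string]int{"b": 2, "a": 1}) // keys printed in sorted order

	// Or keep an independent ConfigState whose methods mirror the
	// top-level functions, leaving spew.Config untouched.
	cs := spew.ConfigState{Indent: "  ", MaxDepth: 2, SortKeys: true}
	cs.Dump(map[string]int{"b": 2, "a": 1})
}
```
Because each ConfigState carries its own settings, different callers can dump with different options concurrently without modifying `spew.Config`.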
## Unsafe Package Dependency
This package relies on the unsafe package to perform some of the more advanced
features, however it also supports a "limited" mode which allows it to work in
environments where the unsafe package is not available. By default, it will
operate in this mode on Google App Engine. The "disableunsafe" build tag may
also be specified to force the package to build without using the unsafe
package.
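Whether the limited mode is in effect can be checked at run time through the exported `UnsafeDisabled` constant, as in this small sketch:
```Go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// UnsafeDisabled is a build-time constant exposed by the package; it is
	// true when spew was built for the limited (no unsafe) mode.
	if spew.UnsafeDisabled {
		fmt.Println("spew is in limited mode; unexported values may not be fully dumped")
	} else {
		fmt.Println("spew has unsafe access and can inspect unexported values")
	}
}
```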
## License
Go-spew is licensed under the liberal ISC License.

@ -0,0 +1,22 @@
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
if ! type gocov >/dev/null 2>&1; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
# Only run the cgo tests if gcc is installed.
if type gcc >/dev/null 2>&1; then
(cd spew && gocov test -tags testcgo | gocov report)
else
(cd spew && gocov test | gocov report)
fi

@ -0,0 +1,151 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine and "-tags disableunsafe"
// is not added to the go build command line.
// +build !appengine,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}

@ -0,0 +1,37 @@
// Copyright (c) 2015 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either the code is running on Google App Engine or "-tags disableunsafe"
// is added to the go build command line.
// +build appengine disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

@ -0,0 +1,341 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value, however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valuesSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}

@ -0,0 +1,298 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// custom type to test Stringer interface on non-pointer receiver.
type stringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with non-pointer receivers.
func (s stringer) String() string {
return "stringer " + string(s)
}
// custom type to test Stringer interface on pointer receiver.
type pstringer string
// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection.
type xref1 struct {
ps2 *xref2
}
type xref2 struct {
ps1 *xref1
}
// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular
// reference for testing detection.
type indirCir1 struct {
ps2 *indirCir2
}
type indirCir2 struct {
ps3 *indirCir3
}
type indirCir3 struct {
ps1 *indirCir1
}
// embed is used to test embedded structures.
type embed struct {
a string
}
// embedwrap is used to test embedded structures.
type embedwrap struct {
*embed
e *embed
}
// panicer is used to intentionally cause a panic for testing that spew
// properly handles them.
type panicer int
func (p panicer) String() string {
panic("test panic")
}
// customError is used to test custom error interface invocation.
type customError int
func (e customError) Error() string {
return fmt.Sprintf("error: %d", int(e))
}
// stringizeWants converts a slice of wanted test output into a format suitable
// for a test error message.
func stringizeWants(wants []string) string {
s := ""
for i, want := range wants {
if i > 0 {
s += fmt.Sprintf("want%d: %s", i+1, want)
} else {
s += "want: " + want
}
}
return s
}
// testFailed returns whether or not a test failed by checking that the result
// of the test does not match any of the wanted strings.
func testFailed(result string, wants []string) bool {
for _, want := range wants {
if result == want {
return false
}
}
return true
}
type sortableStruct struct {
x int
}
func (ss sortableStruct) String() string {
return fmt.Sprintf("ss.%d", ss.x)
}
type unsortableStruct struct {
x int
}
type sortTestCase struct {
input []reflect.Value
expected []reflect.Value
}
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
getInterfaces := func(values []reflect.Value) []interface{} {
interfaces := []interface{}{}
for _, v := range values {
interfaces = append(interfaces, v.Interface())
}
return interfaces
}
for _, test := range tests {
spew.SortValues(test.input, cs)
// reflect.DeepEqual cannot really make sense of reflect.Value,
// probably because of all the pointer tricks. For instance,
// v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{}
// instead.
input := getInterfaces(test.input)
expected := getInterfaces(test.expected)
if !reflect.DeepEqual(input, expected) {
t.Errorf("Sort mismatch:\n %v != %v", input, expected)
}
}
}
// TestSortValues ensures the sort functionality for reflect.Value based sorting
// works as intended.
func TestSortValues(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
embedA := v(embed{"a"})
embedB := v(embed{"b"})
embedC := v(embed{"c"})
tests := []sortTestCase{
// No values.
{
[]reflect.Value{},
[]reflect.Value{},
},
// Bools.
{
[]reflect.Value{v(false), v(true), v(false)},
[]reflect.Value{v(false), v(false), v(true)},
},
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Uints.
{
[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
},
// Floats.
{
[]reflect.Value{v(2.0), v(1.0), v(3.0)},
[]reflect.Value{v(1.0), v(2.0), v(3.0)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// Array
{
[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
},
// Uintptrs.
{
[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
},
// SortableStructs.
{
// Note: not sorted - DisableMethods is set.
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
// Invalid.
{
[]reflect.Value{embedB, embedA, embedC},
[]reflect.Value{embedB, embedA, embedC},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
// based sorting works as intended when using string methods.
func TestSortValuesWithMethods(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
// Note: not sorted - SpewKeys is false.
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
// based sorting works as intended when using spew to stringify keys.
func TestSortValuesWithSpew(t *testing.T) {
v := reflect.ValueOf
a := v("a")
b := v("b")
c := v("c")
tests := []sortTestCase{
// Ints.
{
[]reflect.Value{v(2), v(1), v(3)},
[]reflect.Value{v(1), v(2), v(3)},
},
// Strings.
{
[]reflect.Value{b, a, c},
[]reflect.Value{a, b, c},
},
// SortableStructs.
{
[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
},
// UnsortableStructs.
{
[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
},
}
cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
helpTestSortValues(tests, &cs, t)
}

@ -0,0 +1,297 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use has this set to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "disableunsafe" build tag specified.
DisablePointerMethods bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Print.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with c.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
// SpewKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}

@ -0,0 +1,202 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types is as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows multiple configurations to
be used concurrently without touching the global state. See the ConfigState
documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Print. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew

@ -0,0 +1,509 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from the map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}

File diff suppressed because it is too large

@ -0,0 +1,98 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This means the cgo tests are only added (and hence run) when
// specifically requested. This configuration is used because spew itself
// does not require cgo to run even though it does handle certain cgo types
// specially. Rather than forcing all clients to require cgo and an external
// C compiler just to run the tests, this scheme makes them optional.
// +build cgo,testcgo
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew/testdata"
)
func addCgoDumpTests() {
// C char pointer.
v := testdata.GetCgoCharPointer()
nv := testdata.GetCgoNullCharPointer()
pv := &v
vcAddr := fmt.Sprintf("%p", v)
vAddr := fmt.Sprintf("%p", pv)
pvAddr := fmt.Sprintf("%p", &pv)
vt := "*testdata._Ctype_char"
vs := "116"
addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
addDumpTest(nv, "("+vt+")(<nil>)\n")
// C char array.
v2, v2l, v2c := testdata.GetCgoCharArray()
v2Len := fmt.Sprintf("%d", v2l)
v2Cap := fmt.Sprintf("%d", v2c)
v2t := "[6]testdata._Ctype_char"
v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
"{\n 00000000 74 65 73 74 32 00 " +
" |test2.|\n}"
addDumpTest(v2, "("+v2t+") "+v2s+"\n")
// C unsigned char array.
v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
v3Len := fmt.Sprintf("%d", v3l)
v3Cap := fmt.Sprintf("%d", v3c)
v3t := "[6]testdata._Ctype_unsignedchar"
v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
"{\n 00000000 74 65 73 74 33 00 " +
" |test3.|\n}"
addDumpTest(v3, "("+v3t+") "+v3s+"\n")
// C signed char array.
v4, v4l, v4c := testdata.GetCgoSignedCharArray()
v4Len := fmt.Sprintf("%d", v4l)
v4Cap := fmt.Sprintf("%d", v4c)
v4t := "[6]testdata._Ctype_schar"
v4t2 := "testdata._Ctype_schar"
v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
") 0\n}"
addDumpTest(v4, "("+v4t+") "+v4s+"\n")
// C uint8_t array.
v5, v5l, v5c := testdata.GetCgoUint8tArray()
v5Len := fmt.Sprintf("%d", v5l)
v5Cap := fmt.Sprintf("%d", v5c)
v5t := "[6]testdata._Ctype_uint8_t"
v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
"{\n 00000000 74 65 73 74 35 00 " +
" |test5.|\n}"
addDumpTest(v5, "("+v5t+") "+v5s+"\n")
// C typedefed unsigned char array.
v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
v6Len := fmt.Sprintf("%d", v6l)
v6Cap := fmt.Sprintf("%d", v6c)
v6t := "[6]testdata._Ctype_custom_uchar_t"
v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
"{\n 00000000 74 65 73 74 36 00 " +
" |test6.|\n}"
addDumpTest(v6, "("+v6t+") "+v6s+"\n")
}

@ -0,0 +1,26 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either cgo is not supported or "-tags testcgo" is not added to the go
// test command line. This file intentionally does not setup any cgo tests in
// this scenario.
// +build !cgo !testcgo
package spew_test
func addCgoDumpTests() {
// Don't add any tests for cgo since this file is only compiled when
// there should not be any cgo tests.
}

@ -0,0 +1,226 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew"
)
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
// This example demonstrates how to use Dump to dump variables to stdout.
func ExampleDump() {
// The following package level declarations are assumed for this example:
/*
type Flag int
const (
flagOne Flag = iota
flagTwo
)
var flagStrings = map[Flag]string{
flagOne: "flagOne",
flagTwo: "flagTwo",
}
func (f Flag) String() string {
if s, ok := flagStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown flag (%d)", int(f))
}
type Bar struct {
data uintptr
}
type Foo struct {
unexportedField Bar
ExportedField map[interface{}]interface{}
}
*/
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
f := Flag(5)
b := []byte{
0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
0x31, 0x32,
}
// Dump!
spew.Dump(s1, f, b)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Flag) Unknown flag (5)
// ([]uint8) (len=34 cap=34) {
// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
// 00000020 31 32 |12|
// }
//
}
// This example demonstrates how to use Printf to display a variable with a
// format string and inline formatting.
func ExamplePrintf() {
// Create a double pointer to a uint8.
ui8 := uint8(5)
pui8 := &ui8
ppui8 := &pui8
// Create a circular data type.
type circular struct {
ui8 uint8
c *circular
}
c := circular{ui8: 1}
c.c = &c
// Print!
spew.Printf("ppui8: %v\n", ppui8)
spew.Printf("circular: %v\n", c)
// Output:
// ppui8: <**>5
// circular: {1 <*>{1 <*><shown>}}
}
// This example demonstrates how to use a ConfigState.
func ExampleConfigState() {
// Modify the indent level of the ConfigState only. The global
// configuration is not modified.
scs := spew.ConfigState{Indent: "\t"}
// Output using the ConfigState instance.
v := map[string]int{"one": 1}
scs.Printf("v: %v\n", v)
scs.Dump(v)
// Output:
// v: map[one:1]
// (map[string]int) (len=1) {
// (string) (len=3) "one": (int) 1
// }
}
// This example demonstrates how to use ConfigState.Dump to dump variables to
// stdout
func ExampleConfigState_Dump() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances with different indentation.
scs := spew.ConfigState{Indent: "\t"}
scs2 := spew.ConfigState{Indent: " "}
// Setup some sample data structures for the example.
bar := Bar{uintptr(0)}
s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
// Dump using the ConfigState instances.
scs.Dump(s1)
scs2.Dump(s1)
// Output:
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
// (spew_test.Foo) {
// unexportedField: (spew_test.Bar) {
// data: (uintptr) <nil>
// },
// ExportedField: (map[interface {}]interface {}) (len=1) {
// (string) (len=3) "one": (bool) true
// }
// }
//
}
// This example demonstrates how to use ConfigState.Printf to display a variable
// with a format string and inline formatting.
func ExampleConfigState_Printf() {
// See the top-level Dump example for details on the types used in this
// example.
// Create two ConfigState instances and modify the method handling of the
// first ConfigState only.
scs := spew.NewDefaultConfig()
scs2 := spew.NewDefaultConfig()
scs.DisableMethods = true
// Alternatively
// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
// scs2 := spew.ConfigState{Indent: " "}
// This is of type Flag which implements a Stringer and has raw value 1.
f := flagTwo
// Dump using the ConfigState instances.
scs.Printf("f: %v\n", f)
scs2.Printf("f: %v\n", f)
// Output:
// f: 1
// f: flagTwo
}

@ -0,0 +1,419 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from the map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound:
f.fs.Write(nilAngleBytes)
case cycleFound:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function;
// however, circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}

File diff suppressed because it is too large

@ -0,0 +1,87 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
)
// dummyFmtState implements a fake fmt.State to use for testing invalid
// reflect.Value handling. This is necessary because the fmt package catches
// invalid values before invoking the formatter on them.
type dummyFmtState struct {
bytes.Buffer
}
func (dfs *dummyFmtState) Flag(f int) bool {
if f == int('+') {
return true
}
return false
}
func (dfs *dummyFmtState) Precision() (int, bool) {
return 0, false
}
func (dfs *dummyFmtState) Width() (int, bool) {
return 0, false
}
// TestInvalidReflectValue ensures the dump and formatter code handles an
// invalid reflect value properly. This needs access to internal state since it
// should never happen in real code and therefore can't be tested via the public
// API.
func TestInvalidReflectValue(t *testing.T) {
i := 1
// Dump invalid reflect value.
v := new(reflect.Value)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(*v)
s := buf.String()
want := "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter invalid reflect value.
buf2 := new(dummyFmtState)
f := formatState{value: *v, cs: &Config, fs: buf2}
f.format(*v)
s = buf2.String()
want = "<invalid>"
if s != want {
t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
}
}
// SortValues makes the internal sortValues function available to the test
// package.
func SortValues(values []reflect.Value, cs *ConfigState) {
sortValues(values, cs)
}

@ -0,0 +1,101 @@
// Copyright (c) 2013-2015 Dave Collins <dave@davec.name>
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine and "-tags disableunsafe"
// is not added to the go build command line.
// +build !appengine,!disableunsafe
/*
This test file is part of the spew package rather than the spew_test
package because it needs access to internals to properly test certain cases
which are not possible via the public interface since they should never happen.
*/
package spew
import (
"bytes"
"reflect"
"testing"
"unsafe"
)
// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
// the maximum kind value which does not exist. This is needed to test the
// fallback code which punts to the standard fmt library for new types that
// might get added to the language.
func changeKind(v *reflect.Value, readOnly bool) {
rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
if readOnly {
*rvf |= flagRO
} else {
*rvf &= ^uintptr(flagRO)
}
}
// TestAddedReflectValue tests functionality of the dump and formatter code which
// falls back to the standard fmt library for new types that might get added to
// the language.
func TestAddedReflectValue(t *testing.T) {
i := 1
// Dump using a reflect.Value that is exported.
v := reflect.ValueOf(int8(5))
changeKind(&v, false)
buf := new(bytes.Buffer)
d := dumpState{w: buf, cs: &Config}
d.dump(v)
s := buf.String()
want := "(int8) 5"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Dump using a reflect.Value that is not exported.
changeKind(&v, true)
buf.Reset()
d.dump(v)
s = buf.String()
want = "(int8) <int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d\n got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is exported.
changeKind(&v, false)
buf2 := new(dummyFmtState)
f := formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "5"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
i++
// Formatter using a reflect.Value that is not exported.
changeKind(&v, true)
buf2.Reset()
f = formatState{value: v, cs: &Config, fs: buf2}
f.format(v)
s = buf2.String()
want = "<int8 Value>"
if s != want {
t.Errorf("TestAddedReflectValue #%d got: %s want: %s", i, s, want)
}
}

@ -0,0 +1,148 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}

@ -0,0 +1,309 @@
/*
* Copyright (c) 2013 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/davecgh/go-spew/spew"
)
// spewFunc is used to identify which public function of the spew package or
// ConfigState a test applies to.
type spewFunc int
const (
fCSFdump spewFunc = iota
fCSFprint
fCSFprintf
fCSFprintln
fCSPrint
fCSPrintln
fCSSdump
fCSSprint
fCSSprintf
fCSSprintln
fCSErrorf
fCSNewFormatter
fErrorf
fFprint
fFprintln
fPrint
fPrintln
fSdump
fSprint
fSprintf
fSprintln
)
// Map of spewFunc values to names for pretty printing.
var spewFuncStrings = map[spewFunc]string{
fCSFdump: "ConfigState.Fdump",
fCSFprint: "ConfigState.Fprint",
fCSFprintf: "ConfigState.Fprintf",
fCSFprintln: "ConfigState.Fprintln",
fCSSdump: "ConfigState.Sdump",
fCSPrint: "ConfigState.Print",
fCSPrintln: "ConfigState.Println",
fCSSprint: "ConfigState.Sprint",
fCSSprintf: "ConfigState.Sprintf",
fCSSprintln: "ConfigState.Sprintln",
fCSErrorf: "ConfigState.Errorf",
fCSNewFormatter: "ConfigState.NewFormatter",
fErrorf: "spew.Errorf",
fFprint: "spew.Fprint",
fFprintln: "spew.Fprintln",
fPrint: "spew.Print",
fPrintln: "spew.Println",
fSdump: "spew.Sdump",
fSprint: "spew.Sprint",
fSprintf: "spew.Sprintf",
fSprintln: "spew.Sprintln",
}
func (f spewFunc) String() string {
if s, ok := spewFuncStrings[f]; ok {
return s
}
return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
}
// spewTest is used to describe a test to be performed against the public
// functions of the spew package or ConfigState.
type spewTest struct {
cs *spew.ConfigState
f spewFunc
format string
in interface{}
want string
}
// spewTests houses the tests to be performed against the public functions of
// the spew package and ConfigState.
//
// These tests are only intended to ensure the public functions are exercised
// and are intentionally not exhaustive of types. The exhaustive type
// tests are handled in the dump and format tests.
var spewTests []spewTest
// redirStdout is a helper function to return the standard output from f as a
// byte slice.
func redirStdout(f func()) ([]byte, error) {
tempFile, err := ioutil.TempFile("", "ss-test")
if err != nil {
return nil, err
}
fileName := tempFile.Name()
defer os.Remove(fileName) // Ignore error
origStdout := os.Stdout
os.Stdout = tempFile
f()
os.Stdout = origStdout
tempFile.Close()
return ioutil.ReadFile(fileName)
}
func initSpewTests() {
// Config states with various settings.
scsDefault := spew.NewDefaultConfig()
scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
// Variables for tests on types which implement Stringer interface with and
// without a pointer receiver.
ts := stringer("test")
tps := pstringer("test")
// depthTester is used to test max depth handling for structs, arrays, slices,
// and maps.
type depthTester struct {
ic indirCir1
arr [1]string
slice []string
m map[string]int
}
dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
map[string]int{"one": 1}}
// Variable for tests on types which implement error interface.
te := customError(10)
spewTests = []spewTest{
{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
{scsDefault, fCSFprint, "", int16(32767), "32767"},
{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
{scsDefault, fFprint, "", float32(3.14), "3.14"},
{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
{scsDefault, fPrint, "", true, "true"},
{scsDefault, fPrintln, "", false, "false\n"},
{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
{scsNoMethods, fCSFprint, "", ts, "test"},
{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
{scsNoMethods, fCSFprint, "", tps, "test"},
{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
{scsNoPmethods, fCSFprint, "", tps, "test"},
{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
{scsMaxDepth, fCSFdump, "", dt, "(spew_test.depthTester) {\n" +
" ic: (spew_test.indirCir1) {\n <max depth reached>\n },\n" +
" arr: ([1]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" slice: ([]string) (len=1 cap=1) {\n <max depth reached>\n },\n" +
" m: (map[string]int) (len=1) {\n <max depth reached>\n }\n}\n"},
{scsContinue, fCSFprint, "", ts, "(stringer test) test"},
{scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " +
"(len=4) (stringer test) \"test\"\n"},
{scsContinue, fCSFprint, "", te, "(error: 10) 10"},
{scsContinue, fCSFdump, "", te, "(spew_test.customError) " +
"(error: 10) 10\n"},
}
}
// TestSpew executes all of the tests described by spewTests.
func TestSpew(t *testing.T) {
initSpewTests()
t.Logf("Running %d tests", len(spewTests))
for i, test := range spewTests {
buf := new(bytes.Buffer)
switch test.f {
case fCSFdump:
test.cs.Fdump(buf, test.in)
case fCSFprint:
test.cs.Fprint(buf, test.in)
case fCSFprintf:
test.cs.Fprintf(buf, test.format, test.in)
case fCSFprintln:
test.cs.Fprintln(buf, test.in)
case fCSPrint:
b, err := redirStdout(func() { test.cs.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSPrintln:
b, err := redirStdout(func() { test.cs.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fCSSdump:
str := test.cs.Sdump(test.in)
buf.WriteString(str)
case fCSSprint:
str := test.cs.Sprint(test.in)
buf.WriteString(str)
case fCSSprintf:
str := test.cs.Sprintf(test.format, test.in)
buf.WriteString(str)
case fCSSprintln:
str := test.cs.Sprintln(test.in)
buf.WriteString(str)
case fCSErrorf:
err := test.cs.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fCSNewFormatter:
fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))
case fErrorf:
err := spew.Errorf(test.format, test.in)
buf.WriteString(err.Error())
case fFprint:
spew.Fprint(buf, test.in)
case fFprintln:
spew.Fprintln(buf, test.in)
case fPrint:
b, err := redirStdout(func() { spew.Print(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fPrintln:
b, err := redirStdout(func() { spew.Println(test.in) })
if err != nil {
t.Errorf("%v #%d %v", test.f, i, err)
continue
}
buf.Write(b)
case fSdump:
str := spew.Sdump(test.in)
buf.WriteString(str)
case fSprint:
str := spew.Sprint(test.in)
buf.WriteString(str)
case fSprintf:
str := spew.Sprintf(test.format, test.in)
buf.WriteString(str)
case fSprintln:
str := spew.Sprintln(test.in)
buf.WriteString(str)
default:
t.Errorf("%v #%d unrecognized function", test.f, i)
continue
}
s := buf.String()
if test.want != s {
t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want)
continue
}
}
}

@ -0,0 +1,82 @@
// Copyright (c) 2013 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This code should really only be in the dumpcgo_test.go file,
// but unfortunately Go will not allow cgo in test files, so this is a
// workaround to allow cgo types to be tested. This configuration is used
// because spew itself does not require cgo to run even though it does handle
// certain cgo types specially. Rather than forcing all clients to require cgo
// and an external C compiler just to run the tests, this scheme makes them
// optional.
// +build cgo,testcgo
package testdata
/*
#include <stdint.h>
typedef unsigned char custom_uchar_t;
char *ncp = 0;
char *cp = "test";
char ca[6] = {'t', 'e', 's', 't', '2', '\0'};
unsigned char uca[6] = {'t', 'e', 's', 't', '3', '\0'};
signed char sca[6] = {'t', 'e', 's', 't', '4', '\0'};
uint8_t ui8ta[6] = {'t', 'e', 's', 't', '5', '\0'};
custom_uchar_t tuca[6] = {'t', 'e', 's', 't', '6', '\0'};
*/
import "C"
// GetCgoNullCharPointer returns a null char pointer via cgo. This is only
// used for tests.
func GetCgoNullCharPointer() interface{} {
return C.ncp
}
// GetCgoCharPointer returns a char pointer via cgo. This is only used for
// tests.
func GetCgoCharPointer() interface{} {
return C.cp
}
// GetCgoCharArray returns a char array via cgo and the array's len and cap.
// This is only used for tests.
func GetCgoCharArray() (interface{}, int, int) {
return C.ca, len(C.ca), cap(C.ca)
}
// GetCgoUnsignedCharArray returns an unsigned char array via cgo and the
// array's len and cap. This is only used for tests.
func GetCgoUnsignedCharArray() (interface{}, int, int) {
return C.uca, len(C.uca), cap(C.uca)
}
// GetCgoSignedCharArray returns a signed char array via cgo and the array's len
// and cap. This is only used for tests.
func GetCgoSignedCharArray() (interface{}, int, int) {
return C.sca, len(C.sca), cap(C.sca)
}
// GetCgoUint8tArray returns a uint8_t array via cgo and the array's len and
// cap. This is only used for tests.
func GetCgoUint8tArray() (interface{}, int, int) {
return C.ui8ta, len(C.ui8ta), cap(C.ui8ta)
}
// GetCgoTypdefedUnsignedCharArray returns a typedefed unsigned char array via
// cgo and the array's len and cap. This is only used for tests.
func GetCgoTypdefedUnsignedCharArray() (interface{}, int, int) {
return C.tuca, len(C.tuca), cap(C.tuca)
}

@ -0,0 +1,61 @@
github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88)
github.com/davecgh/go-spew/spew/format.go formatState.format 100.00% (82/82)
github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44)
github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39)
github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30)
github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18)
github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13)
github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12)
github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11)
github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11)
github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10)
github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8)
github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7)
github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5)
github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4)
github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4)
github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4)
github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3)
github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3)
github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3)
github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1)
github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1)
github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1)
github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1)
github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1)
github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505)

@ -0,0 +1,5 @@
language: go
go:
- 1.5
- tip

@ -0,0 +1,27 @@
Copyright (c) 2013, Patrick Mezard
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
The names of its contributors may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,50 @@
go-difflib
==========
[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib)
[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib)
Go-difflib is a partial port of the Python 3 difflib package. Its main goal
was to make unified and context diff available in pure Go, mostly for
testing purposes.
The following class and functions (and related tests) have been ported:
* `SequenceMatcher`
* `unified_diff()`
* `context_diff()`
## Installation
```bash
$ go get github.com/pmezard/go-difflib/difflib
```
### Quick Start
Diffs are configured with Unified (or ContextDiff) structures, and can
be output to an io.Writer or returned as a string.
```Go
diff := difflib.UnifiedDiff{
	A:        difflib.SplitLines("foo\nbar\n"),
	B:        difflib.SplitLines("foo\nbaz\n"),
	FromFile: "Original",
	ToFile:   "Current",
	Context:  3,
}
text, _ := difflib.GetUnifiedDiffString(diff)
fmt.Print(text)
```
would output:
```
--- Original
+++ Current
@@ -1,3 +1,3 @@
foo
-bar
+baz
```
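A context diff works the same way; the sketch below assumes the package's
`ContextDiff` type and `GetContextDiffString` helper, which mirror the unified
variants:

```Go
diff := difflib.ContextDiff{
	A:        difflib.SplitLines("foo\nbar\n"),
	B:        difflib.SplitLines("foo\nbaz\n"),
	FromFile: "Original",
	ToFile:   "Current",
	Context:  3,
}
text, _ := difflib.GetContextDiffString(diff)
fmt.Print(text)
```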

@ -0,0 +1,758 @@
// Package difflib is a partial port of the Python difflib module.
//
// It provides tools to compare sequences of strings and generate textual diffs.
//
// The following class and functions have been ported:
//
// - SequenceMatcher
//
// - unified_diff
//
// - context_diff
//
// Getting unified diffs was the main goal of the port. Keep in mind this code
// is mostly suitable for presenting text differences in a human-friendly way;
// there is no guarantee that the generated diffs are consumable by patch(1).
package difflib
import (
"bufio"
"bytes"
"fmt"
"io"
"strings"
)
func min(a, b int) int {
if a < b {
return a
}
return b
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
func calculateRatio(matches, length int) float64 {
if length > 0 {
return 2.0 * float64(matches) / float64(length)
}
return 1.0
}
type Match struct {
A int
B int
Size int
}
type OpCode struct {
Tag byte
I1 int
I2 int
J1 int
J2 int
}
// SequenceMatcher compares sequence of strings. The basic
// algorithm predates, and is a little fancier than, an algorithm
// published in the late 1980's by Ratcliff and Obershelp under the
// hyperbolic name "gestalt pattern matching". The basic idea is to find
// the longest contiguous matching subsequence that contains no "junk"
// elements (R-O doesn't address junk). The same idea is then applied
// recursively to the pieces of the sequences to the left and to the right
// of the matching subsequence. This does not yield minimal edit
// sequences, but does tend to yield matches that "look right" to people.
//
// SequenceMatcher tries to compute a "human-friendly diff" between two
// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
// longest *contiguous* & junk-free matching subsequence. That's what
// catches people's eyes. The Windows(tm) windiff has another interesting
// notion, pairing up elements that appear uniquely in each sequence.
// That, and the method here, appear to yield more intuitive difference
// reports than does diff. This method appears to be the least vulnerable
// to synching up on blocks of "junk lines", though (like blank lines in
// ordinary text files, or maybe "<P>" lines in HTML files). That may be
// because this is the only method of the 3 that has a *concept* of
// "junk" <wink>.
//
// Timing: Basic R-O is cubic time worst case and quadratic time expected
// case. SequenceMatcher is quadratic time for the worst case and has
// expected-case behavior dependent in a complicated way on how many
// elements the sequences have in common; best case time is linear.
type SequenceMatcher struct {
a []string
b []string
b2j map[string][]int
IsJunk func(string) bool
autoJunk bool
bJunk map[string]struct{}
matchingBlocks []Match
fullBCount map[string]int
bPopular map[string]struct{}
opCodes []OpCode
}
// NewMatcher returns a SequenceMatcher comparing the sequences a and b,
// with the automatic junk heuristic enabled and no explicit junk predicate.
func NewMatcher(a, b []string) *SequenceMatcher {
m := SequenceMatcher{autoJunk: true}
m.SetSeqs(a, b)
return &m
}
// NewMatcherWithJunk returns a SequenceMatcher comparing a and b, using
// isJunk to identify junk elements and autoJunk to toggle the automatic
// junk heuristic.
func NewMatcherWithJunk(a, b []string, autoJunk bool,
	isJunk func(string) bool) *SequenceMatcher {
m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
m.SetSeqs(a, b)
return &m
}
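// Example (illustrative sketch): junk elements can be ignored by supplying an
// IsJunk predicate to NewMatcherWithJunk. Here strings consisting of a single
// space are treated as junk; a and b stand for arbitrary []string sequences.
//
//	isJunk := func(s string) bool { return s == " " }
//	m := NewMatcherWithJunk(a, b, true, isJunk)
//	_ = m.GetOpCodes()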
// Set two sequences to be compared.
func (m *SequenceMatcher) SetSeqs(a, b []string) {
m.SetSeq1(a)
m.SetSeq2(b)
}
// Set the first sequence to be compared. The second sequence to be compared is
// not changed.
//
// SequenceMatcher computes and caches detailed information about the second
// sequence, so if you want to compare one sequence S against many sequences,
// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
// sequences.
//
// See also SetSeqs() and SetSeq2().
func (m *SequenceMatcher) SetSeq1(a []string) {
if &a == &m.a {
return
}
m.a = a
m.matchingBlocks = nil
m.opCodes = nil
}
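// Example (illustrative sketch): to compare one sequence against many others,
// cache the common sequence with SetSeq2 once and vary the first sequence;
// target and candidates stand for arbitrary []string values.
//
//	m := NewMatcher(nil, nil)
//	m.SetSeq2(target)
//	for _, c := range candidates {
//		m.SetSeq1(c)
//		_ = m.Ratio()
//	}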
// Set the second sequence to be compared. The first sequence to be compared is
// not changed.
func (m *SequenceMatcher) SetSeq2(b []string) {
if &b == &m.b {
return
}
m.b = b
m.matchingBlocks = nil
m.opCodes = nil
m.fullBCount = nil
m.chainB()
}
func (m *SequenceMatcher) chainB() {
// Populate line -> index mapping
b2j := map[string][]int{}
for i, s := range m.b {
indices := b2j[s]
indices = append(indices, i)
b2j[s] = indices
}
// Purge junk elements
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s := range junk {
delete(b2j, s)
}
}
// Purge remaining popular elements
popular := map[string]struct{}{}
n := len(m.b)
if m.autoJunk && n >= 200 {
ntest := n/100 + 1
for s, indices := range b2j {
if len(indices) > ntest {
popular[s] = struct{}{}
}
}
for s := range popular {
delete(b2j, s)
}
}
m.bPopular = popular
m.b2j = b2j
}
func (m *SequenceMatcher) isBJunk(s string) bool {
_, ok := m.bJunk[s]
return ok
}
// Find longest matching block in a[alo:ahi] and b[blo:bhi].
//
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
// alo <= i <= i+k <= ahi
// blo <= j <= j+k <= bhi
// and for all (i',j',k') meeting those conditions,
// k >= k'
// i <= i'
// and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
// start earliest in a, return the one that starts earliest in b.
//
// If IsJunk is defined, first the longest matching block is
// determined as above, but with the additional restriction that no
// junk element appears in the block. Then that block is extended as
// far as possible by matching (only) junk elements on both sides. So
// the resulting block never matches on junk except as identical junk
// happens to be adjacent to an "interesting" match.
//
// If no blocks match, return (alo, blo, 0).
func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
// CAUTION: stripping common prefix or suffix would be incorrect.
// E.g.,
// ab
// acab
// Longest matching block is "ab", but if common prefix is
// stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
// strip, so ends up claiming that ab is changed to acab by
// inserting "ca" in the middle. That's minimal but unintuitive:
// "it's obvious" that someone inserted "ac" at the front.
// Windiff ends up at the same place as diff, but by pairing up
// the unique 'b's and then matching the first two 'a's.
besti, bestj, bestsize := alo, blo, 0
// find longest junk-free match
// during an iteration of the loop, j2len[j] = length of longest
// junk-free match ending with a[i-1] and b[j]
j2len := map[int]int{}
for i := alo; i != ahi; i++ {
// look at all instances of a[i] in b; note that because
// b2j has no junk keys, the loop is skipped if a[i] is junk
newj2len := map[int]int{}
for _, j := range m.b2j[m.a[i]] {
// a[i] matches b[j]
if j < blo {
continue
}
if j >= bhi {
break
}
k := j2len[j-1] + 1
newj2len[j] = k
if k > bestsize {
besti, bestj, bestsize = i-k+1, j-k+1, k
}
}
j2len = newj2len
}
// Extend the best by non-junk elements on each end. In particular,
// "popular" non-junk elements aren't in b2j, which greatly speeds
// the inner loop above, but also means "the best" match so far
// doesn't contain any junk *or* popular non-junk elements.
for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
!m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
// Now that we have a wholly interesting match (albeit possibly
// empty!), we may as well suck up the matching junk on each
// side of it too. Can't think of a good reason not to, and it
// saves post-processing the (possibly considerable) expense of
// figuring out what to do with it. In the case of an empty
// interesting match, this is clearly the right thing to do,
// because no other kind of match is possible in the regions.
for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
m.a[besti-1] == m.b[bestj-1] {
besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
}
for besti+bestsize < ahi && bestj+bestsize < bhi &&
m.isBJunk(m.b[bestj+bestsize]) &&
m.a[besti+bestsize] == m.b[bestj+bestsize] {
bestsize += 1
}
return Match{A: besti, B: bestj, Size: bestsize}
}
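// Example (illustrative): for a = ["a","b","c","d"] and b = ["b","c","d","e"],
// findLongestMatch(0, 4, 0, 4) reports the block "b","c","d", i.e.
// Match{A: 1, B: 0, Size: 3}.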
// Return list of triples describing matching subsequences.
//
// Each triple is of the form (i, j, n), and means that
// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
// adjacent triples in the list, and the second is not the last triple in the
// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
// adjacent equal blocks.
//
// The last triple is a dummy, (len(a), len(b), 0), and is the only
// triple with n==0.
func (m *SequenceMatcher) GetMatchingBlocks() []Match {
if m.matchingBlocks != nil {
return m.matchingBlocks
}
var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
match := m.findLongestMatch(alo, ahi, blo, bhi)
i, j, k := match.A, match.B, match.Size
if match.Size > 0 {
if alo < i && blo < j {
matched = matchBlocks(alo, i, blo, j, matched)
}
matched = append(matched, match)
if i+k < ahi && j+k < bhi {
matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
}
}
return matched
}
matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
// It's possible that we have adjacent equal blocks in the
// matching_blocks list now.
nonAdjacent := []Match{}
i1, j1, k1 := 0, 0, 0
for _, b := range matched {
// Is this block adjacent to i1, j1, k1?
i2, j2, k2 := b.A, b.B, b.Size
if i1+k1 == i2 && j1+k1 == j2 {
// Yes, so collapse them -- this just increases the length of
// the first block by the length of the second, and the first
// block so lengthened remains the block to compare against.
k1 += k2
} else {
// Not adjacent. Remember the first block (k1==0 means it's
// the dummy we started with), and make the second block the
// new block to compare against.
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
i1, j1, k1 = i2, j2, k2
}
}
if k1 > 0 {
nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
}
nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
m.matchingBlocks = nonAdjacent
return m.matchingBlocks
}
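// Example (illustrative): for the same a and b as in the example above
// ("abcd" vs "bcde" split into single characters), GetMatchingBlocks returns
// [Match{A: 1, B: 0, Size: 3}, Match{A: 4, B: 4, Size: 0}] -- the real match
// followed by the terminating dummy.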
// Return list of 5-tuples describing how to turn a into b.
//
// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
// tuple preceding it, and likewise for j1 == the previous j2.
//
// The tags are characters, with these meanings:
//
// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
//
// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
//
// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
//
// 'e' (equal): a[i1:i2] == b[j1:j2]
func (m *SequenceMatcher) GetOpCodes() []OpCode {
if m.opCodes != nil {
return m.opCodes
}
i, j := 0, 0
matching := m.GetMatchingBlocks()
opCodes := make([]OpCode, 0, len(matching))
for _, m := range matching {
// invariant: we've pumped out correct diffs to change
// a[:i] into b[:j], and the next matching block is
// a[ai:ai+size] == b[bj:bj+size]. So we need to pump
// out a diff to change a[i:ai] into b[j:bj], pump out
// the matching block, and move (i,j) beyond the match
ai, bj, size := m.A, m.B, m.Size
tag := byte(0)
if i < ai && j < bj {
tag = 'r'
} else if i < ai {
tag = 'd'
} else if j < bj {
tag = 'i'
}
if tag > 0 {
opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
}
i, j = ai+size, bj+size
// the list of matching blocks is terminated by a
// sentinel with size 0
if size > 0 {
opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
}
}
m.opCodes = opCodes
return m.opCodes
}
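// Example (illustrative, mirroring the package tests): comparing the character
// sequences of "qabxcd" and "abycdf" yields the op codes
//
//	d a[0:1] b[0:0]   (delete "q")
//	e a[1:3] b[0:2]   (equal "ab")
//	r a[3:4] b[2:3]   (replace "x" with "y")
//	e a[4:6] b[3:5]   (equal "cd")
//	i a[6:6] b[5:6]   (insert "f")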
// Isolate change clusters by eliminating ranges with no changes.
//
// Return a list of groups with up to n lines of context. If n is negative,
// 3 is used.
// Each group is in the same format as returned by GetOpCodes().
func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
if n < 0 {
n = 3
}
codes := m.GetOpCodes()
if len(codes) == 0 {
codes = []OpCode{{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {
c := codes[0]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
}
if codes[len(codes)-1].Tag == 'e' {
c := codes[len(codes)-1]
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
}
nn := n + n
groups := [][]OpCode{}
group := []OpCode{}
for _, c := range codes {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
// End the current group and start a new one whenever
// there is a large range with no changes.
if c.Tag == 'e' && i2-i1 > nn {
group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
j1, min(j2, j1+n)})
groups = append(groups, group)
group = []OpCode{}
i1, j1 = max(i1, i2-n), max(j1, j2-n)
}
group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
}
if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
groups = append(groups, group)
}
return groups
}
// Return a measure of the sequences' similarity (float in [0,1]).
//
// Where T is the total number of elements in both sequences, and
// M is the number of matches, this is 2.0*M / T.
// Note that this is 1 if the sequences are identical, and 0 if
// they have nothing in common.
//
// .Ratio() is expensive to compute if you haven't already computed
// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
// want to try .QuickRatio() or .RealQuickRatio() first to get an
// upper bound.
func (m *SequenceMatcher) Ratio() float64 {
matches := 0
for _, m := range m.GetMatchingBlocks() {
matches += m.Size
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() relatively quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute.
func (m *SequenceMatcher) QuickRatio() float64 {
// viewing a and b as multisets, set matches to the cardinality
// of their intersection; this counts the number of matches
// without regard to order, so is clearly an upper bound
if m.fullBCount == nil {
m.fullBCount = map[string]int{}
for _, s := range m.b {
m.fullBCount[s] = m.fullBCount[s] + 1
}
}
// avail[x] is the number of times x appears in 'b' less the
// number of times we've seen it in 'a' so far ... kinda
avail := map[string]int{}
matches := 0
for _, s := range m.a {
n, ok := avail[s]
if !ok {
n = m.fullBCount[s]
}
avail[s] = n - 1
if n > 0 {
matches += 1
}
}
return calculateRatio(matches, len(m.a)+len(m.b))
}
// Return an upper bound on ratio() very quickly.
//
// This isn't defined beyond that it is an upper bound on .Ratio(), and
// is faster to compute than either .Ratio() or .QuickRatio().
func (m *SequenceMatcher) RealQuickRatio() float64 {
la, lb := len(m.a), len(m.b)
return calculateRatio(min(la, lb), la+lb)
}
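// Example (illustrative, mirroring the package tests): comparing the character
// sequences of "abcd" and "bcde" gives Ratio() == 0.75, QuickRatio() == 0.75,
// and RealQuickRatio() == 1.0, i.e. successively cheaper (and looser or equal)
// upper bounds.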
// Convert range to the "ed" format
func formatRangeUnified(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 1 {
return fmt.Sprintf("%d", beginning)
}
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
return fmt.Sprintf("%d,%d", beginning, length)
}
// Unified diff parameters
type UnifiedDiff struct {
A []string // First sequence lines
FromFile string // First file name
FromDate string // First file time
B []string // Second sequence lines
ToFile string // Second file name
ToDate string // Second file time
Eol string // Headers end of line, defaults to LF
Context int // Number of context lines
}
// Compare two sequences of lines; generate the delta as a unified diff.
//
// Unified diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context.
//
// By default, the diff control lines (those with ---, +++, or @@) are
// created with a trailing newline. This is helpful so that inputs
// created by SplitLines result in diffs that are suitable to be
// written out as-is, since both the inputs and outputs have trailing
// newlines.
//
// For inputs that do not have trailing newlines, set diff.Eol to ""
// so that the output will be uniformly newline free.
//
// The unidiff format normally has a header for filenames and modification
// times. Any or all of these may be specified using strings for
// diff.FromFile, diff.ToFile, diff.FromDate, and diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
w := func(format string, args ...interface{}) error {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
return err
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
err := w("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
if err != nil {
return err
}
err = w("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
if err != nil {
return err
}
}
first, last := g[0], g[len(g)-1]
range1 := formatRangeUnified(first.I1, last.I2)
range2 := formatRangeUnified(first.J1, last.J2)
if err := w("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
return err
}
for _, c := range g {
i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
if c.Tag == 'e' {
for _, line := range diff.A[i1:i2] {
if err := w(" " + line); err != nil {
return err
}
}
continue
}
if c.Tag == 'r' || c.Tag == 'd' {
for _, line := range diff.A[i1:i2] {
if err := w("-" + line); err != nil {
return err
}
}
}
if c.Tag == 'r' || c.Tag == 'i' {
for _, line := range diff.B[j1:j2] {
if err := w("+" + line); err != nil {
return err
}
}
}
}
}
return nil
}
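// Example (illustrative sketch): a unified diff can be streamed to any
// io.Writer, here os.Stdout (assumes the caller imports "os"):
//
//	diff := UnifiedDiff{
//		A:        SplitLines("foo\nbar\n"),
//		B:        SplitLines("foo\nbaz\n"),
//		FromFile: "Original",
//		ToFile:   "Current",
//		Context:  3,
//	}
//	if err := WriteUnifiedDiff(os.Stdout, diff); err != nil {
//		// handle error
//	}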
// Like WriteUnifiedDiff but returns the diff as a string.
func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteUnifiedDiff(w, diff)
return string(w.Bytes()), err
}
// Convert range to the "ed" format.
func formatRangeContext(start, stop int) string {
// Per the diff spec at http://www.unix.org/single_unix_specification/
beginning := start + 1 // lines start numbering with one
length := stop - start
if length == 0 {
beginning -= 1 // empty ranges begin at line just before the range
}
if length <= 1 {
return fmt.Sprintf("%d", beginning)
}
return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
}
type ContextDiff UnifiedDiff
// Compare two sequences of lines; generate the delta as a context diff.
//
// Context diffs are a compact way of showing line changes and a few
// lines of context. The number of context lines is set by diff.Context.
//
// By default, the diff control lines (those with *** or ---) are
// created with a trailing newline.
//
// For inputs that do not have trailing newlines, set the diff.Eol
// argument to "" so that the output will be uniformly newline free.
//
// The context diff format normally has a header for filenames and
// modification times. Any or all of these may be specified using
// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.
// The modification times are normally expressed in the ISO 8601 format.
// If not specified, the strings default to blanks.
func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
buf := bufio.NewWriter(writer)
defer buf.Flush()
var diffErr error
w := func(format string, args ...interface{}) {
_, err := buf.WriteString(fmt.Sprintf(format, args...))
if diffErr == nil && err != nil {
diffErr = err
}
}
if len(diff.Eol) == 0 {
diff.Eol = "\n"
}
prefix := map[byte]string{
'i': "+ ",
'd': "- ",
'r': "! ",
'e': " ",
}
started := false
m := NewMatcher(diff.A, diff.B)
for _, g := range m.GetGroupedOpCodes(diff.Context) {
if !started {
started = true
fromDate := ""
if len(diff.FromDate) > 0 {
fromDate = "\t" + diff.FromDate
}
toDate := ""
if len(diff.ToDate) > 0 {
toDate = "\t" + diff.ToDate
}
w("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
w("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
}
first, last := g[0], g[len(g)-1]
w("***************" + diff.Eol)
range1 := formatRangeContext(first.I1, last.I2)
w("*** %s ****%s", range1, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'd' {
for _, cc := range g {
if cc.Tag == 'i' {
continue
}
for _, line := range diff.A[cc.I1:cc.I2] {
w(prefix[cc.Tag] + line)
}
}
break
}
}
range2 := formatRangeContext(first.J1, last.J2)
w("--- %s ----%s", range2, diff.Eol)
for _, c := range g {
if c.Tag == 'r' || c.Tag == 'i' {
for _, cc := range g {
if cc.Tag == 'd' {
continue
}
for _, line := range diff.B[cc.J1:cc.J2] {
w(prefix[cc.Tag] + line)
}
}
break
}
}
}
return diffErr
}
// Like WriteContextDiff but returns the diff as a string.
func GetContextDiffString(diff ContextDiff) (string, error) {
w := &bytes.Buffer{}
err := WriteContextDiff(w, diff)
return string(w.Bytes()), err
}
// Split a string on "\n" while preserving the newlines. A "\n" is appended to
// the final element, so every returned line ends with a newline. The output
// can be used as input for UnifiedDiff and ContextDiff structures.
func SplitLines(s string) []string {
lines := strings.SplitAfter(s, "\n")
lines[len(lines)-1] += "\n"
return lines
}
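// Example (illustrative, mirroring the package tests):
//
//	SplitLines("foo\nbar")   // -> []string{"foo\n", "bar\n"}
//	SplitLines("foo\nbar\n") // -> []string{"foo\n", "bar\n", "\n"}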

@ -0,0 +1,352 @@
package difflib
import (
"bytes"
"fmt"
"math"
"reflect"
"strings"
"testing"
)
func assertAlmostEqual(t *testing.T, a, b float64, places int) {
if math.Abs(a-b) > math.Pow10(-places) {
t.Errorf("%.7f != %.7f", a, b)
}
}
func assertEqual(t *testing.T, a, b interface{}) {
if !reflect.DeepEqual(a, b) {
t.Errorf("%v != %v", a, b)
}
}
func splitChars(s string) []string {
chars := make([]string, 0, len(s))
// Assume ASCII inputs
for i := 0; i != len(s); i++ {
chars = append(chars, string(s[i]))
}
return chars
}
func TestSequenceMatcherRatio(t *testing.T) {
s := NewMatcher(splitChars("abcd"), splitChars("bcde"))
assertEqual(t, s.Ratio(), 0.75)
assertEqual(t, s.QuickRatio(), 0.75)
assertEqual(t, s.RealQuickRatio(), 1.0)
}
func TestGetOpCodes(t *testing.T) {
a := "qabxcd"
b := "abycdf"
s := NewMatcher(splitChars(a), splitChars(b))
w := &bytes.Buffer{}
for _, op := range s.GetOpCodes() {
fmt.Fprintf(w, "%s a[%d:%d], (%s) b[%d:%d] (%s)\n", string(op.Tag),
op.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2])
}
result := string(w.Bytes())
expected := `d a[0:1], (q) b[0:0] ()
e a[1:3], (ab) b[0:2] (ab)
r a[3:4], (x) b[2:3] (y)
e a[4:6], (cd) b[3:5] (cd)
i a[6:6], () b[5:6] (f)
`
if expected != result {
t.Errorf("unexpected op codes: \n%s", result)
}
}
func TestGroupedOpCodes(t *testing.T) {
a := []string{}
for i := 0; i != 39; i++ {
a = append(a, fmt.Sprintf("%02d", i))
}
b := []string{}
b = append(b, a[:8]...)
b = append(b, " i")
b = append(b, a[8:19]...)
b = append(b, " x")
b = append(b, a[20:22]...)
b = append(b, a[27:34]...)
b = append(b, " y")
b = append(b, a[35:]...)
s := NewMatcher(a, b)
w := &bytes.Buffer{}
for _, g := range s.GetGroupedOpCodes(-1) {
fmt.Fprintf(w, "group\n")
for _, op := range g {
fmt.Fprintf(w, " %s, %d, %d, %d, %d\n", string(op.Tag),
op.I1, op.I2, op.J1, op.J2)
}
}
result := string(w.Bytes())
expected := `group
e, 5, 8, 5, 8
i, 8, 8, 8, 9
e, 8, 11, 9, 12
group
e, 16, 19, 17, 20
r, 19, 20, 20, 21
e, 20, 22, 21, 23
d, 22, 27, 23, 23
e, 27, 30, 23, 26
group
e, 31, 34, 27, 30
r, 34, 35, 30, 31
e, 35, 38, 31, 34
`
if expected != result {
t.Errorf("unexpected op codes: \n%s", result)
}
}
func ExampleGetUnifiedDiffString() {
a := `one
two
three
four`
b := `zero
one
three
four`
diff := UnifiedDiff{
A: SplitLines(a),
B: SplitLines(b),
FromFile: "Original",
FromDate: "2005-01-26 23:30:50",
ToFile: "Current",
ToDate: "2010-04-02 10:20:52",
Context: 3,
}
result, _ := GetUnifiedDiffString(diff)
fmt.Print(strings.Replace(result, "\t", " ", -1))
// Output:
// --- Original 2005-01-26 23:30:50
// +++ Current 2010-04-02 10:20:52
// @@ -1,4 +1,4 @@
// +zero
// one
// -two
// three
// four
}
func ExampleGetContextDiffString() {
a := `one
two
three
four`
b := `zero
one
tree
four`
diff := ContextDiff{
A: SplitLines(a),
B: SplitLines(b),
FromFile: "Original",
ToFile: "Current",
Context: 3,
Eol: "\n",
}
result, _ := GetContextDiffString(diff)
fmt.Print(strings.Replace(result, "\t", " ", -1))
// Output:
// *** Original
// --- Current
// ***************
// *** 1,4 ****
// one
// ! two
// ! three
// four
// --- 1,4 ----
// + zero
// one
// ! tree
// four
}
func rep(s string, count int) string {
return strings.Repeat(s, count)
}
func TestWithAsciiOneInsert(t *testing.T) {
sm := NewMatcher(splitChars(rep("b", 100)),
splitChars("a"+rep("b", 100)))
assertAlmostEqual(t, sm.Ratio(), 0.995, 3)
assertEqual(t, sm.GetOpCodes(),
[]OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}})
assertEqual(t, len(sm.bPopular), 0)
sm = NewMatcher(splitChars(rep("b", 100)),
splitChars(rep("b", 50)+"a"+rep("b", 50)))
assertAlmostEqual(t, sm.Ratio(), 0.995, 3)
assertEqual(t, sm.GetOpCodes(),
[]OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}})
assertEqual(t, len(sm.bPopular), 0)
}
func TestWithAsciiOnDelete(t *testing.T) {
sm := NewMatcher(splitChars(rep("a", 40)+"c"+rep("b", 40)),
splitChars(rep("a", 40)+rep("b", 40)))
assertAlmostEqual(t, sm.Ratio(), 0.994, 3)
assertEqual(t, sm.GetOpCodes(),
[]OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}})
}
func TestWithAsciiBJunk(t *testing.T) {
isJunk := func(s string) bool {
return s == " "
}
sm := NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)), true, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{})
sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}})
isJunk = func(s string) bool {
return s == " " || s == "b"
}
sm = NewMatcherWithJunk(splitChars(rep("a", 40)+rep("b", 40)),
splitChars(rep("a", 44)+rep("b", 40)+rep(" ", 20)), false, isJunk)
assertEqual(t, sm.bJunk, map[string]struct{}{" ": struct{}{}, "b": struct{}{}})
}
func TestSFBugsRatioForNullSeqn(t *testing.T) {
sm := NewMatcher(nil, nil)
assertEqual(t, sm.Ratio(), 1.0)
assertEqual(t, sm.QuickRatio(), 1.0)
assertEqual(t, sm.RealQuickRatio(), 1.0)
}
func TestSFBugsComparingEmptyLists(t *testing.T) {
groups := NewMatcher(nil, nil).GetGroupedOpCodes(-1)
assertEqual(t, len(groups), 0)
diff := UnifiedDiff{
FromFile: "Original",
ToFile: "Current",
Context: 3,
}
result, err := GetUnifiedDiffString(diff)
assertEqual(t, err, nil)
assertEqual(t, result, "")
}
func TestOutputFormatRangeFormatUnified(t *testing.T) {
// Per the diff spec at http://www.unix.org/single_unix_specification/
//
// Each <range> field shall be of the form:
// %1d", <beginning line number> if the range contains exactly one line,
// and:
// "%1d,%1d", <beginning line number>, <number of lines> otherwise.
// If a range is empty, its beginning line number shall be the number of
// the line just before the range, or 0 if the empty range starts the file.
fm := formatRangeUnified
assertEqual(t, fm(3, 3), "3,0")
assertEqual(t, fm(3, 4), "4")
assertEqual(t, fm(3, 5), "4,2")
assertEqual(t, fm(3, 6), "4,3")
assertEqual(t, fm(0, 0), "0,0")
}
func TestOutputFormatRangeFormatContext(t *testing.T) {
// Per the diff spec at http://www.unix.org/single_unix_specification/
//
// The range of lines in file1 shall be written in the following format
// if the range contains two or more lines:
// "*** %d,%d ****\n", <beginning line number>, <ending line number>
// and the following format otherwise:
// "*** %d ****\n", <ending line number>
// The ending line number of an empty range shall be the number of the preceding line,
// or 0 if the range is at the start of the file.
//
// Next, the range of lines in file2 shall be written in the following format
// if the range contains two or more lines:
// "--- %d,%d ----\n", <beginning line number>, <ending line number>
// and the following format otherwise:
// "--- %d ----\n", <ending line number>
fm := formatRangeContext
assertEqual(t, fm(3, 3), "3")
assertEqual(t, fm(3, 4), "4")
assertEqual(t, fm(3, 5), "4,5")
assertEqual(t, fm(3, 6), "4,6")
assertEqual(t, fm(0, 0), "0")
}
func TestOutputFormatTabDelimiter(t *testing.T) {
diff := UnifiedDiff{
A: splitChars("one"),
B: splitChars("two"),
FromFile: "Original",
FromDate: "2005-01-26 23:30:50",
ToFile: "Current",
ToDate: "2010-04-12 10:20:52",
Eol: "\n",
}
ud, err := GetUnifiedDiffString(diff)
assertEqual(t, err, nil)
assertEqual(t, SplitLines(ud)[:2], []string{
"--- Original\t2005-01-26 23:30:50\n",
"+++ Current\t2010-04-12 10:20:52\n",
})
cd, err := GetContextDiffString(ContextDiff(diff))
assertEqual(t, err, nil)
assertEqual(t, SplitLines(cd)[:2], []string{
"*** Original\t2005-01-26 23:30:50\n",
"--- Current\t2010-04-12 10:20:52\n",
})
}
func TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) {
diff := UnifiedDiff{
A: splitChars("one"),
B: splitChars("two"),
FromFile: "Original",
ToFile: "Current",
Eol: "\n",
}
ud, err := GetUnifiedDiffString(diff)
assertEqual(t, err, nil)
assertEqual(t, SplitLines(ud)[:2], []string{"--- Original\n", "+++ Current\n"})
cd, err := GetContextDiffString(ContextDiff(diff))
assertEqual(t, err, nil)
assertEqual(t, SplitLines(cd)[:2], []string{"*** Original\n", "--- Current\n"})
}
func TestSplitLines(t *testing.T) {
allTests := []struct {
input string
want []string
}{
{"foo", []string{"foo\n"}},
{"foo\nbar", []string{"foo\n", "bar\n"}},
{"foo\nbar\n", []string{"foo\n", "bar\n", "\n"}},
}
for _, test := range allTests {
assertEqual(t, SplitLines(test.input), test.want)
}
}
func benchmarkSplitLines(b *testing.B, count int) {
str := strings.Repeat("foo\n", count)
b.ResetTimer()
n := 0
for i := 0; i < b.N; i++ {
n += len(SplitLines(str))
}
}
func BenchmarkSplitLines100(b *testing.B) {
benchmarkSplitLines(b, 100)
}
func BenchmarkSplitLines10000(b *testing.B) {
benchmarkSplitLines(b, 10000)
}

@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
.DS_Store

@ -0,0 +1,15 @@
language: go
sudo: false
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
script:
- go test -v ./...

Some files were not shown because too many files have changed in this diff.