[Vendor] blevesearch v0.8.1 -> v1.0.7 (#11360)
* Update blevesearch v0.8.1 -> v1.0.7
* make vendor

Co-authored-by: zeripath <art27@cantab.net>
go.mod (17 lines changed)
@@ -19,15 +19,11 @@ require (
	gitea.com/macaron/toolbox v0.0.0-20190822013122-05ff0fc766b7
	github.com/BurntSushi/toml v0.3.1
	github.com/PuerkitoBio/goquery v1.5.0
	github.com/RoaringBitmap/roaring v0.4.21 // indirect
	github.com/RoaringBitmap/roaring v0.4.23 // indirect
	github.com/bgentry/speakeasy v0.1.0 // indirect
	github.com/blevesearch/bleve v0.8.1
	github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 // indirect
	github.com/blevesearch/go-porterstemmer v1.0.2 // indirect
	github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f // indirect
	github.com/blevesearch/bleve v1.0.7
	github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect
	github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 // indirect
	github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd // indirect
	github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect
	github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect
	github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect
@@ -36,7 +32,6 @@ require (
	github.com/dustin/go-humanize v1.0.0
	github.com/editorconfig/editorconfig-core-go/v2 v2.1.1
	github.com/emirpasic/gods v1.12.0
	github.com/etcd-io/bbolt v1.3.3 // indirect
	github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a
	github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
	github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
@@ -53,7 +48,7 @@ require (
	github.com/gobwas/glob v0.2.3
	github.com/gogs/chardet v0.0.0-20191104214054-4b6791f73a28
	github.com/gogs/cron v0.0.0-20171120032916-9f6c956d3e14
	github.com/golang/protobuf v1.4.0 // indirect
	github.com/golang/protobuf v1.4.1 // indirect
	github.com/google/go-github/v24 v24.0.1
	github.com/gorilla/context v1.1.1
	github.com/hashicorp/go-retryablehttp v0.6.6 // indirect
@@ -95,10 +90,9 @@ require (
	github.com/sergi/go-diff v1.1.0
	github.com/shurcooL/httpfs v0.0.0-20190527155220-6a4d4a70508b // indirect
	github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd
	github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 // indirect
	github.com/stretchr/testify v1.4.0
	github.com/tecbot/gorocksdb v0.0.0-20181010114359-8752a9433481 // indirect
	github.com/tinylib/msgp v1.1.1 // indirect
	github.com/tinylib/msgp v1.1.2 // indirect
	github.com/tstranex/u2f v1.0.0
	github.com/unknwon/cae v1.0.0
	github.com/unknwon/com v1.0.1
@@ -109,11 +103,10 @@ require (
	github.com/yohcop/openid-go v0.0.0-20160914080427-2c050d2dae53
	github.com/yuin/goldmark v1.1.25
	github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60
	go.etcd.io/bbolt v1.3.3 // indirect
	golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073
	golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
	golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
	golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f
	golang.org/x/text v0.3.2
	golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 // indirect
	golang.org/x/tools v0.0.0-20200325010219-a49f79bcc224
go.sum (58 lines changed)
@ -56,6 +56,8 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/RoaringBitmap/roaring v0.4.21 h1:WJ/zIlNX4wQZ9x8Ey33O1UaD9TCTakYsdLFSBcTwH+8=
|
||||
github.com/RoaringBitmap/roaring v0.4.21/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
|
||||
github.com/RoaringBitmap/roaring v0.4.23 h1:gpyfd12QohbqhFO4NVDUdoPOCXsyahYRQhINmlHxKeo=
|
||||
github.com/RoaringBitmap/roaring v0.4.23/go.mod h1:D0gp8kJQgE1A4LQ5wFLggQEyvDi06Mq5mKs52e1TwOo=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/Unknwon/com v0.0.0-20190321035513-0fed4efef755/go.mod h1:voKvFVpXBJxdIPeqjoJuLK+UVcRlo/JLjeToGxPYu68=
|
||||
@ -83,14 +85,22 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/blevesearch/bleve v0.8.1 h1:20zBREtGe8dvBxCC+717SaxKcUVQOWk3/Fm75vabKpU=
|
||||
github.com/blevesearch/bleve v0.8.1/go.mod h1:Y2lmIkzV6mcNfAnAdOd+ZxHkHchhBfU/xroGIp61wfw=
|
||||
github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3 h1:U6vnxZrTfItfiUiYx0lf/LgHjRSfaKK5QHSom3lEbnA=
|
||||
github.com/blevesearch/blevex v0.0.0-20180227211930-4b158bb555a3/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
|
||||
github.com/blevesearch/go-porterstemmer v1.0.2 h1:qe7n69gBd1OLY5sHKnxQHIbzn0LNJA4hpAf+5XDxV2I=
|
||||
github.com/blevesearch/go-porterstemmer v1.0.2/go.mod h1:haWQqFT3RdOGz7PJuM3or/pWNJS1pKkoZJWCkWu0DVA=
|
||||
github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f h1:kqbi9lqXLLs+zfWlgo1PIiRQ86n33K1JKotjj4rSYOg=
|
||||
github.com/blevesearch/segment v0.0.0-20160915185041-762005e7a34f/go.mod h1:IInt5XRvpiGE09KOk9mmCMLjHhydIhNPKPPFLFBB7L8=
|
||||
github.com/blevesearch/bleve v1.0.7 h1:4PspZE7XABMSKcVpzAKp0E05Yer1PIYmTWk+1ngNr/c=
|
||||
github.com/blevesearch/bleve v1.0.7/go.mod h1:3xvmBtaw12Y4C9iA1RTzwWCof5j5HjydjCTiDE2TeE0=
|
||||
github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040 h1:SjYVcfJVZoCfBlg+fkaq2eoZHTf5HaJfaTeTkOtyfHQ=
|
||||
github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040/go.mod h1:WH+MU2F4T0VmSdaPX+Wu5GYoZBrYWdOZWSjzvYcDmqQ=
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3 h1:GtmsqID0aZdCSNiY8SkuPJ12pD4jI+DdXTAn4YRcHCo=
|
||||
github.com/blevesearch/go-porterstemmer v1.0.3/go.mod h1:angGc5Ht+k2xhJdZi511LtmxuEf0OVpvUUNrwmM1P7M=
|
||||
github.com/blevesearch/mmap-go v1.0.2 h1:JtMHb+FgQCTTYIhtMvimw15dJwu1Y5lrZDMOFXVWPk0=
|
||||
github.com/blevesearch/mmap-go v1.0.2/go.mod h1:ol2qBqYaOUsGdm7aRMRrYGgPvnwLe6Y+7LMvAB5IbSA=
|
||||
github.com/blevesearch/segment v0.9.0 h1:5lG7yBCx98or7gK2cHMKPukPZ/31Kag7nONpoBt22Ac=
|
||||
github.com/blevesearch/segment v0.9.0/go.mod h1:9PfHYUdQCgHktBgvtUOF4x+pc4/l8rdH0u5spnW85UQ=
|
||||
github.com/blevesearch/snowballstem v0.9.0 h1:lMQ189YspGP6sXvZQ4WZ+MLawfV8wOmPoD/iWeNXm8s=
|
||||
github.com/blevesearch/snowballstem v0.9.0/go.mod h1:PivSj3JMc8WuaFkTSRDW2SlrulNWPl4ABg1tC/hlgLs=
|
||||
github.com/blevesearch/zap/v11 v11.0.7 h1:nnmAOP6eXBkqEa1Srq1eqA5Wmn4w+BZjLdjynNxvd+M=
|
||||
github.com/blevesearch/zap/v11 v11.0.7/go.mod h1:bJoY56fdU2m/IP4LLz/1h4jY2thBoREvoqbuJ8zhm9k=
|
||||
github.com/blevesearch/zap/v12 v12.0.7 h1:y8FWSAYkdc4p1dn4YLxNNr1dxXlSUsakJh2Fc/r6cj4=
|
||||
github.com/blevesearch/zap/v12 v12.0.7/go.mod h1:70DNK4ZN4tb42LubeDbfpp6xnm8g3ROYVvvZ6pEoXD8=
|
||||
github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 h1:NGpwhs9FOwddM6TptNrq2ycby4s24TcppSe5uG4DA/Q=
|
||||
github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8=
|
||||
github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 h1:U/lr3Dgy4WK+hNk4tyD+nuGjpVLPEHuJSFXMw11/HPA=
|
||||
@ -108,6 +118,7 @@ github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHo
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/couchbase/ghistogram v0.1.0/go.mod h1:s1Jhy76zqfEecpNWJfWUiKZookAFaiGOEoyzgHt9i7k=
|
||||
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d h1:XMf4E1U+b9E3ElF0mjvfXZdflBRZz4gLp16nQ/QSHQM=
|
||||
github.com/couchbase/gomemcached v0.0.0-20190515232915-c4b4ca0eb21d/go.mod h1:srVSlQLB8iXBVXHgnqemxUXqN6FCvClgCMPCsjBDR7c=
|
||||
github.com/couchbase/gomemcached v0.0.0-20191004160342-7b5da2ec40b2 h1:vZryARwW4PSFXd9arwegEywvMTvPuXL3/oa+4L5NTe8=
|
||||
@ -116,8 +127,9 @@ github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b h1:bZ9rKU2/V8sY+
|
||||
github.com/couchbase/goutils v0.0.0-20190315194238-f9d42b11473b/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
|
||||
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85 h1:0WMIDtuXCKEm4wtAJgAAXa/qtM5O9MariLwgHaRlYmk=
|
||||
github.com/couchbase/goutils v0.0.0-20191018232750-b49639060d85/go.mod h1:BQwMFlJzDjFDG3DJUdU0KORxn88UlsOULuxLExMh3Hs=
|
||||
github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd h1:zeuJhcG3f8eePshH3KxkNE+Xtl53pVln9MOUPMyr/1w=
|
||||
github.com/couchbase/vellum v0.0.0-20190829182332-ef2e028c01fd/go.mod h1:xbc8Ff/oG7h2ejd7AlwOpfd+6QZntc92ygpAOfGwcKY=
|
||||
github.com/couchbase/moss v0.1.0/go.mod h1:9MaHIaRuy9pvLPUJxB8sh8OrLfyDczECVL37grCIubs=
|
||||
github.com/couchbase/vellum v1.0.1 h1:qrj9ohvZedvc51S5KzPfJ6P6z0Vqzv7Lx7k3mVc2WOk=
|
||||
github.com/couchbase/vellum v1.0.1/go.mod h1:FcwrEivFpNi24R3jLOs3n+fs5RnuQnQqCLBJ1uAg1W4=
|
||||
github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7 h1:1XjEY/gnjQ+AfXef2U6dxCquhiRzkEpxZuWqs+QxTL8=
|
||||
github.com/couchbaselabs/go-couchbase v0.0.0-20190708161019-23e7ca2ce2b7/go.mod h1:mby/05p8HE5yHEAKiIH/555NoblMs7PtW6NrYshDruc=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
@ -153,8 +165,6 @@ github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
|
||||
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
|
||||
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
|
||||
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a h1:M1bRpaZAn4GSsqu3hdK2R8H0AH9O6vqCTCbm2oAFGfE=
|
||||
github.com/ethantkoenig/rupture v0.0.0-20180203182544-0a76f03a811a/go.mod h1:MkKY/CB98aVE4VxO63X5vTQKUgcn+3XP15LMASe3lYs=
|
||||
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
|
||||
@ -292,6 +302,8 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
|
||||
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@ -396,6 +408,7 @@ github.com/klauspost/compress v1.9.2 h1:LfVyl+ZlLlLDeQ/d2AqfGIIH4qEDu0Ed2S5GyhCW
|
||||
github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/compress v1.10.2 h1:Znfn6hXZAHaLPNnlqUYRrBSReFHYybslgv4PTiyz6P0=
|
||||
github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||
github.com/kljensen/snowball v0.6.0/go.mod h1:27N7E8fVU5H68RlUmnWwZCfxgt4POBJfENGMvNRhldw=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
@ -464,6 +477,8 @@ github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c h1:3wkDRdxK92dF+c1ke
|
||||
github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae h1:VeRdUYdCw49yizlSbMEn2SZ+gT+3IUKx8BqxyQdz+BY=
|
||||
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
|
||||
github.com/mschoch/smat v0.2.0 h1:8imxQsjDm8yFEAVBe7azKmKSgzSkZXDuKkSq9374khM=
|
||||
github.com/mschoch/smat v0.2.0/go.mod h1:kc9mz7DoBKqDyiRL7VZN8KvXQMWeTaVnttLRXOlotKw=
|
||||
github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc h1:z1PgdCCmYYVL0BoJTUgmAq1p7ca8fzYIPsNyfsN3xAU=
|
||||
github.com/msteinert/pam v0.0.0-20151204160544-02ccfbfaf0cc/go.mod h1:np1wUFZ6tyoke22qDJZY40URn9Ae51gX7ljIWXN5TJs=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
@ -534,6 +549,7 @@ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40T
|
||||
github.com/quasoft/websspi v1.0.0 h1:5nDgdM5xSur9s+B5w2xQ5kxf5nUGqgFgU4W0aDLZ8Mw=
|
||||
github.com/quasoft/websspi v1.0.0/go.mod h1:HmVdl939dQ0WIXZhyik+ARdI03M6bQzaSEKcgpFmewk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001 h1:YDeskXpkNDhPdWN3REluVa46HQOVuVkjkd2sWnrABNQ=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20190321074620-2f0d2b0e0001/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
@ -578,8 +594,8 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2 h1:JNEGSiWg6D3lcBCMCBqN3ELniXujt+0QNHLhNnO0w3s=
|
||||
github.com/steveyen/gtreap v0.0.0-20150807155958-0abe01ef9be2/go.mod h1:mjqs7N0Q6m5HpR7QfXVBZXZWSqTjQLeTujjA/xUp2uw=
|
||||
github.com/steveyen/gtreap v0.1.0 h1:CjhzTa274PyJLJuMZwIzCO1PfC00oRa8d1Kc78bFXJM=
|
||||
github.com/steveyen/gtreap v0.1.0/go.mod h1:kl/5J7XbrOmlIbYIXdRHDDE5QxHqpk0cmkT7Z4dM9/Y=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
@ -596,8 +612,8 @@ github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU=
|
||||
github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y=
|
||||
github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
|
||||
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/toqueteos/trie v1.0.0 h1:8i6pXxNUXNRAqP246iibb7w/pSFquNTQ+uNfriG7vlk=
|
||||
github.com/toqueteos/trie v1.0.0/go.mod h1:Ywk48QhEqhU1+DwhMkJ2x7eeGxDHiGkAdc9+0DYcbsM=
|
||||
@ -637,8 +653,8 @@ github.com/yuin/goldmark-meta v0.0.0-20191126180153-f0638e958b60/go.mod h1:i9Vhc
|
||||
github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs=
|
||||
github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg=
|
||||
go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
go.mongodb.org/mongo-driver v1.1.1 h1:Sq1fR+0c58RME5EoqKdjkiQAmPjmfHlZOoRI6fTUOcs=
|
||||
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
|
||||
@ -728,6 +744,7 @@ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181221143128-b4a75ba826a6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -746,10 +763,13 @@ golang.org/x/sys v0.0.0-20190907184412-d223b2b6db03/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47 h1:/XfQ9z7ib8eEJX2hdgFTZJ/ntt0swNk5oYBziWeTCvY=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f h1:mOhmO9WsBaJCNmaZHPtHs9wOcdqdKCjF6OPJlmDM3KI=
|
||||
golang.org/x/sys v0.0.0-20200509044756-6aff5f38e54f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
@ -822,6 +842,8 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0 h1:cJv5/xdbk1NnMPR1VP9+HU6gupuG9MLBoH1r6RHZ2MY=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk=
|
||||
gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk=
|
||||
|
vendor/github.com/RoaringBitmap/roaring/.travis.yml (1 line changed) [generated, vendored]
@@ -14,6 +14,7 @@ go:
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"
  - "1.13.x"
  - tip

# whitelist
vendor/github.com/RoaringBitmap/roaring/README.md (10 lines changed) [generated, vendored]
@@ -29,11 +29,17 @@ Roaring bitmaps are found to work well in many important applications:

The ``roaring`` Go library is used by
* [Cloud Torrent](https://github.com/jpillora/cloud-torrent): a self-hosted remote torrent client
* [runv](https://github.com/hyperhq/runv): an Hypervisor-based runtime for the Open Containers Initiative
* [Cloud Torrent](https://github.com/jpillora/cloud-torrent)
* [runv](https://github.com/hyperhq/runv)
* [InfluxDB](https://www.influxdata.com)
* [Pilosa](https://www.pilosa.com/)
* [Bleve](http://www.blevesearch.com)
* [lindb](https://github.com/lindb/lindb)
* [Elasticell](https://github.com/deepfabric/elasticell)
* [SourceGraph](https://github.com/sourcegraph/sourcegraph)
* [M3](https://github.com/m3db/m3)
* [trident](https://github.com/NetApp/trident)

This library is used in production in several systems, it is part of the [Awesome Go collection](https://awesome-go.com).
vendor/github.com/RoaringBitmap/roaring/arraycontainer.go (12 lines changed) [generated, vendored]
@@ -24,6 +24,18 @@ func (ac *arrayContainer) fillLeastSignificant16bits(x []uint32, i int, mask uin
	}
}

func (ac *arrayContainer) iterate(cb func(x uint16) bool) bool {
	iterator := shortIterator{ac.content, 0}

	for iterator.hasNext() {
		if !cb(iterator.next()) {
			return false
		}
	}

	return true
}

func (ac *arrayContainer) getShortIterator() shortPeekable {
	return &shortIterator{ac.content, 0}
}
vendor/github.com/RoaringBitmap/roaring/bitmapcontainer.go (12 lines changed) [generated, vendored]
@@ -96,6 +96,18 @@ func (bc *bitmapContainer) maximum() uint16 {
	return uint16(0)
}

func (bc *bitmapContainer) iterate(cb func(x uint16) bool) bool {
	iterator := bitmapContainerShortIterator{bc, bc.NextSetBit(0)}

	for iterator.hasNext() {
		if !cb(iterator.next()) {
			return false
		}
	}

	return true
}

type bitmapContainerShortIterator struct {
	ptr *bitmapContainer
	i   int
vendor/github.com/RoaringBitmap/roaring/roaring.go (85 lines changed) [generated, vendored]
@@ -416,6 +416,38 @@ func (rb *Bitmap) String() string {
	return buffer.String()
}

// Iterate iterates over the bitmap, calling the given callback with each value in the bitmap. If the callback returns
// false, the iteration is halted.
// The iteration results are undefined if the bitmap is modified (e.g., with Add or Remove).
// There is no guarantee as to what order the values will be iterated
func (rb *Bitmap) Iterate(cb func(x uint32) bool) {
	for i := 0; i < rb.highlowcontainer.size(); i++ {
		hs := uint32(rb.highlowcontainer.getKeyAtIndex(i)) << 16
		c := rb.highlowcontainer.getContainerAtIndex(i)

		var shouldContinue bool
		// This is hacky but it avoids allocations from invoking an interface method with a closure
		switch t := c.(type) {
		case *arrayContainer:
			shouldContinue = t.iterate(func(x uint16) bool {
				return cb(uint32(x) | hs)
			})
		case *runContainer16:
			shouldContinue = t.iterate(func(x uint16) bool {
				return cb(uint32(x) | hs)
			})
		case *bitmapContainer:
			shouldContinue = t.iterate(func(x uint16) bool {
				return cb(uint32(x) | hs)
			})
		}

		if !shouldContinue {
			break
		}
	}
}

// Iterator creates a new IntPeekable to iterate over the integers contained in the bitmap, in sorted order;
// the iterator becomes invalid if the bitmap is modified (e.g., with Add or Remove).
func (rb *Bitmap) Iterator() IntPeekable {
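For context, a minimal usage sketch of the new callback-based Iterate API introduced by this roaring upgrade (the sketch itself is not part of the vendored diff; it assumes the standard roaring import path and the existing BitmapOf constructor):

package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	rb := roaring.BitmapOf(1, 2, 3, 100, 1000)
	// The callback receives each stored value; returning false halts the walk.
	rb.Iterate(func(x uint32) bool {
		fmt.Println(x)
		return x < 100 // stop once a value of 100 or more has been visited
	})
}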
@ -475,41 +507,72 @@ func (rb *Bitmap) Equals(o interface{}) bool {
|
||||
|
||||
// AddOffset adds the value 'offset' to each and every value in a bitmap, generating a new bitmap in the process
|
||||
func AddOffset(x *Bitmap, offset uint32) (answer *Bitmap) {
|
||||
containerOffset := highbits(offset)
|
||||
inOffset := lowbits(offset)
|
||||
return AddOffset64(x, int64(offset))
|
||||
}
|
||||
|
||||
// AddOffset64 adds the value 'offset' to each and every value in a bitmap, generating a new bitmap in the process
|
||||
// If offset + element is outside of the range [0,2^32), that the element will be dropped
|
||||
func AddOffset64(x *Bitmap, offset int64) (answer *Bitmap) {
|
||||
// we need "offset" to be a long because we want to support values
|
||||
// between -0xFFFFFFFF up to +-0xFFFFFFFF
|
||||
var containerOffset64 int64
|
||||
|
||||
if offset < 0 {
|
||||
containerOffset64 = (offset - (1 << 16) + 1) / (1 << 16)
|
||||
} else {
|
||||
containerOffset64 = offset >> 16
|
||||
}
|
||||
|
||||
if containerOffset64 >= (1<<16) || containerOffset64 <= -(1<<16) {
|
||||
return New()
|
||||
}
|
||||
|
||||
containerOffset := int32(containerOffset64)
|
||||
inOffset := (uint16)(offset - containerOffset64*(1<<16))
|
||||
|
||||
if inOffset == 0 {
|
||||
answer = x.Clone()
|
||||
for pos := 0; pos < answer.highlowcontainer.size(); pos++ {
|
||||
key := answer.highlowcontainer.getKeyAtIndex(pos)
|
||||
key := int32(answer.highlowcontainer.getKeyAtIndex(pos))
|
||||
key += containerOffset
|
||||
answer.highlowcontainer.keys[pos] = key
|
||||
|
||||
if key >= 0 && key <= MaxUint16 {
|
||||
answer.highlowcontainer.keys[pos] = uint16(key)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
answer = New()
|
||||
|
||||
for pos := 0; pos < x.highlowcontainer.size(); pos++ {
|
||||
key := x.highlowcontainer.getKeyAtIndex(pos)
|
||||
key := int32(x.highlowcontainer.getKeyAtIndex(pos))
|
||||
key += containerOffset
|
||||
|
||||
c := x.highlowcontainer.getContainerAtIndex(pos)
|
||||
offsetted := c.addOffset(inOffset)
|
||||
if offsetted[0].getCardinality() > 0 {
|
||||
|
||||
if offsetted[0].getCardinality() > 0 && (key >= 0 && key <= MaxUint16) {
|
||||
curSize := answer.highlowcontainer.size()
|
||||
lastkey := uint16(0)
|
||||
lastkey := int32(0)
|
||||
|
||||
if curSize > 0 {
|
||||
lastkey = answer.highlowcontainer.getKeyAtIndex(curSize - 1)
|
||||
lastkey = int32(answer.highlowcontainer.getKeyAtIndex(curSize - 1))
|
||||
}
|
||||
|
||||
if curSize > 0 && lastkey == key {
|
||||
prev := answer.highlowcontainer.getContainerAtIndex(curSize - 1)
|
||||
orrseult := prev.ior(offsetted[0])
|
||||
answer.highlowcontainer.setContainerAtIndex(curSize-1, orrseult)
|
||||
} else {
|
||||
answer.highlowcontainer.appendContainer(key, offsetted[0], false)
|
||||
answer.highlowcontainer.appendContainer(uint16(key), offsetted[0], false)
|
||||
}
|
||||
}
|
||||
if offsetted[1].getCardinality() > 0 {
|
||||
answer.highlowcontainer.appendContainer(key+1, offsetted[1], false)
|
||||
|
||||
if offsetted[1].getCardinality() > 0 && ((key+1) >= 0 && (key+1) <= MaxUint16) {
|
||||
answer.highlowcontainer.appendContainer(uint16(key+1), offsetted[1], false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return answer
|
||||
}
|
||||
|
||||
|
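A brief illustrative sketch (not part of the diff) of the reworked offset API above: AddOffset64 takes a signed 64-bit offset, and any element whose shifted value falls outside [0, 2^32) is dropped, per the new doc comment:

// Shift every value down by 5; the element 2 would become negative and is dropped.
shifted := roaring.AddOffset64(roaring.BitmapOf(2, 10, 20), -5)
fmt.Println(shifted.ToArray()) // [5 15]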
vendor/github.com/RoaringBitmap/roaring/roaringarray.go (17 lines changed) [generated, vendored]
@ -4,9 +4,10 @@ import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
snappy "github.com/glycerine/go-unsnap-stream"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"io"
|
||||
)
|
||||
|
||||
//go:generate msgp -unexported
|
||||
@ -38,6 +39,7 @@ type container interface {
|
||||
inot(firstOfRange, endx int) container // i stands for inplace, range is [firstOfRange,endx)
|
||||
xor(r container) container
|
||||
getShortIterator() shortPeekable
|
||||
iterate(cb func(x uint16) bool) bool
|
||||
getReverseIterator() shortIterable
|
||||
getManyIterator() manyIterable
|
||||
contains(i uint16) bool
|
||||
@ -488,20 +490,15 @@ func (ra *roaringArray) writeTo(w io.Writer) (n int64, err error) {
|
||||
nw += 2
|
||||
binary.LittleEndian.PutUint16(buf[2:], uint16(len(ra.keys)-1))
|
||||
nw += 2
|
||||
|
||||
// compute isRun bitmap
|
||||
var ir []byte
|
||||
|
||||
isRun := newBitmapContainer()
|
||||
// compute isRun bitmap without temporary allocation
|
||||
var runbitmapslice = buf[nw:nw+isRunSizeInBytes]
|
||||
for i, c := range ra.containers {
|
||||
switch c.(type) {
|
||||
case *runContainer16:
|
||||
isRun.iadd(uint16(i))
|
||||
runbitmapslice[i / 8] |= 1<<(uint(i)%8)
|
||||
}
|
||||
}
|
||||
// convert to little endian
|
||||
ir = isRun.asLittleEndianByteSlice()[:isRunSizeInBytes]
|
||||
nw += copy(buf[nw:], ir)
|
||||
nw += isRunSizeInBytes
|
||||
} else {
|
||||
binary.LittleEndian.PutUint32(buf[0:], uint32(serialCookieNoRunContainer))
|
||||
nw += 4
|
||||
|
vendor/github.com/RoaringBitmap/roaring/runcontainer.go (12 lines changed) [generated, vendored]
@@ -1162,6 +1162,18 @@ func (rc *runContainer16) newRunIterator16() *runIterator16 {
	return &runIterator16{rc: rc, curIndex: 0, curPosInIndex: 0}
}

func (rc *runContainer16) iterate(cb func(x uint16) bool) bool {
	iterator := runIterator16{rc, 0, 0}

	for iterator.hasNext() {
		if !cb(iterator.next()) {
			return false
		}
	}

	return true
}

// hasNext returns false if calling next will panic. It
// returns true when there is at least one more value
// available in the iteration sequence.
vendor/github.com/RoaringBitmap/roaring/setutil.go (1 line changed) [generated, vendored]
@@ -14,6 +14,7 @@ func equal(a, b []uint16) bool {

func difference(set1 []uint16, set2 []uint16, buffer []uint16) int {
	if 0 == len(set2) {
		buffer = buffer[:len(set1)]
		for k := 0; k < len(set1); k++ {
			buffer[k] = set1[k]
		}
vendor/github.com/RoaringBitmap/roaring/util.go (2 lines changed) [generated, vendored]
@@ -112,7 +112,7 @@ func highbits(x uint32) uint16 {
	return uint16(x >> 16)
}
func lowbits(x uint32) uint16 {
	return uint16(x & 0xFFFF)
	return uint16(x & maxLowBit)
}

const maxLowBit = 0xFFFF
vendor/github.com/blevesearch/bleve/.travis.yml (10 lines changed) [generated, vendored]
@@ -3,9 +3,9 @@ sudo: false
language: go

go:
  - "1.10.x"
  - "1.11.x"
  - "1.12.x"
  - "1.13.x"
  - "1.14.x"

script:
  - go get golang.org/x/tools/cmd/cover
@@ -16,11 +16,7 @@ script:
  - go test -race -v $(go list ./... | grep -v vendor/)
  - go vet $(go list ./... | grep -v vendor/)
  - go test ./test -v -indexType scorch
  - if [[ ${TRAVIS_GO_VERSION} =~ ^1\.10 ]]; then
      echo "errcheck skipped for go version" $TRAVIS_GO_VERSION;
    else
      errcheck -ignorepkg fmt $(go list ./... | grep -v vendor/);
    fi
  - errcheck -ignorepkg fmt $(go list ./... | grep -v vendor/);
  - docs/project-code-coverage.sh
  - docs/build_children.sh
vendor/github.com/blevesearch/bleve/analysis/lang/en/stemmer_en_snowball.go (49 lines, new file) [generated, vendored]
@@ -0,0 +1,49 @@
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package en

import (
	"github.com/blevesearch/bleve/analysis"
	"github.com/blevesearch/bleve/registry"

	"github.com/blevesearch/snowballstem"
	"github.com/blevesearch/snowballstem/english"
)

const SnowballStemmerName = "stemmer_en_snowball"

type EnglishStemmerFilter struct {
}

func NewEnglishStemmerFilter() *EnglishStemmerFilter {
	return &EnglishStemmerFilter{}
}

func (s *EnglishStemmerFilter) Filter(input analysis.TokenStream) analysis.TokenStream {
	for _, token := range input {
		env := snowballstem.NewEnv(string(token.Term))
		english.Stem(env)
		token.Term = []byte(env.Current())
	}
	return input
}

func EnglishStemmerFilterConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.TokenFilter, error) {
	return NewEnglishStemmerFilter(), nil
}

func init() {
	registry.RegisterTokenFilter(SnowballStemmerName, EnglishStemmerFilterConstructor)
}
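A hedged usage sketch (not part of the vendored code) showing how the newly registered "stemmer_en_snowball" filter could be wired into a custom analyzer; the analyzer name "en_snowball" is an arbitrary choice, while "custom", "unicode" and "to_lower" are bleve's standard registered names:

package search

import (
	"github.com/blevesearch/bleve"
	"github.com/blevesearch/bleve/analysis/lang/en"
)

// buildIndex creates an index whose default analyzer lower-cases tokens and
// stems them with the snowball filter registered by the file above.
func buildIndex(path string) (bleve.Index, error) {
	m := bleve.NewIndexMapping()
	if err := m.AddCustomAnalyzer("en_snowball", map[string]interface{}{
		"type":          "custom",
		"tokenizer":     "unicode",
		"token_filters": []string{"to_lower", en.SnowballStemmerName},
	}); err != nil {
		return nil, err
	}
	m.DefaultAnalyzer = "en_snowball"
	return bleve.New(path, m)
}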
vendor/github.com/blevesearch/bleve/geo/geo.go (8 lines changed) [generated, vendored]
@@ -33,14 +33,16 @@ var minLonRad = minLon * degreesToRadian
var minLatRad = minLat * degreesToRadian
var maxLonRad = maxLon * degreesToRadian
var maxLatRad = maxLat * degreesToRadian
var geoTolerance = 1E-6
var geoTolerance = 1e-6
var lonScale = float64((uint64(0x1)<<GeoBits)-1) / 360.0
var latScale = float64((uint64(0x1)<<GeoBits)-1) / 180.0

var geoHashMaxLength = 12

// Point represents a geo point.
type Point struct {
	Lon float64
	Lat float64
	Lon float64 `json:"lon"`
	Lat float64 `json:"lat"`
}

// MortonHash computes the morton hash value for the provided geo point
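A small sketch (not part of the diff) of what the new struct tags change: geo points now marshal to lower-case JSON keys instead of the exported field names:

package search

import (
	"encoding/json"
	"fmt"

	"github.com/blevesearch/bleve/geo"
)

func printPoint() {
	p := geo.Point{Lon: -2.2446, Lat: 53.4834}
	b, _ := json.Marshal(p)
	// With the new tags this prints {"lon":-2.2446,"lat":53.4834};
	// previously the keys were the exported field names "Lon" and "Lat".
	fmt.Println(string(b))
}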
vendor/github.com/blevesearch/bleve/geo/parse.go (2 lines changed) [generated, vendored]
@@ -85,11 +85,13 @@ func ExtractGeoPoint(thing interface{}) (lon, lat float64, success bool) {
			}
		} else {
			// geohash
			if len(geoStr) <= geoHashMaxLength {
				lat, lon = DecodeGeoHash(geoStr)
				foundLat = true
				foundLon = true
			}
		}
	}

	// is it a map
	if l, ok := thing.(map[string]interface{}); ok {
vendor/github.com/blevesearch/bleve/go.mod (25 lines, new file) [generated, vendored]
@@ -0,0 +1,25 @@
module github.com/blevesearch/bleve

go 1.13

require (
	github.com/RoaringBitmap/roaring v0.4.21
	github.com/blevesearch/blevex v0.0.0-20190916190636-152f0fe5c040
	github.com/blevesearch/go-porterstemmer v1.0.3
	github.com/blevesearch/segment v0.9.0
	github.com/blevesearch/snowballstem v0.9.0
	github.com/blevesearch/zap/v11 v11.0.7
	github.com/blevesearch/zap/v12 v12.0.7
	github.com/couchbase/ghistogram v0.1.0 // indirect
	github.com/couchbase/moss v0.1.0
	github.com/couchbase/vellum v1.0.1
	github.com/golang/protobuf v1.3.2
	github.com/kljensen/snowball v0.6.0
	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563
	github.com/spf13/cobra v0.0.5
	github.com/steveyen/gtreap v0.1.0
	github.com/syndtr/goleveldb v1.0.0
	github.com/willf/bitset v1.1.10
	go.etcd.io/bbolt v1.3.4
	golang.org/x/text v0.3.0
)
vendor/github.com/blevesearch/bleve/index/scorch/introducer.go (95 lines changed) [generated, vendored]
@ -21,7 +21,6 @@ import (
|
||||
"github.com/RoaringBitmap/roaring"
|
||||
"github.com/blevesearch/bleve/index"
|
||||
"github.com/blevesearch/bleve/index/scorch/segment"
|
||||
"github.com/blevesearch/bleve/index/scorch/segment/zap"
|
||||
)
|
||||
|
||||
type segmentIntroduction struct {
|
||||
@ -77,11 +76,6 @@ OUTER:
|
||||
case persist := <-s.persists:
|
||||
s.introducePersist(persist)
|
||||
|
||||
case revertTo := <-s.revertToSnapshots:
|
||||
err := s.revertToSnapshot(revertTo)
|
||||
if err != nil {
|
||||
continue OUTER
|
||||
}
|
||||
}
|
||||
|
||||
var epochCurr uint64
|
||||
@ -312,6 +306,8 @@ func (s *Scorch) introducePersist(persist *persistIntroduction) {
|
||||
close(persist.applied)
|
||||
}
|
||||
|
||||
// The introducer should definitely handle the segmentMerge.notify
|
||||
// channel before exiting the introduceMerge.
|
||||
func (s *Scorch) introduceMerge(nextMerge *segmentMerge) {
|
||||
atomic.AddUint64(&s.stats.TotIntroduceMergeBeg, 1)
|
||||
defer atomic.AddUint64(&s.stats.TotIntroduceMergeEnd, 1)
|
||||
@ -409,11 +405,11 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) {
|
||||
atomic.AddUint64(&s.stats.TotIntroducedSegmentsMerge, 1)
|
||||
|
||||
switch nextMerge.new.(type) {
|
||||
case *zap.SegmentBase:
|
||||
case segment.PersistedSegment:
|
||||
fileSegments++
|
||||
default:
|
||||
docsToPersistCount += nextMerge.new.Count() - newSegmentDeleted.GetCardinality()
|
||||
memSegments++
|
||||
case *zap.Segment:
|
||||
fileSegments++
|
||||
}
|
||||
}
|
||||
|
||||
@ -443,86 +439,11 @@ func (s *Scorch) introduceMerge(nextMerge *segmentMerge) {
|
||||
close(nextMerge.notify)
|
||||
}
|
||||
|
||||
func (s *Scorch) revertToSnapshot(revertTo *snapshotReversion) error {
|
||||
atomic.AddUint64(&s.stats.TotIntroduceRevertBeg, 1)
|
||||
defer atomic.AddUint64(&s.stats.TotIntroduceRevertEnd, 1)
|
||||
|
||||
if revertTo.snapshot == nil {
|
||||
err := fmt.Errorf("Cannot revert to a nil snapshot")
|
||||
revertTo.applied <- err
|
||||
return err
|
||||
}
|
||||
|
||||
// acquire lock
|
||||
s.rootLock.Lock()
|
||||
|
||||
// prepare a new index snapshot, based on next snapshot
|
||||
newSnapshot := &IndexSnapshot{
|
||||
parent: s,
|
||||
segment: make([]*SegmentSnapshot, len(revertTo.snapshot.segment)),
|
||||
offsets: revertTo.snapshot.offsets,
|
||||
internal: revertTo.snapshot.internal,
|
||||
epoch: s.nextSnapshotEpoch,
|
||||
refs: 1,
|
||||
creator: "revertToSnapshot",
|
||||
}
|
||||
s.nextSnapshotEpoch++
|
||||
|
||||
var docsToPersistCount, memSegments, fileSegments uint64
|
||||
// iterate through segments
|
||||
for i, segmentSnapshot := range revertTo.snapshot.segment {
|
||||
newSnapshot.segment[i] = &SegmentSnapshot{
|
||||
id: segmentSnapshot.id,
|
||||
segment: segmentSnapshot.segment,
|
||||
deleted: segmentSnapshot.deleted,
|
||||
cachedDocs: segmentSnapshot.cachedDocs,
|
||||
creator: segmentSnapshot.creator,
|
||||
}
|
||||
newSnapshot.segment[i].segment.AddRef()
|
||||
|
||||
// remove segment from ineligibleForRemoval map
|
||||
filename := zapFileName(segmentSnapshot.id)
|
||||
delete(s.ineligibleForRemoval, filename)
|
||||
|
||||
if isMemorySegment(segmentSnapshot) {
|
||||
docsToPersistCount += segmentSnapshot.Count()
|
||||
memSegments++
|
||||
} else {
|
||||
fileSegments++
|
||||
}
|
||||
}
|
||||
|
||||
atomic.StoreUint64(&s.stats.TotItemsToPersist, docsToPersistCount)
|
||||
atomic.StoreUint64(&s.stats.TotMemorySegmentsAtRoot, memSegments)
|
||||
atomic.StoreUint64(&s.stats.TotFileSegmentsAtRoot, fileSegments)
|
||||
|
||||
if revertTo.persisted != nil {
|
||||
s.rootPersisted = append(s.rootPersisted, revertTo.persisted)
|
||||
}
|
||||
|
||||
newSnapshot.updateSize()
|
||||
// swap in new snapshot
|
||||
rootPrev := s.root
|
||||
s.root = newSnapshot
|
||||
|
||||
atomic.StoreUint64(&s.stats.CurRootEpoch, s.root.epoch)
|
||||
// release lock
|
||||
s.rootLock.Unlock()
|
||||
|
||||
if rootPrev != nil {
|
||||
_ = rootPrev.DecRef()
|
||||
}
|
||||
|
||||
close(revertTo.applied)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isMemorySegment(s *SegmentSnapshot) bool {
|
||||
switch s.segment.(type) {
|
||||
case *zap.SegmentBase:
|
||||
return true
|
||||
default:
|
||||
case segment.PersistedSegment:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
vendor/github.com/blevesearch/bleve/index/scorch/merge.go (77 lines changed) [generated, vendored]
@ -25,7 +25,6 @@ import (
|
||||
"github.com/RoaringBitmap/roaring"
|
||||
"github.com/blevesearch/bleve/index/scorch/mergeplan"
|
||||
"github.com/blevesearch/bleve/index/scorch/segment"
|
||||
"github.com/blevesearch/bleve/index/scorch/segment/zap"
|
||||
)
|
||||
|
||||
func (s *Scorch) mergerLoop() {
|
||||
@ -131,18 +130,18 @@ func (s *Scorch) parseMergePlannerOptions() (*mergeplan.MergePlanOptions,
|
||||
|
||||
func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
options *mergeplan.MergePlanOptions) error {
|
||||
// build list of zap segments in this snapshot
|
||||
var onlyZapSnapshots []mergeplan.Segment
|
||||
// build list of persisted segments in this snapshot
|
||||
var onlyPersistedSnapshots []mergeplan.Segment
|
||||
for _, segmentSnapshot := range ourSnapshot.segment {
|
||||
if _, ok := segmentSnapshot.segment.(*zap.Segment); ok {
|
||||
onlyZapSnapshots = append(onlyZapSnapshots, segmentSnapshot)
|
||||
if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok {
|
||||
onlyPersistedSnapshots = append(onlyPersistedSnapshots, segmentSnapshot)
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlan, 1)
|
||||
|
||||
// give this list to the planner
|
||||
resultMergePlan, err := mergeplan.Plan(onlyZapSnapshots, options)
|
||||
resultMergePlan, err := mergeplan.Plan(onlyPersistedSnapshots, options)
|
||||
if err != nil {
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlanErr, 1)
|
||||
return fmt.Errorf("merge planning err: %v", err)
|
||||
@ -157,8 +156,8 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlanTasks, uint64(len(resultMergePlan.Tasks)))
|
||||
|
||||
// process tasks in serial for now
|
||||
var notifications []chan *IndexSnapshot
|
||||
var filenames []string
|
||||
|
||||
for _, task := range resultMergePlan.Tasks {
|
||||
if len(task.Segments) == 0 {
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlanTasksSegmentsEmpty, 1)
|
||||
@ -169,24 +168,24 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
|
||||
oldMap := make(map[uint64]*SegmentSnapshot)
|
||||
newSegmentID := atomic.AddUint64(&s.nextSegmentID, 1)
|
||||
segmentsToMerge := make([]*zap.Segment, 0, len(task.Segments))
|
||||
segmentsToMerge := make([]segment.Segment, 0, len(task.Segments))
|
||||
docsToDrop := make([]*roaring.Bitmap, 0, len(task.Segments))
|
||||
|
||||
for _, planSegment := range task.Segments {
|
||||
if segSnapshot, ok := planSegment.(*SegmentSnapshot); ok {
|
||||
oldMap[segSnapshot.id] = segSnapshot
|
||||
if zapSeg, ok := segSnapshot.segment.(*zap.Segment); ok {
|
||||
if persistedSeg, ok := segSnapshot.segment.(segment.PersistedSegment); ok {
|
||||
if segSnapshot.LiveSize() == 0 {
|
||||
atomic.AddUint64(&s.stats.TotFileMergeSegmentsEmpty, 1)
|
||||
oldMap[segSnapshot.id] = nil
|
||||
} else {
|
||||
segmentsToMerge = append(segmentsToMerge, zapSeg)
|
||||
segmentsToMerge = append(segmentsToMerge, segSnapshot.segment)
|
||||
docsToDrop = append(docsToDrop, segSnapshot.deleted)
|
||||
}
|
||||
// track the files getting merged for unsetting the
|
||||
// removal ineligibility. This helps to unflip files
|
||||
// even with fast merger, slow persister work flows.
|
||||
path := zapSeg.Path()
|
||||
path := persistedSeg.Path()
|
||||
filenames = append(filenames,
|
||||
strings.TrimPrefix(path, s.path+string(os.PathSeparator)))
|
||||
}
|
||||
@ -203,8 +202,8 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
fileMergeZapStartTime := time.Now()
|
||||
|
||||
atomic.AddUint64(&s.stats.TotFileMergeZapBeg, 1)
|
||||
newDocNums, _, err := zap.Merge(segmentsToMerge, docsToDrop, path,
|
||||
DefaultChunkFactor, s.closeCh, s)
|
||||
newDocNums, _, err := s.segPlugin.Merge(segmentsToMerge, docsToDrop, path,
|
||||
s.closeCh, s)
|
||||
atomic.AddUint64(&s.stats.TotFileMergeZapEnd, 1)
|
||||
|
||||
fileMergeZapTime := uint64(time.Since(fileMergeZapStartTime))
|
||||
@ -222,17 +221,12 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
return fmt.Errorf("merging failed: %v", err)
|
||||
}
|
||||
|
||||
seg, err = zap.Open(path)
|
||||
seg, err = s.segPlugin.Open(path)
|
||||
if err != nil {
|
||||
s.unmarkIneligibleForRemoval(filename)
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlanTasksErr, 1)
|
||||
return err
|
||||
}
|
||||
err = zap.ValidateMerge(segmentsToMerge, nil, docsToDrop, seg.(*zap.Segment))
|
||||
if err != nil {
|
||||
s.unmarkIneligibleForRemoval(filename)
|
||||
return fmt.Errorf("merge validation failed: %v", err)
|
||||
}
|
||||
oldNewDocNums = make(map[uint64][]uint64)
|
||||
for i, segNewDocNums := range newDocNums {
|
||||
oldNewDocNums[task.Segments[i].Id()] = segNewDocNums
|
||||
@ -246,9 +240,8 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
old: oldMap,
|
||||
oldNewDocNums: oldNewDocNums,
|
||||
new: seg,
|
||||
notify: make(chan *IndexSnapshot, 1),
|
||||
notify: make(chan *IndexSnapshot),
|
||||
}
|
||||
notifications = append(notifications, sm.notify)
|
||||
|
||||
// give it to the introducer
|
||||
select {
|
||||
@ -259,20 +252,21 @@ func (s *Scorch) planMergeAtSnapshot(ourSnapshot *IndexSnapshot,
|
||||
atomic.AddUint64(&s.stats.TotFileMergeIntroductions, 1)
|
||||
}
|
||||
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlanTasksDone, 1)
|
||||
introStartTime := time.Now()
|
||||
// it is safe to blockingly wait for the merge introduction
|
||||
// here as the introducer is bound to handle the notify channel.
|
||||
newSnapshot := <-sm.notify
|
||||
introTime := uint64(time.Since(introStartTime))
|
||||
atomic.AddUint64(&s.stats.TotFileMergeZapIntroductionTime, introTime)
|
||||
if atomic.LoadUint64(&s.stats.MaxFileMergeZapIntroductionTime) < introTime {
|
||||
atomic.StoreUint64(&s.stats.MaxFileMergeZapIntroductionTime, introTime)
|
||||
}
|
||||
|
||||
for _, notification := range notifications {
|
||||
select {
|
||||
case <-s.closeCh:
|
||||
atomic.AddUint64(&s.stats.TotFileMergeIntroductionsSkipped, 1)
|
||||
return segment.ErrClosed
|
||||
case newSnapshot := <-notification:
|
||||
atomic.AddUint64(&s.stats.TotFileMergeIntroductionsDone, 1)
|
||||
if newSnapshot != nil {
|
||||
_ = newSnapshot.DecRef()
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddUint64(&s.stats.TotFileMergePlanTasksDone, 1)
|
||||
}
|
||||
|
||||
// once all the newly merged segment introductions are done,
|
||||
@ -297,8 +291,8 @@ type segmentMerge struct {
|
||||
// persisted segment, and synchronously introduce that new segment
|
||||
// into the root
|
||||
func (s *Scorch) mergeSegmentBases(snapshot *IndexSnapshot,
|
||||
sbs []*zap.SegmentBase, sbsDrops []*roaring.Bitmap, sbsIndexes []int,
|
||||
chunkFactor uint32) (*IndexSnapshot, uint64, error) {
|
||||
sbs []segment.Segment, sbsDrops []*roaring.Bitmap,
|
||||
sbsIndexes []int) (*IndexSnapshot, uint64, error) {
|
||||
atomic.AddUint64(&s.stats.TotMemMergeBeg, 1)
|
||||
|
||||
memMergeZapStartTime := time.Now()
|
||||
@ -310,7 +304,7 @@ func (s *Scorch) mergeSegmentBases(snapshot *IndexSnapshot,
|
||||
path := s.path + string(os.PathSeparator) + filename
|
||||
|
||||
newDocNums, _, err :=
|
||||
zap.MergeSegmentBases(sbs, sbsDrops, path, chunkFactor, s.closeCh, s)
|
||||
s.segPlugin.Merge(sbs, sbsDrops, path, s.closeCh, s)
|
||||
|
||||
atomic.AddUint64(&s.stats.TotMemMergeZapEnd, 1)
|
||||
|
||||
@ -325,15 +319,11 @@ func (s *Scorch) mergeSegmentBases(snapshot *IndexSnapshot,
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
seg, err := zap.Open(path)
|
||||
seg, err := s.segPlugin.Open(path)
|
||||
if err != nil {
|
||||
atomic.AddUint64(&s.stats.TotMemMergeErr, 1)
|
||||
return nil, 0, err
|
||||
}
|
||||
err = zap.ValidateMerge(nil, sbs, sbsDrops, seg.(*zap.Segment))
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("in-memory merge validation failed: %v", err)
|
||||
}
|
||||
|
||||
// update persisted stats
|
||||
atomic.AddUint64(&s.stats.TotPersistedItems, seg.Count())
|
||||
@ -344,7 +334,7 @@ func (s *Scorch) mergeSegmentBases(snapshot *IndexSnapshot,
|
||||
old: make(map[uint64]*SegmentSnapshot),
|
||||
oldNewDocNums: make(map[uint64][]uint64),
|
||||
new: seg,
|
||||
notify: make(chan *IndexSnapshot, 1),
|
||||
notify: make(chan *IndexSnapshot),
|
||||
}
|
||||
|
||||
for i, idx := range sbsIndexes {
|
||||
@ -360,14 +350,13 @@ func (s *Scorch) mergeSegmentBases(snapshot *IndexSnapshot,
|
||||
case s.merges <- sm:
|
||||
}
|
||||
|
||||
select { // wait for introduction to complete
|
||||
case <-s.closeCh:
|
||||
return nil, 0, segment.ErrClosed
|
||||
case newSnapshot := <-sm.notify:
|
||||
// blockingly wait for the introduction to complete
|
||||
newSnapshot := <-sm.notify
|
||||
if newSnapshot != nil {
|
||||
atomic.AddUint64(&s.stats.TotMemMergeSegments, uint64(len(sbs)))
|
||||
atomic.AddUint64(&s.stats.TotMemMergeDone, 1)
|
||||
return newSnapshot, newSegmentID, nil
|
||||
}
|
||||
return newSnapshot, newSegmentID, nil
|
||||
}
|
||||
|
||||
func (s *Scorch) ReportBytesWritten(bytesWritten uint64) {
|
||||
|
vendor/github.com/blevesearch/bleve/index/scorch/optimize.go (74 lines changed) [generated, vendored]
@ -18,10 +18,8 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/RoaringBitmap/roaring"
|
||||
|
||||
"github.com/blevesearch/bleve/index"
|
||||
"github.com/blevesearch/bleve/index/scorch/segment"
|
||||
"github.com/blevesearch/bleve/index/scorch/segment/zap"
|
||||
)
|
||||
|
||||
var OptimizeConjunction = true
|
||||
@ -81,25 +79,25 @@ func (o *OptimizeTFRConjunction) Finish() (index.Optimized, error) {
|
||||
}
|
||||
|
||||
for i := range o.snapshot.segment {
|
||||
itr0, ok := o.tfrs[0].iterators[i].(*zap.PostingsIterator)
|
||||
if !ok || itr0.ActualBM == nil {
|
||||
itr0, ok := o.tfrs[0].iterators[i].(segment.OptimizablePostingsIterator)
|
||||
if !ok || itr0.ActualBitmap() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
itr1, ok := o.tfrs[1].iterators[i].(*zap.PostingsIterator)
|
||||
if !ok || itr1.ActualBM == nil {
|
||||
itr1, ok := o.tfrs[1].iterators[i].(segment.OptimizablePostingsIterator)
|
||||
if !ok || itr1.ActualBitmap() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
bm := roaring.And(itr0.ActualBM, itr1.ActualBM)
|
||||
bm := roaring.And(itr0.ActualBitmap(), itr1.ActualBitmap())
|
||||
|
||||
for _, tfr := range o.tfrs[2:] {
|
||||
itr, ok := tfr.iterators[i].(*zap.PostingsIterator)
|
||||
if !ok || itr.ActualBM == nil {
|
||||
itr, ok := tfr.iterators[i].(segment.OptimizablePostingsIterator)
|
||||
if !ok || itr.ActualBitmap() == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
bm.And(itr.ActualBM)
|
||||
bm.And(itr.ActualBitmap())
|
||||
}
|
||||
|
||||
// in this conjunction optimization, the postings iterators
@ -107,10 +105,9 @@ func (o *OptimizeTFRConjunction) Finish() (index.Optimized, error) {
// regular conjunction searcher machinery will still be used,
// but the underlying bitmap will be smaller.
for _, tfr := range o.tfrs {
itr, ok := tfr.iterators[i].(*zap.PostingsIterator)
if ok && itr.ActualBM != nil {
itr.ActualBM = bm
itr.Actual = bm.Iterator()
itr, ok := tfr.iterators[i].(segment.OptimizablePostingsIterator)
if ok && itr.ActualBitmap() != nil {
itr.ReplaceActual(bm)
}
}
}
@ -191,9 +188,9 @@ OUTER:
continue OUTER
}

itr, ok := tfr.iterators[i].(*zap.PostingsIterator)
itr, ok := tfr.iterators[i].(segment.OptimizablePostingsIterator)
if !ok {
// We optimize zap postings iterators only.
// We only optimize postings iterators that support this operation.
return nil, nil
}

@ -201,12 +198,6 @@ OUTER:
// can perform several optimizations up-front here.
docNum1Hit, ok := itr.DocNum1Hit()
if ok {
if docNum1Hit == zap.DocNum1HitFinished {
// An empty docNum here means the entire AND is empty.
oTFR.iterators[i] = segment.AnEmptyPostingsIterator
continue OUTER
}

if docNum1HitLastOk && docNum1HitLast != docNum1Hit {
// The docNum1Hit doesn't match the previous
// docNum1HitLast, so the entire AND is empty.
@ -220,14 +211,14 @@ OUTER:
continue
}

if itr.ActualBM == nil {
if itr.ActualBitmap() == nil {
// An empty actual bitmap means the entire AND is empty.
oTFR.iterators[i] = segment.AnEmptyPostingsIterator
continue OUTER
}

// Collect the actual bitmap for more processing later.
actualBMs = append(actualBMs, itr.ActualBM)
actualBMs = append(actualBMs, itr.ActualBitmap())
}

if docNum1HitLastOk {
@ -245,11 +236,7 @@ OUTER:

// The actual bitmaps and docNum1Hits all contain or have
// the same 1-hit docNum, so that's our AND'ed result.
oTFR.iterators[i], err = zap.PostingsIteratorFrom1Hit(
docNum1HitLast, zap.NormBits1Hit, false, false)
if err != nil {
return nil, nil
}
oTFR.iterators[i] = segment.NewUnadornedPostingsIteratorFrom1Hit(docNum1HitLast)

continue OUTER
}
@ -263,11 +250,7 @@ OUTER:

if len(actualBMs) == 1 {
// If we've only 1 actual bitmap, then that's our result.
oTFR.iterators[i], err = zap.PostingsIteratorFromBitmap(
actualBMs[0], false, false)
if err != nil {
return nil, nil
}
oTFR.iterators[i] = segment.NewUnadornedPostingsIteratorFromBitmap(actualBMs[0])

continue OUTER
}
@ -279,11 +262,7 @@ OUTER:
bm.And(actualBM)
}

oTFR.iterators[i], err = zap.PostingsIteratorFromBitmap(
bm, false, false)
if err != nil {
return nil, nil
}
oTFR.iterators[i] = segment.NewUnadornedPostingsIteratorFromBitmap(bm)
}

return oTFR, nil
@ -337,13 +316,13 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro
var cMax uint64

for _, tfr := range o.tfrs {
itr, ok := tfr.iterators[i].(*zap.PostingsIterator)
itr, ok := tfr.iterators[i].(segment.OptimizablePostingsIterator)
if !ok {
return nil, nil
}

if itr.ActualBM != nil {
c := itr.ActualBM.GetCardinality()
if itr.ActualBitmap() != nil {
c := itr.ActualBitmap().GetCardinality()
if cMax < c {
cMax = c
}
@ -379,7 +358,7 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro
actualBMs = actualBMs[:0]

for _, tfr := range o.tfrs {
itr, ok := tfr.iterators[i].(*zap.PostingsIterator)
itr, ok := tfr.iterators[i].(segment.OptimizablePostingsIterator)
if !ok {
return nil, nil
}
@ -390,8 +369,8 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro
continue
}

if itr.ActualBM != nil {
actualBMs = append(actualBMs, itr.ActualBM)
if itr.ActualBitmap() != nil {
actualBMs = append(actualBMs, itr.ActualBitmap())
}
}

@ -410,10 +389,7 @@ func (o *OptimizeTFRDisjunctionUnadorned) Finish() (rv index.Optimized, err erro

bm.AddMany(docNums)

oTFR.iterators[i], err = zap.PostingsIteratorFromBitmap(bm, false, false)
if err != nil {
return nil, nil
}
oTFR.iterators[i] = segment.NewUnadornedPostingsIteratorFromBitmap(bm)
}

return oTFR, nil
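The conjunction optimization above boils down to AND-ing the iterators' underlying roaring bitmaps and wrapping the result in an unadorned iterator. A minimal standalone sketch of that core step, illustrative only and using the RoaringBitmap library directly rather than bleve's internal types:

package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
)

func main() {
	// Two postings lists as roaring bitmaps of document numbers.
	a := roaring.BitmapOf(1, 3, 5, 7)
	b := roaring.BitmapOf(3, 7, 9)

	// AND them the way the conjunction optimizer does before handing
	// the result to an unadorned postings iterator.
	bm := a.Clone()
	bm.And(b)

	fmt.Println(bm.GetCardinality()) // 2
	it := bm.Iterator()
	for it.HasNext() {
		fmt.Println(it.Next()) // 3, then 7
	}
}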
69  vendor/github.com/blevesearch/bleve/index/scorch/persister.go  (generated, vendored)
@ -32,12 +32,9 @@ import (
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/blevesearch/bleve/index/scorch/segment/zap"
bolt "github.com/etcd-io/bbolt"
bolt "go.etcd.io/bbolt"
)

var DefaultChunkFactor uint32 = 1024

// DefaultPersisterNapTimeMSec is kept to zero as this helps in direct
// persistence of segments with the default safe batch option.
// If the default safe batch option results in high number of
@ -253,7 +250,7 @@ func (s *Scorch) pausePersisterForMergerCatchUp(lastPersistedEpoch uint64,
persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers)

// Check the merger lag by counting the segment files on disk,
numFilesOnDisk, _ := s.diskFileStats()
numFilesOnDisk, _, _ := s.diskFileStats(nil)

// On finding fewer files on disk, persister takes a short pause
// for sufficient in-memory segments to pile up for the next
@ -280,7 +277,7 @@ func (s *Scorch) pausePersisterForMergerCatchUp(lastPersistedEpoch uint64,
// 2. The merger could be lagging behind on merging the disk files.
if numFilesOnDisk > uint64(po.PersisterNapUnderNumFiles) {
s.removeOldData()
numFilesOnDisk, _ = s.diskFileStats()
numFilesOnDisk, _, _ = s.diskFileStats(nil)
}

// Persister pause until the merger catches up to reduce the segment
@ -305,7 +302,7 @@ OUTER:
// let the watchers proceed if they lag behind
persistWatchers = notifyMergeWatchers(lastPersistedEpoch, persistWatchers)

numFilesOnDisk, _ = s.diskFileStats()
numFilesOnDisk, _, _ = s.diskFileStats(nil)
}

return lastMergedEpoch, persistWatchers
@ -360,13 +357,13 @@ var DefaultMinSegmentsForInMemoryMerge = 2
func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) (
bool, error) {
// collect the in-memory zap segments (SegmentBase instances)
var sbs []*zap.SegmentBase
var sbs []segment.Segment
var sbsDrops []*roaring.Bitmap
var sbsIndexes []int

for i, segmentSnapshot := range snapshot.segment {
if sb, ok := segmentSnapshot.segment.(*zap.SegmentBase); ok {
sbs = append(sbs, sb)
if _, ok := segmentSnapshot.segment.(segment.PersistedSegment); !ok {
sbs = append(sbs, segmentSnapshot.segment)
sbsDrops = append(sbsDrops, segmentSnapshot.deleted)
sbsIndexes = append(sbsIndexes, i)
}
@ -377,7 +374,7 @@ func (s *Scorch) persistSnapshotMaybeMerge(snapshot *IndexSnapshot) (
}

newSnapshot, newSegmentID, err := s.mergeSegmentBases(
snapshot, sbs, sbsDrops, sbsIndexes, DefaultChunkFactor)
snapshot, sbs, sbsDrops, sbsIndexes)
if err != nil {
return false, err
}
@ -459,13 +456,13 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) {
if err != nil {
return err
}
err = metaBucket.Put([]byte("type"), []byte(zap.Type))
err = metaBucket.Put(boltMetaDataSegmentTypeKey, []byte(s.segPlugin.Type()))
if err != nil {
return err
}
buf := make([]byte, binary.MaxVarintLen32)
binary.BigEndian.PutUint32(buf, zap.Version)
err = metaBucket.Put([]byte("version"), buf)
binary.BigEndian.PutUint32(buf, s.segPlugin.Version())
err = metaBucket.Put(boltMetaDataSegmentVersionKey, buf)
if err != nil {
return err
}
@ -494,11 +491,19 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) {
return err
}
switch seg := segmentSnapshot.segment.(type) {
case *zap.SegmentBase:
case segment.PersistedSegment:
path := seg.Path()
filename := strings.TrimPrefix(path, s.path+string(os.PathSeparator))
err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename))
if err != nil {
return err
}
filenames = append(filenames, filename)
case segment.UnpersistedSegment:
// need to persist this to disk
filename := zapFileName(segmentSnapshot.id)
path := s.path + string(os.PathSeparator) + filename
err = zap.PersistSegmentBase(seg, path)
err = seg.Persist(path)
if err != nil {
return fmt.Errorf("error persisting segment: %v", err)
}
@ -508,14 +513,7 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) {
return err
}
filenames = append(filenames, filename)
case *zap.Segment:
path := seg.Path()
filename := strings.TrimPrefix(path, s.path+string(os.PathSeparator))
err = snapshotSegmentBucket.Put(boltPathKey, []byte(filename))
if err != nil {
return err
}
filenames = append(filenames, filename)

default:
return fmt.Errorf("unknown segment type: %T", seg)
}
@ -553,7 +551,7 @@ func (s *Scorch) persistSnapshotDirect(snapshot *IndexSnapshot) (err error) {
}
}()
for segmentID, path := range newSegmentPaths {
newSegments[segmentID], err = zap.Open(path)
newSegments[segmentID], err = s.segPlugin.Open(path)
if err != nil {
return fmt.Errorf("error opening new segment at %s, %v", path, err)
}
@ -609,6 +607,8 @@ var boltPathKey = []byte{'p'}
var boltDeletedKey = []byte{'d'}
var boltInternalKey = []byte{'i'}
var boltMetaDataKey = []byte{'m'}
var boltMetaDataSegmentTypeKey = []byte("type")
var boltMetaDataSegmentVersionKey = []byte("version")

func (s *Scorch) loadFromBolt() error {
return s.rootBolt.View(func(tx *bolt.Tx) error {
@ -693,6 +693,23 @@ func (s *Scorch) loadSnapshot(snapshot *bolt.Bucket) (*IndexSnapshot, error) {
refs: 1,
creator: "loadSnapshot",
}
// first we look for the meta-data bucket, this will tell us
// which segment type/version was used for this snapshot
// all operations for this scorch will use this type/version
metaBucket := snapshot.Bucket(boltMetaDataKey)
if metaBucket == nil {
_ = rv.DecRef()
return nil, fmt.Errorf("meta-data bucket missing")
}
segmentType := string(metaBucket.Get(boltMetaDataSegmentTypeKey))
segmentVersion := binary.BigEndian.Uint32(
metaBucket.Get(boltMetaDataSegmentVersionKey))
err := s.loadSegmentPlugin(segmentType, segmentVersion)
if err != nil {
_ = rv.DecRef()
return nil, fmt.Errorf(
"unable to load correct segment wrapper: %v", err)
}
var running uint64
c := snapshot.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
@ -737,7 +754,7 @@ func (s *Scorch) loadSegment(segmentBucket *bolt.Bucket) (*SegmentSnapshot, erro
return nil, fmt.Errorf("segment path missing")
}
segmentPath := s.path + string(os.PathSeparator) + string(pathBytes)
segment, err := zap.Open(segmentPath)
segment, err := s.segPlugin.Open(segmentPath)
if err != nil {
return nil, fmt.Errorf("error opening bolt segment: %v", err)
}
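The new boltMetaDataSegmentTypeKey / boltMetaDataSegmentVersionKey entries make each persisted snapshot self-describing. A rough, illustrative sketch of reading those two keys back out of one snapshot bucket with bbolt; locating the snapshot bucket itself (under boltSnapshotsBucket) is left to the caller, and the function name here is made up:

package inspect

import (
	"encoding/binary"
	"fmt"

	bolt "go.etcd.io/bbolt"
)

// readSegmentMeta mirrors what loadSnapshot does above: given one snapshot
// bucket from root.bolt, return the segment type and version recorded in
// its meta-data sub-bucket.
func readSegmentMeta(snapshot *bolt.Bucket) (string, uint32, error) {
	meta := snapshot.Bucket([]byte{'m'}) // boltMetaDataKey
	if meta == nil {
		return "", 0, fmt.Errorf("meta-data bucket missing")
	}
	segType := string(meta.Get([]byte("type"))) // boltMetaDataSegmentTypeKey
	verBytes := meta.Get([]byte("version"))     // boltMetaDataSegmentVersionKey
	if len(verBytes) < 4 {
		return "", 0, fmt.Errorf("segment version missing")
	}
	return segType, binary.BigEndian.Uint32(verBytes), nil
}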
78  vendor/github.com/blevesearch/bleve/index/scorch/scorch.go  (generated, vendored)
@ -28,10 +28,9 @@ import (
"github.com/blevesearch/bleve/document"
"github.com/blevesearch/bleve/index"
"github.com/blevesearch/bleve/index/scorch/segment"
"github.com/blevesearch/bleve/index/scorch/segment/zap"
"github.com/blevesearch/bleve/index/store"
"github.com/blevesearch/bleve/registry"
bolt "github.com/etcd-io/bbolt"
bolt "go.etcd.io/bbolt"
)

const Name = "scorch"
@ -67,7 +66,6 @@ type Scorch struct {
persists chan *persistIntroduction
merges chan *segmentMerge
introducerNotifier chan *epochWatcher
revertToSnapshots chan *snapshotReversion
persisterNotifier chan *epochWatcher
rootBolt *bolt.DB
asyncTasks sync.WaitGroup
@ -78,6 +76,8 @@ type Scorch struct {
pauseLock sync.RWMutex

pauseCount uint64

segPlugin segment.Plugin
}

type internalStats struct {
@ -101,7 +101,25 @@ func NewScorch(storeName string,
nextSnapshotEpoch: 1,
closeCh: make(chan struct{}),
ineligibleForRemoval: map[string]bool{},
segPlugin: defaultSegmentPlugin,
}

// check if the caller has requested a specific segment type/version
forcedSegmentVersion, ok := config["forceSegmentVersion"].(int)
if ok {
forcedSegmentType, ok2 := config["forceSegmentType"].(string)
if !ok2 {
return nil, fmt.Errorf(
"forceSegmentVersion set to %d, must also specify forceSegmentType", forcedSegmentVersion)
}

err := rv.loadSegmentPlugin(forcedSegmentType,
uint32(forcedSegmentVersion))
if err != nil {
return nil, err
}
}

rv.root = &IndexSnapshot{parent: rv, refs: 1, creator: "NewScorch"}
ro, ok := config["read_only"].(bool)
if ok {
@ -221,8 +239,8 @@ func (s *Scorch) openBolt() error {
s.persists = make(chan *persistIntroduction)
s.merges = make(chan *segmentMerge)
s.introducerNotifier = make(chan *epochWatcher, 1)
s.revertToSnapshots = make(chan *snapshotReversion)
s.persisterNotifier = make(chan *epochWatcher, 1)
s.closeCh = make(chan struct{})

if !s.readOnly && s.path != "" {
err := s.removeOldZapFiles() // Before persister or merger create any new files.
@ -263,7 +281,10 @@ func (s *Scorch) Close() (err error) {
err = s.rootBolt.Close()
s.rootLock.Lock()
if s.root != nil {
_ = s.root.DecRef()
err2 := s.root.DecRef()
if err == nil {
err = err2
}
}
s.root = nil
s.rootLock.Unlock()
@ -349,7 +370,7 @@ func (s *Scorch) Batch(batch *index.Batch) (err error) {
var newSegment segment.Segment
var bufBytes uint64
if len(analysisResults) > 0 {
newSegment, bufBytes, err = zap.AnalysisResultsToSegmentBase(analysisResults, DefaultChunkFactor)
newSegment, bufBytes, err = s.segPlugin.New(analysisResults)
if err != nil {
return err
}
@ -466,8 +487,9 @@ func (s *Scorch) Stats() json.Marshaler {
return &s.stats
}

func (s *Scorch) diskFileStats() (uint64, uint64) {
var numFilesOnDisk, numBytesUsedDisk uint64
func (s *Scorch) diskFileStats(rootSegmentPaths map[string]struct{}) (uint64,
uint64, uint64) {
var numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot uint64
if s.path != "" {
finfos, err := ioutil.ReadDir(s.path)
if err == nil {
@ -475,24 +497,47 @@ func (s *Scorch) diskFileStats() (uint64, uint64) {
if !finfo.IsDir() {
numBytesUsedDisk += uint64(finfo.Size())
numFilesOnDisk++
if rootSegmentPaths != nil {
fname := s.path + string(os.PathSeparator) + finfo.Name()
if _, fileAtRoot := rootSegmentPaths[fname]; fileAtRoot {
numBytesOnDiskByRoot += uint64(finfo.Size())
}
}
}
}
return numFilesOnDisk, numBytesUsedDisk
}
}
// if no root files path given, then consider all disk files.
if rootSegmentPaths == nil {
return numFilesOnDisk, numBytesUsedDisk, numBytesUsedDisk
}

return numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot
}

func (s *Scorch) rootDiskSegmentsPaths() map[string]struct{} {
rv := make(map[string]struct{}, len(s.root.segment))
for _, segmentSnapshot := range s.root.segment {
if seg, ok := segmentSnapshot.segment.(segment.PersistedSegment); ok {
rv[seg.Path()] = struct{}{}
}
}
return rv
}

func (s *Scorch) StatsMap() map[string]interface{} {
m := s.stats.ToMap()

numFilesOnDisk, numBytesUsedDisk := s.diskFileStats()
s.rootLock.RLock()
rootSegPaths := s.rootDiskSegmentsPaths()
m["CurFilesIneligibleForRemoval"] = uint64(len(s.ineligibleForRemoval))
s.rootLock.RUnlock()

numFilesOnDisk, numBytesUsedDisk, numBytesOnDiskByRoot := s.diskFileStats(rootSegPaths)

m["CurOnDiskBytes"] = numBytesUsedDisk
m["CurOnDiskFiles"] = numFilesOnDisk

s.rootLock.RLock()
m["CurFilesIneligibleForRemoval"] = uint64(len(s.ineligibleForRemoval))
s.rootLock.RUnlock()
// TODO: consider one day removing these backwards compatible
// names for apps using the old names
m["updates"] = m["TotUpdates"]
@ -507,8 +552,11 @@ func (s *Scorch) StatsMap() map[string]interface{} {
m["num_items_introduced"] = m["TotIntroducedItems"]
m["num_items_persisted"] = m["TotPersistedItems"]
m["num_recs_to_persist"] = m["TotItemsToPersist"]
m["num_bytes_used_disk"] = m["CurOnDiskBytes"]
m["num_files_on_disk"] = m["CurOnDiskFiles"]
// total disk bytes found in index directory inclusive of older snapshots
m["num_bytes_used_disk"] = numBytesUsedDisk
// total disk bytes by the latest root index, exclusive of older snapshots
m["num_bytes_used_disk_by_root"] = numBytesOnDiskByRoot
m["num_files_on_disk"] = numFilesOnDisk
m["num_root_memorysegments"] = m["TotMemorySegmentsAtRoot"]
m["num_root_filesegments"] = m["TotFileSegmentsAtRoot"]
m["num_persister_nap_pause_completed"] = m["TotPersisterNapPauseCompleted"]
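NewScorch now understands two optional config keys, forceSegmentType and forceSegmentVersion, validated through loadSegmentPlugin. A hedged sketch of passing them when constructing the index directly; the path key, queue size, and surrounding setup are illustrative assumptions, only the two force* keys come from the code above:

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/index"
	"github.com/blevesearch/bleve/index/scorch"
)

func main() {
	config := map[string]interface{}{
		"path": "/tmp/example.scorch", // assumed index directory
		// Pin snapshots to the zap v11 format instead of the default plugin:
		"forceSegmentType":    "zap",
		"forceSegmentVersion": 11,
	}
	idx, err := scorch.NewScorch(scorch.Name, config, index.NewAnalysisQueue(4))
	if err != nil {
		fmt.Println("unsupported segment type/version:", err)
		return
	}
	_ = idx // Open(), Batch(), etc. would follow in real use
}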
8  vendor/github.com/blevesearch/bleve/index/scorch/segment/empty.go  (generated, vendored)
@ -105,10 +105,6 @@ func (e *EmptyDictionaryIterator) Contains(key []byte) (bool, error) {
return false, nil
}

func (e *EmptyPostingsIterator) Advance(uint64) (Posting, error) {
return nil, nil
}

type EmptyPostingsList struct{}

func (e *EmptyPostingsList) Iterator(includeFreq, includeNorm, includeLocations bool,
@ -130,6 +126,10 @@ func (e *EmptyPostingsIterator) Next() (Posting, error) {
return nil, nil
}

func (e *EmptyPostingsIterator) Advance(uint64) (Posting, error) {
return nil, nil
}

func (e *EmptyPostingsIterator) Size() int {
return 0
}
58  vendor/github.com/blevesearch/bleve/index/scorch/segment/plugin.go  (generated, vendored, new file)
@ -0,0 +1,58 @@
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package segment

import (
"github.com/RoaringBitmap/roaring"
"github.com/blevesearch/bleve/index"
)

// Plugin represents the essential functions required by a package to plug in
// it's segment implementation
type Plugin interface {

// Type is the name for this segment plugin
Type() string

// Version is a numeric value identifying a specific version of this type.
// When incompatible changes are made to a particular type of plugin, the
// version must be incremented.
Version() uint32

// New takes a set of AnalysisResults and turns them into a new Segment
New(results []*index.AnalysisResult) (Segment, uint64, error)

// Open attempts to open the file at the specified path and
// return the corresponding Segment
Open(path string) (Segment, error)

// Merge takes a set of Segments, and creates a new segment on disk at
// the specified path.
// Drops is a set of bitmaps (one for each segment) indicating which
// documents can be dropped from the segments during the merge.
// If the closeCh channel is closed, Merge will cease doing work at
// the next opportunity, and return an error (closed).
// StatsReporter can optionally be provided, in which case progress
// made during the merge is reported while operation continues.
// Returns:
// A slice of new document numbers (one for each input segment),
// this allows the caller to know a particular document's new
// document number in the newly merged segment.
// The number of bytes written to the new segment file.
// An error, if any occurred.
Merge(segments []Segment, drops []*roaring.Bitmap, path string,
closeCh chan struct{}, s StatsReporter) (
[][]uint64, uint64, error)
}
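For a sense of scale, a skeletal implementation of this interface might look like the following. Every method here is a stub and the names are made up for illustration; a real plugin (such as the zap packages registered later in this diff) does the actual encoding work.

package myseg

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
	"github.com/blevesearch/bleve/index"
	"github.com/blevesearch/bleve/index/scorch/segment"
)

// plugin is a hypothetical segment format; only its shape matters here.
type plugin struct{}

func (p *plugin) Type() string    { return "example" }
func (p *plugin) Version() uint32 { return 1 }

func (p *plugin) New(results []*index.AnalysisResult) (segment.Segment, uint64, error) {
	return nil, 0, fmt.Errorf("not implemented")
}

func (p *plugin) Open(path string) (segment.Segment, error) {
	return nil, fmt.Errorf("not implemented")
}

func (p *plugin) Merge(segments []segment.Segment, drops []*roaring.Bitmap, path string,
	closeCh chan struct{}, s segment.StatsReporter) ([][]uint64, uint64, error) {
	return nil, 0, fmt.Errorf("not implemented")
}

// compile-time check that plugin satisfies segment.Plugin
var _ segment.Plugin = (*plugin)(nil)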
16  vendor/github.com/blevesearch/bleve/index/scorch/segment/segment.go  (generated, vendored)
@ -50,6 +50,16 @@ type Segment interface {
DecRef() error
}

type UnpersistedSegment interface {
Segment
Persist(path string) error
}

type PersistedSegment interface {
Segment
Path() string
}

type TermDictionary interface {
PostingsList(term []byte, except *roaring.Bitmap, prealloc PostingsList) (PostingsList, error)

@ -96,6 +106,12 @@ type PostingsIterator interface {
Size() int
}

type OptimizablePostingsIterator interface {
ActualBitmap() *roaring.Bitmap
DocNum1Hit() (uint64, bool)
ReplaceActual(*roaring.Bitmap)
}

type Posting interface {
Number() uint64

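These two segment interfaces are what persistSnapshotDirect switches on earlier in this diff: persisted segments already know their on-disk path, unpersisted ones can be asked to write themselves out. A condensed, illustrative sketch of that dispatch, with the real code's error handling and bolt bookkeeping omitted:

package example

import (
	"fmt"

	"github.com/blevesearch/bleve/index/scorch/segment"
)

// persistOrRecord mirrors the type switch used by the persister.
func persistOrRecord(seg segment.Segment, path string) error {
	switch s := seg.(type) {
	case segment.PersistedSegment:
		// Already on disk; the real code records s.Path() in bolt.
		fmt.Println("already persisted at", s.Path())
		return nil
	case segment.UnpersistedSegment:
		// Still in memory; ask the segment to write itself out.
		return s.Persist(path)
	default:
		return fmt.Errorf("unknown segment type: %T", seg)
	}
}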
148  vendor/github.com/blevesearch/bleve/index/scorch/segment/unadorned.go  (generated, vendored, new file)
@ -0,0 +1,148 @@
// Copyright (c) 2020 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package segment

import (
"github.com/RoaringBitmap/roaring"
"math"
"reflect"
)

var reflectStaticSizeUnadornedPostingsIteratorBitmap int
var reflectStaticSizeUnadornedPostingsIterator1Hit int
var reflectStaticSizeUnadornedPosting int


func init() {
var pib UnadornedPostingsIteratorBitmap
reflectStaticSizeUnadornedPostingsIteratorBitmap = int(reflect.TypeOf(pib).Size())
var pi1h UnadornedPostingsIterator1Hit
reflectStaticSizeUnadornedPostingsIterator1Hit = int(reflect.TypeOf(pi1h).Size())
var up UnadornedPosting
reflectStaticSizeUnadornedPosting = int(reflect.TypeOf(up).Size())
}

type UnadornedPostingsIteratorBitmap struct{
actual roaring.IntPeekable
actualBM *roaring.Bitmap
}

func (i *UnadornedPostingsIteratorBitmap) Next() (Posting, error) {
return i.nextAtOrAfter(0)
}

func (i *UnadornedPostingsIteratorBitmap) Advance(docNum uint64) (Posting, error) {
return i.nextAtOrAfter(docNum)
}

func (i *UnadornedPostingsIteratorBitmap) nextAtOrAfter(atOrAfter uint64) (Posting, error) {
docNum, exists := i.nextDocNumAtOrAfter(atOrAfter)
if !exists {
return nil, nil
}
return UnadornedPosting(docNum), nil
}

func (i *UnadornedPostingsIteratorBitmap) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool) {
if i.actual == nil || !i.actual.HasNext() {
return 0, false
}
i.actual.AdvanceIfNeeded(uint32(atOrAfter))

if !i.actual.HasNext() {
return 0, false // couldn't find anything
}

return uint64(i.actual.Next()), true
}

func (i *UnadornedPostingsIteratorBitmap) Size() int {
return reflectStaticSizeUnadornedPostingsIteratorBitmap
}

func NewUnadornedPostingsIteratorFromBitmap(bm *roaring.Bitmap) PostingsIterator {
return &UnadornedPostingsIteratorBitmap{
actualBM: bm,
actual: bm.Iterator(),
}
}

const docNum1HitFinished = math.MaxUint64

type UnadornedPostingsIterator1Hit struct{
docNum uint64
}

func (i *UnadornedPostingsIterator1Hit) Next() (Posting, error) {
return i.nextAtOrAfter(0)
}

func (i *UnadornedPostingsIterator1Hit) Advance(docNum uint64) (Posting, error) {
return i.nextAtOrAfter(docNum)
}

func (i *UnadornedPostingsIterator1Hit) nextAtOrAfter(atOrAfter uint64) (Posting, error) {
docNum, exists := i.nextDocNumAtOrAfter(atOrAfter)
if !exists {
return nil, nil
}
return UnadornedPosting(docNum), nil
}

func (i *UnadornedPostingsIterator1Hit) nextDocNumAtOrAfter(atOrAfter uint64) (uint64, bool) {
if i.docNum == docNum1HitFinished {
return 0, false
}
if i.docNum < atOrAfter {
// advanced past our 1-hit
i.docNum = docNum1HitFinished // consume our 1-hit docNum
return 0, false
}
docNum := i.docNum
i.docNum = docNum1HitFinished // consume our 1-hit docNum
return docNum, true
}

func (i *UnadornedPostingsIterator1Hit) Size() int {
return reflectStaticSizeUnadornedPostingsIterator1Hit
}

func NewUnadornedPostingsIteratorFrom1Hit(docNum1Hit uint64) PostingsIterator {
return &UnadornedPostingsIterator1Hit{
docNum1Hit,
}
}

type UnadornedPosting uint64

func (p UnadornedPosting) Number() uint64 {
return uint64(p)
}

func (p UnadornedPosting) Frequency() uint64 {
return 0
}

func (p UnadornedPosting) Norm() float64 {
return 0
}

func (p UnadornedPosting) Locations() []Location {
return nil
}

func (p UnadornedPosting) Size() int {
return reflectStaticSizeUnadornedPosting
}
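A small usage sketch of the bitmap-backed variant, illustrative only: wrap a roaring bitmap and walk the postings until Next returns a nil Posting, which is how the optimizers earlier in this diff consume it.

package main

import (
	"fmt"

	"github.com/RoaringBitmap/roaring"
	"github.com/blevesearch/bleve/index/scorch/segment"
)

func main() {
	bm := roaring.BitmapOf(2, 4, 8)
	itr := segment.NewUnadornedPostingsIteratorFromBitmap(bm)
	for {
		p, err := itr.Next()
		if err != nil || p == nil {
			break // nil posting signals exhaustion
		}
		fmt.Println(p.Number()) // 2, 4, 8
	}
}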
77  vendor/github.com/blevesearch/bleve/index/scorch/segment_plugin.go  (generated, vendored, new file)
@ -0,0 +1,77 @@
// Copyright (c) 2019 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package scorch

import (
"fmt"

"github.com/blevesearch/bleve/index/scorch/segment"

zapv11 "github.com/blevesearch/zap/v11"
zapv12 "github.com/blevesearch/zap/v12"
)

var supportedSegmentPlugins map[string]map[uint32]segment.Plugin
var defaultSegmentPlugin segment.Plugin

func init() {
ResetPlugins()
RegisterPlugin(zapv12.Plugin(), false)
RegisterPlugin(zapv11.Plugin(), true)
}

func ResetPlugins() {
supportedSegmentPlugins = map[string]map[uint32]segment.Plugin{}
}

func RegisterPlugin(plugin segment.Plugin, makeDefault bool) {
if _, ok := supportedSegmentPlugins[plugin.Type()]; !ok {
supportedSegmentPlugins[plugin.Type()] = map[uint32]segment.Plugin{}
}
supportedSegmentPlugins[plugin.Type()][plugin.Version()] = plugin
if makeDefault {
defaultSegmentPlugin = plugin
}
}

func SupportedSegmentTypes() (rv []string) {
for k := range supportedSegmentPlugins {
rv = append(rv, k)
}
return
}

func SupportedSegmentTypeVersions(typ string) (rv []uint32) {
for k := range supportedSegmentPlugins[typ] {
rv = append(rv, k)
}
return rv
}

func (s *Scorch) loadSegmentPlugin(forcedSegmentType string,
forcedSegmentVersion uint32) error {
if versions, ok := supportedSegmentPlugins[forcedSegmentType]; ok {
if segPlugin, ok := versions[uint32(forcedSegmentVersion)]; ok {
s.segPlugin = segPlugin
return nil
}
return fmt.Errorf(
"unsupported version %d for segment type: %s, supported: %v",
forcedSegmentVersion, forcedSegmentType,
SupportedSegmentTypeVersions(forcedSegmentType))
}
return fmt.Errorf("unsupported segment type: %s, supported: %v",
forcedSegmentType, SupportedSegmentTypes())
}
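Because registration is package-level, an application can change which format newly created segments use before opening any index. A sketch under the assumption that the zap plugin reports its type as "zap"; the init above already registers v11 as the default, so this simply flips the default to v12:

package main

import (
	"fmt"

	"github.com/blevesearch/bleve/index/scorch"
	zapv12 "github.com/blevesearch/zap/v12"
)

func main() {
	// Make the v12 format the default for newly created segments.
	scorch.RegisterPlugin(zapv12.Plugin(), true)

	fmt.Println(scorch.SupportedSegmentTypes())             // e.g. [zap]
	fmt.Println(scorch.SupportedSegmentTypeVersions("zap")) // e.g. [11 12]
}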
132  vendor/github.com/blevesearch/bleve/index/scorch/snapshot_rollback.go  (generated, vendored)
@ -17,9 +17,10 @@ package scorch
import (
"fmt"
"log"
"os"

"github.com/blevesearch/bleve/index/scorch/segment"
bolt "github.com/etcd-io/bbolt"
bolt "go.etcd.io/bbolt"
)

type RollbackPoint struct {
@ -34,13 +35,22 @@ func (r *RollbackPoint) GetInternal(key []byte) []byte {
// RollbackPoints returns an array of rollback points available for
// the application to rollback to, with more recent rollback points
// (higher epochs) coming first.
func (s *Scorch) RollbackPoints() ([]*RollbackPoint, error) {
if s.rootBolt == nil {
return nil, fmt.Errorf("RollbackPoints: root is nil")
func RollbackPoints(path string) ([]*RollbackPoint, error) {
if len(path) == 0 {
return nil, fmt.Errorf("RollbackPoints: invalid path")
}

rootBoltPath := path + string(os.PathSeparator) + "root.bolt"
rootBoltOpt := &bolt.Options{
ReadOnly: true,
}
rootBolt, err := bolt.Open(rootBoltPath, 0600, rootBoltOpt)
if err != nil || rootBolt == nil {
return nil, err
}

// start a read-only bolt transaction
tx, err := s.rootBolt.Begin(false)
tx, err := rootBolt.Begin(false)
if err != nil {
return nil, fmt.Errorf("RollbackPoints: failed to start" +
" read-only transaction")
@ -49,6 +59,7 @@ func (s *Scorch) RollbackPoints() ([]*RollbackPoint, error) {
// read-only bolt transactions to be rolled back
defer func() {
_ = tx.Rollback()
_ = rootBolt.Close()
}()

snapshots := tx.Bucket(boltSnapshotsBucket)
@ -105,69 +116,98 @@ func (s *Scorch) RollbackPoints() ([]*RollbackPoint, error) {
return rollbackPoints, nil
}

// Rollback atomically and durably (if unsafeBatch is unset) brings
// the store back to the point in time as represented by the
// RollbackPoint. Rollback() should only be passed a RollbackPoint
// that came from the same store using the RollbackPoints() API.
func (s *Scorch) Rollback(to *RollbackPoint) error {
// Rollback atomically and durably brings the store back to the point
// in time as represented by the RollbackPoint.
// Rollback() should only be passed a RollbackPoint that came from the
// same store using the RollbackPoints() API along with the index path.
func Rollback(path string, to *RollbackPoint) error {
if to == nil {
return fmt.Errorf("Rollback: RollbackPoint is nil")
}

if s.rootBolt == nil {
return fmt.Errorf("Rollback: root is nil")
if len(path) == 0 {
return fmt.Errorf("Rollback: index path is empty")
}

revert := &snapshotReversion{}
rootBoltPath := path + string(os.PathSeparator) + "root.bolt"
rootBoltOpt := &bolt.Options{
ReadOnly: false,
}
rootBolt, err := bolt.Open(rootBoltPath, 0600, rootBoltOpt)
if err != nil || rootBolt == nil {
return err
}
defer func() {
err1 := rootBolt.Close()
if err1 != nil && err == nil {
err = err1
}
}()

s.rootLock.Lock()

err := s.rootBolt.View(func(tx *bolt.Tx) error {
// pick all the younger persisted epochs in bolt store
// including the target one.
var found bool
var eligibleEpochs []uint64
err = rootBolt.View(func(tx *bolt.Tx) error {
snapshots := tx.Bucket(boltSnapshotsBucket)
if snapshots == nil {
return fmt.Errorf("Rollback: no snapshots available")
return nil
}

pos := segment.EncodeUvarintAscending(nil, to.epoch)

snapshot := snapshots.Bucket(pos)
if snapshot == nil {
return fmt.Errorf("Rollback: snapshot not found")
}

indexSnapshot, err := s.loadSnapshot(snapshot)
sc := snapshots.Cursor()
for sk, _ := sc.Last(); sk != nil && !found; sk, _ = sc.Prev() {
_, snapshotEpoch, err := segment.DecodeUvarintAscending(sk)
if err != nil {
return fmt.Errorf("Rollback: unable to load snapshot: %v", err)
continue
}

// add segments referenced by loaded index snapshot to the
// ineligibleForRemoval map
for _, segSnap := range indexSnapshot.segment {
filename := zapFileName(segSnap.id)
s.ineligibleForRemoval[filename] = true
if snapshotEpoch == to.epoch {
found = true
}
eligibleEpochs = append(eligibleEpochs, snapshotEpoch)
}

revert.snapshot = indexSnapshot
revert.applied = make(chan error)
revert.persisted = make(chan error)

return nil
})

s.rootLock.Unlock()
if len(eligibleEpochs) == 0 {
return fmt.Errorf("Rollback: no persisted epochs found in bolt")
}
if !found {
return fmt.Errorf("Rollback: target epoch %d not found in bolt", to.epoch)
}

// start a write transaction
tx, err := rootBolt.Begin(true)
if err != nil {
return err
}

// introduce the reversion
s.revertToSnapshots <- revert
defer func() {
if err == nil {
err = tx.Commit()
} else {
_ = tx.Rollback()
}
if err == nil {
err = rootBolt.Sync()
}
}()

// block until this snapshot is applied
err = <-revert.applied
snapshots := tx.Bucket(boltSnapshotsBucket)
if snapshots == nil {
return nil
}
for _, epoch := range eligibleEpochs {
k := segment.EncodeUvarintAscending(nil, epoch)
if err != nil {
return fmt.Errorf("Rollback: failed with err: %v", err)
continue
}
if epoch == to.epoch {
// return here as it already processed until the given epoch
return nil
}
err = snapshots.DeleteBucket(k)
if err == bolt.ErrBucketNotFound {
err = nil
}
}

return <-revert.persisted
return err
}
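With the move to package-level functions, rollback no longer needs an open Scorch instance; it operates directly on the index directory (which should not be open elsewhere while rolling back). An illustrative sketch, with a hypothetical index path:

package main

import (
	"fmt"
	"log"

	"github.com/blevesearch/bleve/index/scorch"
)

func main() {
	path := "/data/myindex" // hypothetical index directory containing root.bolt

	points, err := scorch.RollbackPoints(path)
	if err != nil || len(points) == 0 {
		log.Fatal("no rollback points: ", err)
	}

	// points[0] is the most recent persisted snapshot; roll back to it.
	if err := scorch.Rollback(path, points[0]); err != nil {
		log.Fatal(err)
	}
	fmt.Println("rolled back")
}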
2  vendor/github.com/blevesearch/bleve/index/scorch/stats.go  (generated, vendored)
@ -102,6 +102,8 @@ type Stats struct {
TotFileMergeZapEnd uint64
TotFileMergeZapTime uint64
MaxFileMergeZapTime uint64
TotFileMergeZapIntroductionTime uint64
MaxFileMergeZapIntroductionTime uint64

TotFileMergeIntroductions uint64
TotFileMergeIntroductionsDone uint64
2  vendor/github.com/blevesearch/bleve/index/store/boltdb/iterator.go  (generated, vendored)
@ -17,7 +17,7 @@ package boltdb
import (
"bytes"

bolt "github.com/etcd-io/bbolt"
bolt "go.etcd.io/bbolt"
)

type Iterator struct {
2  vendor/github.com/blevesearch/bleve/index/store/boltdb/reader.go  (generated, vendored)
@ -16,7 +16,7 @@ package boltdb

import (
"github.com/blevesearch/bleve/index/store"
bolt "github.com/etcd-io/bbolt"
bolt "go.etcd.io/bbolt"
)

type Reader struct {
Some files were not shown because too many files have changed in this diff.