
libs: Remove db from tendermint in favor of tendermint/tm-cmn (#3811)

* Remove db from tendermint in favor of tendermint/tm-cmn

- remove db from `libs`
- update dependencies; there have been no breaking changes in the updated deps:
	- https://github.com/grpc/grpc-go/releases
	- https://github.com/golang/protobuf/releases

Signed-off-by: Marko Baricevic <marbar3778@yahoo.com>
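
The mechanical pattern repeated across the changed files below is a one-line import swap: the `dbm` alias is kept, so call sites do not change. A minimal before/after sketch (surrounding imports omitted for brevity):

// Before: the key-value store lived inside the tendermint repo.
import (
	dbm "github.com/tendermint/tendermint/libs/db"
)

// After: the same package is consumed from tendermint/tm-cmn.
import (
	dbm "github.com/tendermint/tm-cmn/db"
)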

* changelog add

* gofmt

* more gofmt
Marko authored 5 years ago, committed by Anton Kaliaev
commit 816dfce8fe
69 changed files with 79 additions and 5184 deletions
  1. CHANGELOG_PENDING.md (+2, -0)
  2. abci/example/kvstore/kvstore.go (+1, -1)
  3. abci/example/kvstore/persistent_kvstore.go (+1, -1)
  4. blockchain/reactor_test.go (+1, -1)
  5. blockchain/store.go (+1, -1)
  6. blockchain/store_test.go (+3, -2)
  7. consensus/common_test.go (+1, -1)
  8. consensus/mempool_test.go (+1, -1)
  9. consensus/reactor_test.go (+1, -1)
  10. consensus/replay.go (+1, -1)
  11. consensus/replay_file.go (+1, -1)
  12. consensus/replay_test.go (+1, -1)
  13. consensus/wal_generator.go (+1, -1)
  14. evidence/pool.go (+1, -1)
  15. evidence/pool_test.go (+1, -1)
  16. evidence/reactor_test.go (+1, -1)
  17. evidence/store.go (+1, -1)
  18. evidence/store_test.go (+1, -1)
  19. go.mod (+7, -12)
  20. go.sum (+31, -11)
  21. libs/db/backend_test.go (+0, -223)
  22. libs/db/boltdb.go (+0, -349)
  23. libs/db/boltdb_test.go (+0, -37)
  24. libs/db/c_level_db.go (+0, -325)
  25. libs/db/c_level_db_test.go (+0, -110)
  26. libs/db/common_test.go (+0, -256)
  27. libs/db/db.go (+0, -70)
  28. libs/db/db_test.go (+0, -194)
  29. libs/db/fsdb.go (+0, -270)
  30. libs/db/go_level_db.go (+0, -333)
  31. libs/db/go_level_db_test.go (+0, -45)
  32. libs/db/mem_batch.go (+0, -74)
  33. libs/db/mem_db.go (+0, -255)
  34. libs/db/prefix_db.go (+0, -336)
  35. libs/db/prefix_db_test.go (+0, -192)
  36. libs/db/remotedb/doc.go (+0, -37)
  37. libs/db/remotedb/grpcdb/client.go (+0, -22)
  38. libs/db/remotedb/grpcdb/doc.go (+0, -32)
  39. libs/db/remotedb/grpcdb/example_test.go (+0, -52)
  40. libs/db/remotedb/grpcdb/server.go (+0, -200)
  41. libs/db/remotedb/proto/defs.pb.go (+0, -914)
  42. libs/db/remotedb/proto/defs.proto (+0, -71)
  43. libs/db/remotedb/remotedb.go (+0, -266)
  44. libs/db/remotedb/remotedb_test.go (+0, -123)
  45. libs/db/remotedb/test.crt (+0, -25)
  46. libs/db/remotedb/test.key (+0, -27)
  47. libs/db/types.go (+0, -136)
  48. libs/db/util.go (+0, -45)
  49. libs/db/util_test.go (+0, -104)
  50. lite/dbprovider.go (+1, -1)
  51. lite/dynamic_verifier_test.go (+1, -1)
  52. lite/provider_test.go (+1, -1)
  53. lite/proxy/verifier.go (+1, -1)
  54. node/node.go (+1, -1)
  55. node/node_test.go (+1, -1)
  56. p2p/trust/store.go (+1, -1)
  57. p2p/trust/store_test.go (+1, -1)
  58. rpc/core/pipe.go (+1, -1)
  59. state/execution.go (+1, -1)
  60. state/export_test.go (+1, -1)
  61. state/helpers_test.go (+1, -1)
  62. state/state_test.go (+1, -1)
  63. state/store.go (+1, -1)
  64. state/store_test.go (+1, -1)
  65. state/tx_filter_test.go (+1, -1)
  66. state/txindex/indexer_service_test.go (+1, -1)
  67. state/txindex/kv/kv.go (+1, -1)
  68. state/txindex/kv/kv_test.go (+1, -1)
  69. state/validation.go (+1, -1)
CHANGELOG_PENDING.md (+2, -0)

@ -14,10 +14,12 @@ program](https://hackerone.com/tendermint).
- Apps
- Go API
- [libs] \#3811 Remove `db` from libs in favor of `https://github.com/tendermint/tm-cmn`
### FEATURES:
### IMPROVEMENTS:
- [abci] \#3809 Recover from application panics in `server/socket_server.go` to allow socket cleanup (@ruseinov)
### BUG FIXES:
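
For external Go API consumers the same migration applies: require the new module and rewrite the import path; the db API itself is not changed by this commit. For reference, the module line as pinned in this commit's go.mod (a newer pseudo-version may exist by now):

require github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb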

abci/example/kvstore/kvstore.go (+1, -1)

@ -9,8 +9,8 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-cmn/db"
)
var (


abci/example/kvstore/persistent_kvstore.go (+1, -1)

@ -9,8 +9,8 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-cmn/db"
)
const (


blockchain/reactor_test.go (+1, -1)

@ -11,7 +11,6 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/p2p"
@ -19,6 +18,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
dbm "github.com/tendermint/tm-cmn/db"
)
var config *cfg.Config


blockchain/store.go (+1, -1)

@ -5,7 +5,7 @@ import (
"sync"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
dbm "github.com/tendermint/tm-cmn/db"
"github.com/tendermint/tendermint/types"
)


blockchain/store_test.go (+3, -2)

@ -11,10 +11,11 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tm-cmn/db"
dbm "github.com/tendermint/tm-cmn/db"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
sm "github.com/tendermint/tendermint/state"


consensus/common_test.go (+1, -1)

@ -24,7 +24,6 @@ import (
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/consensus/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
mempl "github.com/tendermint/tendermint/mempool"
@ -33,6 +32,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
dbm "github.com/tendermint/tm-cmn/db"
)
const (


consensus/mempool_test.go (+1, -1)

@ -11,10 +11,10 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
mempl "github.com/tendermint/tendermint/mempool"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
// for testing


consensus/reactor_test.go (+1, -1)

@ -19,13 +19,13 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/mock"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
//----------------------------------------------


consensus/replay.go (+1, -1)

@ -13,8 +13,8 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
//auto "github.com/tendermint/tendermint/libs/autofile"
dbm "github.com/tendermint/tm-cmn/db"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/proxy"


consensus/replay_file.go (+1, -1)

@ -10,11 +10,11 @@ import (
"strings"
"github.com/pkg/errors"
dbm "github.com/tendermint/tm-cmn/db"
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/proxy"


consensus/replay_test.go (+1, -1)

@ -22,7 +22,6 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/privval"
@ -31,6 +30,7 @@ import (
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestMain(m *testing.M) {


consensus/wal_generator.go (+1, -1)

@ -15,13 +15,13 @@ import (
bc "github.com/tendermint/tendermint/blockchain"
cfg "github.com/tendermint/tendermint/config"
cmn "github.com/tendermint/tendermint/libs/common"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/mock"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-cmn/db"
)
// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a


evidence/pool.go (+1, -1)

@ -5,8 +5,8 @@ import (
"sync"
clist "github.com/tendermint/tendermint/libs/clist"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-cmn/db"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"


evidence/pool_test.go (+1, -1)

@ -7,10 +7,10 @@ import (
"github.com/stretchr/testify/assert"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestMain(m *testing.M) {


evidence/reactor_test.go (+1, -1)

@ -11,10 +11,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/secp256k1"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
// evidenceLogger is a TestingLogger which uses a different


evidence/store.go (+1, -1)

@ -3,8 +3,8 @@ package evidence
import (
"fmt"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
/*


evidence/store_test.go (+1, -1)

@ -4,8 +4,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
//-------------------------------------------


go.mod (+7, -12)

@ -3,30 +3,26 @@ module github.com/tendermint/tendermint
go 1.12
require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/VividCortex/gohistogram v1.0.0 // indirect
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect
github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d
github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a
github.com/etcd-io/bbolt v1.3.2
github.com/fortytw2/leaktest v1.2.0
github.com/go-kit/kit v0.6.0
github.com/go-logfmt/logfmt v0.3.0
github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.2.1
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect
github.com/golang/protobuf v1.3.0
github.com/golang/protobuf v1.3.2
github.com/google/gofuzz v1.0.0 // indirect
github.com/gorilla/websocket v1.2.0
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jmhodges/levigo v1.0.0
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect
github.com/magiconair/properties v1.8.0
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.1.2 // indirect
github.com/pelletier/go-toml v1.2.0 // indirect
github.com/pkg/errors v0.8.0
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.9.1
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect
github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39 // indirect
@ -39,12 +35,11 @@ require (
github.com/spf13/jwalterweatherman v1.0.0 // indirect
github.com/spf13/pflag v1.0.3 // indirect
github.com/spf13/viper v1.0.0
github.com/stretchr/testify v1.2.2
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965
github.com/stretchr/testify v1.3.0
github.com/tendermint/go-amino v0.14.1
go.etcd.io/bbolt v1.3.3 // indirect
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd
github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 // indirect
google.golang.org/grpc v1.13.0
google.golang.org/grpc v1.22.0
)

go.sum (+31, -11)

@ -1,3 +1,4 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE=
@ -15,11 +16,13 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/etcd-io/bbolt v1.3.2 h1:RLRQ0TKLX7DlBRXAJHvbmXL17Q3KNnTBtZ9B6Qo+/Y0=
github.com/etcd-io/bbolt v1.3.2/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
@ -34,13 +37,14 @@ github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.0 h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk=
github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/gorilla/websocket v1.2.0 h1:VJtLvh6VQym50czpZzx07z/kw9EgAxI3x1ZB8taTMQQ=
@ -73,8 +77,9 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno=
@ -101,35 +106,49 @@ github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.0.0 h1:RUA/ghS2i64rlnn4ydTfblY8Og8QzcPtCcHvgMn+w/I=
github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e h1:91EeXI4y4ShkyzkMqZ7QP/ZTIqwXp3RuDu5WFzxcFAs=
github.com/syndtr/goleveldb v0.0.0-20181012014443-6b91fda63f2e/go.mod h1:Z4AUp2Km+PwemOoO/VB5AOx9XSsIItzFjoJlOSiYmn0=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965 h1:1oFLiOyVl+W7bnBzGhf7BbIv9loSFQcieWWYIjLqcAw=
github.com/syndtr/goleveldb v1.0.1-0.20190318030020-c3a204f8e965/go.mod h1:9OrXJhf154huy1nPWmuSrkgjPUtUNhA+Zmy+6AESzuA=
github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk=
github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso=
github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb h1:t/HdvqJc9e1iJDl+hf8wQKfOo40aen+Rkqh4AwEaNsI=
github.com/tendermint/tm-cmn v0.0.0-20190716080004-dfcde30d5acb/go.mod h1:SLI3Mc+gRrorRsAXJArnHz4xmAdJT8O7Ns0NL4HslXE=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25 h1:jsG6UpNLt9iAsb0S2AGW28DveNzzgmbXR+ENoPjUeIU=
golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2 h1:67iHsV9djwGdZpdZNbLuQj6FOzCaZe3w+vhLjn5AcFA=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/grpc v1.13.0 h1:bHIbVsCwmvbArgCJmLdgOdHFXlKqTOVjbibbS19cXHc=
google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.22.0 h1:J0UbZOIrCAl+fpTOf8YLs4dJo8L/owV4LYVtAXQoPkw=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
@ -138,3 +157,4 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

libs/db/backend_test.go (+0, -223)

@ -1,223 +0,0 @@
package db
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
func cleanupDBDir(dir, name string) {
err := os.RemoveAll(filepath.Join(dir, name) + ".db")
if err != nil {
panic(err)
}
}
func testBackendGetSetDelete(t *testing.T, backend DBBackendType) {
// Default
dirname, err := ioutil.TempDir("", fmt.Sprintf("test_backend_%s_", backend))
require.Nil(t, err)
db := NewDB("testdb", backend, dirname)
defer cleanupDBDir(dirname, "testdb")
// A nonexistent key should return nil, even if the key is empty
require.Nil(t, db.Get([]byte("")))
// A nonexistent key should return nil, even if the key is nil
require.Nil(t, db.Get(nil))
// A nonexistent key should return nil.
key := []byte("abc")
require.Nil(t, db.Get(key))
// Set empty value.
db.Set(key, []byte(""))
require.NotNil(t, db.Get(key))
require.Empty(t, db.Get(key))
// Set nil value.
db.Set(key, nil)
require.NotNil(t, db.Get(key))
require.Empty(t, db.Get(key))
// Delete.
db.Delete(key)
require.Nil(t, db.Get(key))
}
func TestBackendsGetSetDelete(t *testing.T) {
for dbType := range backends {
testBackendGetSetDelete(t, dbType)
}
}
func withDB(t *testing.T, creator dbCreator, fn func(DB)) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
dir := os.TempDir()
db, err := creator(name, dir)
require.Nil(t, err)
defer cleanupDBDir(dir, name)
fn(db)
db.Close()
}
func TestBackendsNilKeys(t *testing.T) {
// Test all backends.
for dbType, creator := range backends {
withDB(t, creator, func(db DB) {
t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) {
// Nil keys are treated as the empty key for most operations.
expect := func(key, value []byte) {
if len(key) == 0 { // nil or empty
assert.Equal(t, db.Get(nil), db.Get([]byte("")))
assert.Equal(t, db.Has(nil), db.Has([]byte("")))
}
assert.Equal(t, db.Get(key), value)
assert.Equal(t, db.Has(key), value != nil)
}
// Not set
expect(nil, nil)
// Set nil value
db.Set(nil, nil)
expect(nil, []byte(""))
// Set empty value
db.Set(nil, []byte(""))
expect(nil, []byte(""))
// Set nil, Delete nil
db.Set(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.Delete(nil)
expect(nil, nil)
// Set nil, Delete empty
db.Set(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.Delete([]byte(""))
expect(nil, nil)
// Set empty, Delete nil
db.Set([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.Delete(nil)
expect(nil, nil)
// Set empty, Delete empty
db.Set([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.Delete([]byte(""))
expect(nil, nil)
// SetSync nil, DeleteSync nil
db.SetSync(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync(nil)
expect(nil, nil)
// SetSync nil, DeleteSync empty
db.SetSync(nil, []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync([]byte(""))
expect(nil, nil)
// SetSync empty, DeleteSync nil
db.SetSync([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync(nil)
expect(nil, nil)
// SetSync empty, DeleteSync empty
db.SetSync([]byte(""), []byte("abc"))
expect(nil, []byte("abc"))
db.DeleteSync([]byte(""))
expect(nil, nil)
})
})
}
}
func TestGoLevelDBBackend(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
db := NewDB(name, GoLevelDBBackend, "")
defer cleanupDBDir("", name)
_, ok := db.(*GoLevelDB)
assert.True(t, ok)
}
func TestDBIterator(t *testing.T) {
for dbType := range backends {
t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) {
testDBIterator(t, dbType)
})
}
}
func testDBIterator(t *testing.T, backend DBBackendType) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
dir := os.TempDir()
db := NewDB(name, backend, dir)
defer cleanupDBDir(dir, name)
for i := 0; i < 10; i++ {
if i != 6 { // but skip 6.
db.Set(int642Bytes(int64(i)), nil)
}
}
verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator")
verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator")
verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0")
verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64(nil), "reverse iterator from 10 (ex)")
verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0")
verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1")
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)")
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(9)), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)")
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(8)), []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)")
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6")
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7")
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8")
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7")
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8")
verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8")
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(5)), []int64{4}, "reverse iterator from 5 (ex) to 4")
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(6)), []int64{5, 4}, "reverse iterator from 6 (ex) to 4")
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(7)), []int64{5, 4}, "reverse iterator from 7 (ex) to 4")
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "reverse iterator from 6 (ex) to 5")
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "reverse iterator from 7 (ex) to 5")
verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "reverse iterator from 7 (ex) to 6")
verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1")
verifyIterator(t, db.ReverseIterator(int642Bytes(8), int642Bytes(9)), []int64{8}, "reverse iterator from 9 (ex) to 8")
verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4")
verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2")
verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64{3, 2}, "reverse iterator from 4 (ex) to 2")
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "reverse iterator from 2 (ex) to 4")
}
func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) {
var list []int64
for itr.Valid() {
list = append(list, bytes2Int64(itr.Key()))
itr.Next()
}
assert.Equal(t, expected, list, msg)
}

libs/db/boltdb.go (+0, -349)

@ -1,349 +0,0 @@
// +build boltdb
package db
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"github.com/etcd-io/bbolt"
)
var bucket = []byte("tm")
func init() {
registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) {
return NewBoltDB(name, dir)
}, false)
}
// BoltDB is a wrapper around etcd's fork of bolt
// (https://github.com/etcd-io/bbolt).
//
// NOTE: All operations (including Set, Delete) are synchronous by default. One
// can globally turn it off by using NoSync config option (not recommended).
//
// A single bucket ([]byte("tm")) is used per database instance. This could
// lead to performance issues when/if there are lots of keys.
type BoltDB struct {
db *bbolt.DB
}
// NewBoltDB returns a BoltDB with default options.
func NewBoltDB(name, dir string) (DB, error) {
return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions)
}
// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not
// supported because NewBoltDBWithOpts creates a global bucket.
func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) {
if opts.ReadOnly {
return nil, errors.New("ReadOnly: true is not supported")
}
dbPath := filepath.Join(dir, name+".db")
db, err := bbolt.Open(dbPath, os.ModePerm, opts)
if err != nil {
return nil, err
}
// create a global bucket
err = db.Update(func(tx *bbolt.Tx) error {
_, err := tx.CreateBucketIfNotExists(bucket)
return err
})
if err != nil {
return nil, err
}
return &BoltDB{db: db}, nil
}
func (bdb *BoltDB) Get(key []byte) (value []byte) {
key = nonEmptyKey(nonNilBytes(key))
err := bdb.db.View(func(tx *bbolt.Tx) error {
b := tx.Bucket(bucket)
if v := b.Get(key); v != nil {
value = append([]byte{}, v...)
}
return nil
})
if err != nil {
panic(err)
}
return
}
func (bdb *BoltDB) Has(key []byte) bool {
return bdb.Get(key) != nil
}
func (bdb *BoltDB) Set(key, value []byte) {
key = nonEmptyKey(nonNilBytes(key))
value = nonNilBytes(value)
err := bdb.db.Update(func(tx *bbolt.Tx) error {
b := tx.Bucket(bucket)
return b.Put(key, value)
})
if err != nil {
panic(err)
}
}
func (bdb *BoltDB) SetSync(key, value []byte) {
bdb.Set(key, value)
}
func (bdb *BoltDB) Delete(key []byte) {
key = nonEmptyKey(nonNilBytes(key))
err := bdb.db.Update(func(tx *bbolt.Tx) error {
return tx.Bucket(bucket).Delete(key)
})
if err != nil {
panic(err)
}
}
func (bdb *BoltDB) DeleteSync(key []byte) {
bdb.Delete(key)
}
func (bdb *BoltDB) Close() {
bdb.db.Close()
}
func (bdb *BoltDB) Print() {
stats := bdb.db.Stats()
fmt.Printf("%v\n", stats)
err := bdb.db.View(func(tx *bbolt.Tx) error {
tx.Bucket(bucket).ForEach(func(k, v []byte) error {
fmt.Printf("[%X]:\t[%X]\n", k, v)
return nil
})
return nil
})
if err != nil {
panic(err)
}
}
func (bdb *BoltDB) Stats() map[string]string {
stats := bdb.db.Stats()
m := make(map[string]string)
// Freelist stats
m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN)
m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN)
m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc)
m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse)
// Transaction stats
m["TxN"] = fmt.Sprintf("%v", stats.TxN)
m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN)
return m
}
// boltDBBatch buffers operations in a slice and dumps them to the underlying
// DB upon Write call.
type boltDBBatch struct {
db *BoltDB
ops []operation
}
// NewBatch returns a new batch.
func (bdb *BoltDB) NewBatch() Batch {
return &boltDBBatch{
ops: nil,
db: bdb,
}
}
// It is safe to modify the contents of the argument after Set returns but not
// before.
func (bdb *boltDBBatch) Set(key, value []byte) {
bdb.ops = append(bdb.ops, operation{opTypeSet, key, value})
}
// It is safe to modify the contents of the argument after Delete returns but
// not before.
func (bdb *boltDBBatch) Delete(key []byte) {
bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil})
}
// NOTE: the operation is synchronous (see BoltDB for reasons)
func (bdb *boltDBBatch) Write() {
err := bdb.db.db.Batch(func(tx *bbolt.Tx) error {
b := tx.Bucket(bucket)
for _, op := range bdb.ops {
key := nonEmptyKey(nonNilBytes(op.key))
switch op.opType {
case opTypeSet:
if putErr := b.Put(key, op.value); putErr != nil {
return putErr
}
case opTypeDelete:
if delErr := b.Delete(key); delErr != nil {
return delErr
}
}
}
return nil
})
if err != nil {
panic(err)
}
}
func (bdb *boltDBBatch) WriteSync() {
bdb.Write()
}
func (bdb *boltDBBatch) Close() {}
// WARNING: Any concurrent writes or reads will block until the iterator is
// closed.
func (bdb *BoltDB) Iterator(start, end []byte) Iterator {
tx, err := bdb.db.Begin(false)
if err != nil {
panic(err)
}
return newBoltDBIterator(tx, start, end, false)
}
// WARNING: Any concurrent writes or reads will block until the iterator is
// closed.
func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator {
tx, err := bdb.db.Begin(false)
if err != nil {
panic(err)
}
return newBoltDBIterator(tx, start, end, true)
}
// boltDBIterator allows you to iterate on range of keys/values given some
// start / end keys (nil & nil will result in doing full scan).
type boltDBIterator struct {
tx *bbolt.Tx
itr *bbolt.Cursor
start []byte
end []byte
currentKey []byte
currentValue []byte
isInvalid bool
isReverse bool
}
func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator {
itr := tx.Bucket(bucket).Cursor()
var ck, cv []byte
if isReverse {
if end == nil {
ck, cv = itr.Last()
} else {
_, _ = itr.Seek(end) // after key
ck, cv = itr.Prev() // return to end key
}
} else {
if start == nil {
ck, cv = itr.First()
} else {
ck, cv = itr.Seek(start)
}
}
return &boltDBIterator{
tx: tx,
itr: itr,
start: start,
end: end,
currentKey: ck,
currentValue: cv,
isReverse: isReverse,
isInvalid: false,
}
}
func (itr *boltDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
func (itr *boltDBIterator) Valid() bool {
if itr.isInvalid {
return false
}
// iterated to the end of the cursor
if len(itr.currentKey) == 0 {
itr.isInvalid = true
return false
}
if itr.isReverse {
if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 {
itr.isInvalid = true
return false
}
} else {
if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 {
itr.isInvalid = true
return false
}
}
// Valid
return true
}
func (itr *boltDBIterator) Next() {
itr.assertIsValid()
if itr.isReverse {
itr.currentKey, itr.currentValue = itr.itr.Prev()
} else {
itr.currentKey, itr.currentValue = itr.itr.Next()
}
}
func (itr *boltDBIterator) Key() []byte {
itr.assertIsValid()
return append([]byte{}, itr.currentKey...)
}
func (itr *boltDBIterator) Value() []byte {
itr.assertIsValid()
var value []byte
if itr.currentValue != nil {
value = append([]byte{}, itr.currentValue...)
}
return value
}
func (itr *boltDBIterator) Close() {
err := itr.tx.Rollback()
if err != nil {
panic(err)
}
}
func (itr *boltDBIterator) assertIsValid() {
if !itr.Valid() {
panic("Boltdb-iterator is invalid")
}
}
// nonEmptyKey returns a []byte("nil") if key is empty.
// WARNING: this may collide with a "nil" user key!
func nonEmptyKey(key []byte) []byte {
if len(key) == 0 {
return []byte("nil")
}
return key
}
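
The removed boltdb.go hides this backend behind the `boltdb` build tag, keeps all keys in a single "tm" bucket, and makes every write synchronous. A hedged usage sketch, assuming the backend moved to tm-cmn/db with the same constructor and build tag:

// Build with: go build -tags boltdb
package main

import (
	dbm "github.com/tendermint/tm-cmn/db" // assumed new home of the BoltDB backend
)

func main() {
	// NewBoltDB opens (or creates) <dir>/<name>.db with default bbolt options.
	db, err := dbm.NewBoltDB("evidence", "/tmp/data")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Writes are synchronous by default; SetSync simply calls Set.
	db.Set([]byte("height"), []byte("42"))
}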

libs/db/boltdb_test.go (+0, -37)

@ -1,37 +0,0 @@
// +build boltdb
package db
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestBoltDBNewBoltDB(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
dir := os.TempDir()
defer cleanupDBDir(dir, name)
db, err := NewBoltDB(name, dir)
require.NoError(t, err)
db.Close()
}
func BenchmarkBoltDBRandomReadsWrites(b *testing.B) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
db, err := NewBoltDB(name, "")
if err != nil {
b.Fatal(err)
}
defer func() {
db.Close()
cleanupDBDir("", name)
}()
benchmarkRandomReadsWrites(b, db)
}

libs/db/c_level_db.go (+0, -325)

@ -1,325 +0,0 @@
// +build cleveldb
package db
import (
"bytes"
"fmt"
"path/filepath"
"github.com/jmhodges/levigo"
)
func init() {
dbCreator := func(name string, dir string) (DB, error) {
return NewCLevelDB(name, dir)
}
registerDBCreator(CLevelDBBackend, dbCreator, false)
}
var _ DB = (*CLevelDB)(nil)
type CLevelDB struct {
db *levigo.DB
ro *levigo.ReadOptions
wo *levigo.WriteOptions
woSync *levigo.WriteOptions
}
func NewCLevelDB(name string, dir string) (*CLevelDB, error) {
dbPath := filepath.Join(dir, name+".db")
opts := levigo.NewOptions()
opts.SetCache(levigo.NewLRUCache(1 << 30))
opts.SetCreateIfMissing(true)
db, err := levigo.Open(dbPath, opts)
if err != nil {
return nil, err
}
ro := levigo.NewReadOptions()
wo := levigo.NewWriteOptions()
woSync := levigo.NewWriteOptions()
woSync.SetSync(true)
database := &CLevelDB{
db: db,
ro: ro,
wo: wo,
woSync: woSync,
}
return database, nil
}
// Implements DB.
func (db *CLevelDB) Get(key []byte) []byte {
key = nonNilBytes(key)
res, err := db.db.Get(db.ro, key)
if err != nil {
panic(err)
}
return res
}
// Implements DB.
func (db *CLevelDB) Has(key []byte) bool {
return db.Get(key) != nil
}
// Implements DB.
func (db *CLevelDB) Set(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(db.wo, key, value)
if err != nil {
panic(err)
}
}
// Implements DB.
func (db *CLevelDB) SetSync(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(db.woSync, key, value)
if err != nil {
panic(err)
}
}
// Implements DB.
func (db *CLevelDB) Delete(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(db.wo, key)
if err != nil {
panic(err)
}
}
// Implements DB.
func (db *CLevelDB) DeleteSync(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(db.woSync, key)
if err != nil {
panic(err)
}
}
func (db *CLevelDB) DB() *levigo.DB {
return db.db
}
// Implements DB.
func (db *CLevelDB) Close() {
db.db.Close()
db.ro.Close()
db.wo.Close()
db.woSync.Close()
}
// Implements DB.
func (db *CLevelDB) Print() {
itr := db.Iterator(nil, nil)
defer itr.Close()
for ; itr.Valid(); itr.Next() {
key := itr.Key()
value := itr.Value()
fmt.Printf("[%X]:\t[%X]\n", key, value)
}
}
// Implements DB.
func (db *CLevelDB) Stats() map[string]string {
keys := []string{
"leveldb.aliveiters",
"leveldb.alivesnaps",
"leveldb.blockpool",
"leveldb.cachedblock",
"leveldb.num-files-at-level{n}",
"leveldb.openedtables",
"leveldb.sstables",
"leveldb.stats",
}
stats := make(map[string]string, len(keys))
for _, key := range keys {
str := db.db.PropertyValue(key)
stats[key] = str
}
return stats
}
//----------------------------------------
// Batch
// Implements DB.
func (db *CLevelDB) NewBatch() Batch {
batch := levigo.NewWriteBatch()
return &cLevelDBBatch{db, batch}
}
type cLevelDBBatch struct {
db *CLevelDB
batch *levigo.WriteBatch
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Set(key, value []byte) {
mBatch.batch.Put(key, value)
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Delete(key []byte) {
mBatch.batch.Delete(key)
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Write() {
err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch)
if err != nil {
panic(err)
}
}
// Implements Batch.
func (mBatch *cLevelDBBatch) WriteSync() {
err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch)
if err != nil {
panic(err)
}
}
// Implements Batch.
func (mBatch *cLevelDBBatch) Close() {
mBatch.batch.Close()
}
//----------------------------------------
// Iterator
// NOTE This is almost identical to db/go_level_db.Iterator
// Before creating a third version, refactor.
func (db *CLevelDB) Iterator(start, end []byte) Iterator {
itr := db.db.NewIterator(db.ro)
return newCLevelDBIterator(itr, start, end, false)
}
func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator {
itr := db.db.NewIterator(db.ro)
return newCLevelDBIterator(itr, start, end, true)
}
var _ Iterator = (*cLevelDBIterator)(nil)
type cLevelDBIterator struct {
source *levigo.Iterator
start, end []byte
isReverse bool
isInvalid bool
}
func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator {
if isReverse {
if end == nil {
source.SeekToLast()
} else {
source.Seek(end)
if source.Valid() {
eoakey := source.Key() // end or after key
if bytes.Compare(end, eoakey) <= 0 {
source.Prev()
}
} else {
source.SeekToLast()
}
}
} else {
if start == nil {
source.SeekToFirst()
} else {
source.Seek(start)
}
}
return &cLevelDBIterator{
source: source,
start: start,
end: end,
isReverse: isReverse,
isInvalid: false,
}
}
func (itr cLevelDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
func (itr cLevelDBIterator) Valid() bool {
// Once invalid, forever invalid.
if itr.isInvalid {
return false
}
// Panic on DB error. No way to recover.
itr.assertNoError()
// If source is invalid, invalid.
if !itr.source.Valid() {
itr.isInvalid = true
return false
}
// If key is end or past it, invalid.
var start = itr.start
var end = itr.end
var key = itr.source.Key()
if itr.isReverse {
if start != nil && bytes.Compare(key, start) < 0 {
itr.isInvalid = true
return false
}
} else {
if end != nil && bytes.Compare(end, key) <= 0 {
itr.isInvalid = true
return false
}
}
// It's valid.
return true
}
func (itr cLevelDBIterator) Key() []byte {
itr.assertNoError()
itr.assertIsValid()
return itr.source.Key()
}
func (itr cLevelDBIterator) Value() []byte {
itr.assertNoError()
itr.assertIsValid()
return itr.source.Value()
}
func (itr cLevelDBIterator) Next() {
itr.assertNoError()
itr.assertIsValid()
if itr.isReverse {
itr.source.Prev()
} else {
itr.source.Next()
}
}
func (itr cLevelDBIterator) Close() {
itr.source.Close()
}
func (itr cLevelDBIterator) assertNoError() {
if err := itr.source.GetError(); err != nil {
panic(err)
}
}
func (itr cLevelDBIterator) assertIsValid() {
if !itr.Valid() {
panic("cLevelDBIterator is invalid")
}
}

libs/db/c_level_db_test.go (+0, -110)

@ -1,110 +0,0 @@
// +build cleveldb
package db
import (
"bytes"
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
cmn "github.com/tendermint/tendermint/libs/common"
)
func BenchmarkRandomReadsWrites2(b *testing.B) {
b.StopTimer()
numItems := int64(1000000)
internal := map[int64]int64{}
for i := 0; i < int(numItems); i++ {
internal[int64(i)] = int64(0)
}
db, err := NewCLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "")
if err != nil {
b.Fatal(err.Error())
return
}
fmt.Println("ok, starting")
b.StartTimer()
for i := 0; i < b.N; i++ {
// Write something
{
idx := (int64(cmn.RandInt()) % numItems)
internal[idx]++
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := int642Bytes(int64(val))
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
db.Set(
idxBytes,
valBytes,
)
}
// Read something
{
idx := (int64(cmn.RandInt()) % numItems)
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := db.Get(idxBytes)
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
if val == 0 {
if !bytes.Equal(valBytes, nil) {
b.Errorf("Expected %v for %v, got %X",
nil, idx, valBytes)
break
}
} else {
if len(valBytes) != 8 {
b.Errorf("Expected length 8 for %v, got %X",
idx, valBytes)
break
}
valGot := bytes2Int64(valBytes)
if val != valGot {
b.Errorf("Expected %v for %v, got %v",
val, idx, valGot)
break
}
}
}
}
db.Close()
}
/*
func int642Bytes(i int64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(i))
return buf
}
func bytes2Int64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
}
*/
func TestCLevelDBBackend(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
// Can't use "" (current directory) or "./" here because levigo.Open returns:
// "Error initializing DB: IO error: test_XXX.db: Invalid argument"
dir := os.TempDir()
db := NewDB(name, CLevelDBBackend, dir)
defer cleanupDBDir(dir, name)
_, ok := db.(*CLevelDB)
assert.True(t, ok)
}
func TestCLevelDBStats(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
dir := os.TempDir()
db := NewDB(name, CLevelDBBackend, dir)
defer cleanupDBDir(dir, name)
assert.NotEmpty(t, db.Stats())
}

libs/db/common_test.go (+0, -256)

@ -1,256 +0,0 @@
package db
import (
"bytes"
"encoding/binary"
"fmt"
"io/ioutil"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
)
//----------------------------------------
// Helper functions.
func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) {
valueGot := db.Get(key)
assert.Equal(t, valueWanted, valueGot)
}
func checkValid(t *testing.T, itr Iterator, expected bool) {
valid := itr.Valid()
require.Equal(t, expected, valid)
}
func checkNext(t *testing.T, itr Iterator, expected bool) {
itr.Next()
valid := itr.Valid()
require.Equal(t, expected, valid)
}
func checkNextPanics(t *testing.T, itr Iterator) {
assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't")
}
func checkDomain(t *testing.T, itr Iterator, start, end []byte) {
ds, de := itr.Domain()
assert.Equal(t, start, ds, "checkDomain domain start incorrect")
assert.Equal(t, end, de, "checkDomain domain end incorrect")
}
func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) {
k, v := itr.Key(), itr.Value()
assert.Exactly(t, key, k)
assert.Exactly(t, value, v)
}
func checkInvalid(t *testing.T, itr Iterator) {
checkValid(t, itr, false)
checkKeyPanics(t, itr)
checkValuePanics(t, itr)
checkNextPanics(t, itr)
}
func checkKeyPanics(t *testing.T, itr Iterator) {
assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't")
}
func checkValuePanics(t *testing.T, itr Iterator) {
assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't")
}
func newTempDB(t *testing.T, backend DBBackendType) (db DB, dbDir string) {
dirname, err := ioutil.TempDir("", "db_common_test")
require.Nil(t, err)
return NewDB("testdb", backend, dirname), dirname
}
//----------------------------------------
// mockDB
// NOTE: not actually goroutine safe.
// If you want something goroutine safe, maybe you just want a MemDB.
type mockDB struct {
mtx sync.Mutex
calls map[string]int
}
func newMockDB() *mockDB {
return &mockDB{
calls: make(map[string]int),
}
}
func (mdb *mockDB) Mutex() *sync.Mutex {
return &(mdb.mtx)
}
func (mdb *mockDB) Get([]byte) []byte {
mdb.calls["Get"]++
return nil
}
func (mdb *mockDB) Has([]byte) bool {
mdb.calls["Has"]++
return false
}
func (mdb *mockDB) Set([]byte, []byte) {
mdb.calls["Set"]++
}
func (mdb *mockDB) SetSync([]byte, []byte) {
mdb.calls["SetSync"]++
}
func (mdb *mockDB) SetNoLock([]byte, []byte) {
mdb.calls["SetNoLock"]++
}
func (mdb *mockDB) SetNoLockSync([]byte, []byte) {
mdb.calls["SetNoLockSync"]++
}
func (mdb *mockDB) Delete([]byte) {
mdb.calls["Delete"]++
}
func (mdb *mockDB) DeleteSync([]byte) {
mdb.calls["DeleteSync"]++
}
func (mdb *mockDB) DeleteNoLock([]byte) {
mdb.calls["DeleteNoLock"]++
}
func (mdb *mockDB) DeleteNoLockSync([]byte) {
mdb.calls["DeleteNoLockSync"]++
}
func (mdb *mockDB) Iterator(start, end []byte) Iterator {
mdb.calls["Iterator"]++
return &mockIterator{}
}
func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator {
mdb.calls["ReverseIterator"]++
return &mockIterator{}
}
func (mdb *mockDB) Close() {
mdb.calls["Close"]++
}
func (mdb *mockDB) NewBatch() Batch {
mdb.calls["NewBatch"]++
return &memBatch{db: mdb}
}
func (mdb *mockDB) Print() {
mdb.calls["Print"]++
fmt.Printf("mockDB{%v}", mdb.Stats())
}
func (mdb *mockDB) Stats() map[string]string {
mdb.calls["Stats"]++
res := make(map[string]string)
for key, count := range mdb.calls {
res[key] = fmt.Sprintf("%d", count)
}
return res
}
//----------------------------------------
// mockIterator
type mockIterator struct{}
func (mockIterator) Domain() (start []byte, end []byte) {
return nil, nil
}
func (mockIterator) Valid() bool {
return false
}
func (mockIterator) Next() {
}
func (mockIterator) Key() []byte {
return nil
}
func (mockIterator) Value() []byte {
return nil
}
func (mockIterator) Close() {
}
func benchmarkRandomReadsWrites(b *testing.B, db DB) {
b.StopTimer()
// create dummy data
const numItems = int64(1000000)
internal := map[int64]int64{}
for i := 0; i < int(numItems); i++ {
internal[int64(i)] = int64(0)
}
// fmt.Println("ok, starting")
b.StartTimer()
for i := 0; i < b.N; i++ {
// Write something
{
idx := int64(cmn.RandInt()) % numItems
internal[idx]++
val := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := int642Bytes(int64(val))
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
db.Set(idxBytes, valBytes)
}
// Read something
{
idx := int64(cmn.RandInt()) % numItems
valExp := internal[idx]
idxBytes := int642Bytes(int64(idx))
valBytes := db.Get(idxBytes)
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
if valExp == 0 {
if !bytes.Equal(valBytes, nil) {
b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
break
}
} else {
if len(valBytes) != 8 {
b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
break
}
valGot := bytes2Int64(valBytes)
if valExp != valGot {
b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
break
}
}
}
}
}
func int642Bytes(i int64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(i))
return buf
}
func bytes2Int64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
}

libs/db/db.go (+0, -70)

@ -1,70 +0,0 @@
package db
import (
"fmt"
"strings"
)
type DBBackendType string
// These are valid backend types.
const (
// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
// popular implementation)
// - pure go
// - stable
GoLevelDBBackend DBBackendType = "goleveldb"
// CLevelDBBackend represents cleveldb (uses levigo wrapper)
// - fast
// - requires gcc
// - use cleveldb build tag (go build -tags cleveldb)
CLevelDBBackend DBBackendType = "cleveldb"
// MemDBBackend represents an in-memory key value store, which is mostly used
// for testing.
MemDBBackend DBBackendType = "memdb"
// FSDBBackend represents filesystem database
// - EXPERIMENTAL
// - slow
FSDBBackend DBBackendType = "fsdb"
// BoltDBBackend represents bolt (uses etcd's fork of bolt -
// github.com/etcd-io/bbolt)
// - EXPERIMENTAL
// - may be faster in some use-cases (random reads - indexer)
// - use boltdb build tag (go build -tags boltdb)
BoltDBBackend DBBackendType = "boltdb"
)
type dbCreator func(name string, dir string) (DB, error)
var backends = map[DBBackendType]dbCreator{}
func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) {
_, ok := backends[backend]
if !force && ok {
return
}
backends[backend] = creator
}
// NewDB creates a new database of type backend with the given name.
// NOTE: function panics if:
// - backend is unknown (not registered)
// - creator function, provided during registration, returns error
func NewDB(name string, backend DBBackendType, dir string) DB {
dbCreator, ok := backends[backend]
if !ok {
keys := make([]string, len(backends))
i := 0
for k := range backends {
keys[i] = string(k)
i++
}
panic(fmt.Sprintf("Unknown db_backend %s, expected either %s", backend, strings.Join(keys, " or ")))
}
db, err := dbCreator(name, dir)
if err != nil {
panic(fmt.Sprintf("Error initializing DB: %v", err))
}
return db
}
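
db.go defined the backend registry and the panicking NewDB constructor that the call sites above rely on; after this commit the same constructor is imported from tm-cmn/db. A sketch of typical usage, assuming the moved package keeps exactly this API:

package main

import (
	dbm "github.com/tendermint/tm-cmn/db" // formerly github.com/tendermint/tendermint/libs/db
)

func main() {
	// NewDB panics if the backend is not registered or its creator returns an error.
	db := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, "/tmp/data")
	defer db.Close()

	db.SetSync([]byte("k"), []byte("v"))
	value := db.Get([]byte("k"))
	_ = value
}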

libs/db/db_test.go (+0, -194)

@ -1,194 +0,0 @@
package db
import (
"fmt"
"os"
"testing"
"github.com/stretchr/testify/assert"
)
func TestDBIteratorSingleKey(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
db.SetSync(bz("1"), bz("value_1"))
itr := db.Iterator(nil, nil)
checkValid(t, itr, true)
checkNext(t, itr, false)
checkValid(t, itr, false)
checkNextPanics(t, itr)
// Once invalid...
checkInvalid(t, itr)
})
}
}
func TestDBIteratorTwoKeys(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
db.SetSync(bz("1"), bz("value_1"))
db.SetSync(bz("2"), bz("value_1"))
{ // Fail by calling Next too much
itr := db.Iterator(nil, nil)
checkValid(t, itr, true)
checkNext(t, itr, true)
checkValid(t, itr, true)
checkNext(t, itr, false)
checkValid(t, itr, false)
checkNextPanics(t, itr)
// Once invalid...
checkInvalid(t, itr)
}
})
}
}
func TestDBIteratorMany(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
keys := make([][]byte, 100)
for i := 0; i < 100; i++ {
keys[i] = []byte{byte(i)}
}
value := []byte{5}
for _, k := range keys {
db.Set(k, value)
}
itr := db.Iterator(nil, nil)
defer itr.Close()
for ; itr.Valid(); itr.Next() {
assert.Equal(t, db.Get(itr.Key()), itr.Value())
}
})
}
}
func TestDBIteratorEmpty(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
itr := db.Iterator(nil, nil)
checkInvalid(t, itr)
})
}
}
func TestDBIteratorEmptyBeginAfter(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
itr := db.Iterator(bz("1"), nil)
checkInvalid(t, itr)
})
}
}
func TestDBIteratorNonemptyBeginAfter(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
db.SetSync(bz("1"), bz("value_1"))
itr := db.Iterator(bz("2"), nil)
checkInvalid(t, itr)
})
}
}
func TestDBBatchWrite(t *testing.T) {
testCases := []struct {
modify func(batch Batch)
calls map[string]int
}{
0: {
func(batch Batch) {
batch.Set(bz("1"), bz("1"))
batch.Set(bz("2"), bz("2"))
batch.Delete(bz("3"))
batch.Set(bz("4"), bz("4"))
batch.Write()
},
map[string]int{
"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
},
},
1: {
func(batch Batch) {
batch.Set(bz("1"), bz("1"))
batch.Set(bz("2"), bz("2"))
batch.Set(bz("4"), bz("4"))
batch.Delete(bz("3"))
batch.Write()
},
map[string]int{
"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
},
},
2: {
func(batch Batch) {
batch.Set(bz("1"), bz("1"))
batch.Set(bz("2"), bz("2"))
batch.Delete(bz("3"))
batch.Set(bz("4"), bz("4"))
batch.WriteSync()
},
map[string]int{
"Set": 0, "SetSync": 0, "SetNoLock": 2, "SetNoLockSync": 1,
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
},
},
3: {
func(batch Batch) {
batch.Set(bz("1"), bz("1"))
batch.Set(bz("2"), bz("2"))
batch.Set(bz("4"), bz("4"))
batch.Delete(bz("3"))
batch.WriteSync()
},
map[string]int{
"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 0, "DeleteNoLockSync": 1,
},
},
}
for i, tc := range testCases {
mdb := newMockDB()
batch := mdb.NewBatch()
tc.modify(batch)
for call, exp := range tc.calls {
got := mdb.calls[call]
assert.Equal(t, exp, got, "#%v - key: %s", i, call)
}
}
}
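For readers skimming the tests above, a small runnable sketch of the iterator contract they exercise (same assumed dbm import alias as in the earlier sketch): the end bound of Iterator(start, end) is exclusive, and once Valid returns false the iterator stays invalid, so a further Next would panic, which is what checkNextPanics asserts.

package main

import (
    "fmt"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    db := dbm.NewMemDB()
    db.Set([]byte("a"), []byte("1"))
    db.Set([]byte("b"), []byte("2"))
    db.Set([]byte("c"), []byte("3"))

    // The end bound is exclusive: this yields "a" and "b" but not "c".
    itr := db.Iterator([]byte("a"), []byte("c"))
    defer itr.Close()
    for ; itr.Valid(); itr.Next() {
        fmt.Printf("%s=%s\n", itr.Key(), itr.Value())
    }
    // Calling itr.Next() here, after Valid() has returned false, would panic.
}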

+ 0
- 270
libs/db/fsdb.go View File

@ -1,270 +0,0 @@
package db
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"sort"
"sync"
"github.com/pkg/errors"
cmn "github.com/tendermint/tendermint/libs/common"
)
const (
keyPerm = os.FileMode(0600)
dirPerm = os.FileMode(0700)
)
func init() {
registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
dbPath := filepath.Join(dir, name+".db")
return NewFSDB(dbPath), nil
}, false)
}
var _ DB = (*FSDB)(nil)
// FSDB is a filesystem-backed DB that stores each key as its own file. It's slow.
type FSDB struct {
mtx sync.Mutex
dir string
}
func NewFSDB(dir string) *FSDB {
err := os.MkdirAll(dir, dirPerm)
if err != nil {
panic(errors.Wrap(err, "Creating FSDB dir "+dir))
}
database := &FSDB{
dir: dir,
}
return database
}
func (db *FSDB) Get(key []byte) []byte {
db.mtx.Lock()
defer db.mtx.Unlock()
key = escapeKey(key)
path := db.nameToPath(key)
value, err := read(path)
if os.IsNotExist(err) {
return nil
} else if err != nil {
panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key))
}
return value
}
func (db *FSDB) Has(key []byte) bool {
db.mtx.Lock()
defer db.mtx.Unlock()
key = escapeKey(key)
path := db.nameToPath(key)
return cmn.FileExists(path)
}
func (db *FSDB) Set(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
func (db *FSDB) SetSync(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
// NOTE: Implements atomicSetDeleter.
func (db *FSDB) SetNoLock(key []byte, value []byte) {
key = escapeKey(key)
value = nonNilBytes(value)
path := db.nameToPath(key)
err := write(path, value)
if err != nil {
panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key))
}
}
func (db *FSDB) Delete(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
func (db *FSDB) DeleteSync(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
// NOTE: Implements atomicSetDeleter.
func (db *FSDB) DeleteNoLock(key []byte) {
key = escapeKey(key)
path := db.nameToPath(key)
err := remove(path)
if os.IsNotExist(err) {
return
} else if err != nil {
panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key))
}
}
func (db *FSDB) Close() {
// Nothing to do.
}
func (db *FSDB) Print() {
db.mtx.Lock()
defer db.mtx.Unlock()
panic("FSDB.Print not yet implemented")
}
func (db *FSDB) Stats() map[string]string {
db.mtx.Lock()
defer db.mtx.Unlock()
panic("FSDB.Stats not yet implemented")
}
func (db *FSDB) NewBatch() Batch {
db.mtx.Lock()
defer db.mtx.Unlock()
// Not sure we would ever want to try...
// It doesn't seem easy for general filesystems.
panic("FSDB.NewBatch not yet implemented")
}
func (db *FSDB) Mutex() *sync.Mutex {
return &(db.mtx)
}
func (db *FSDB) Iterator(start, end []byte) Iterator {
return db.MakeIterator(start, end, false)
}
func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
db.mtx.Lock()
defer db.mtx.Unlock()
// We need a copy of all of the keys.
// Not the best, but probably not a bottleneck, depending on usage.
keys, err := list(db.dir, start, end)
if err != nil {
panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
}
if isReversed {
sort.Sort(sort.Reverse(sort.StringSlice(keys)))
} else {
sort.Strings(keys)
}
return newMemDBIterator(db, keys, start, end)
}
func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
return db.MakeIterator(start, end, true)
}
func (db *FSDB) nameToPath(name []byte) string {
n := url.PathEscape(string(name))
return filepath.Join(db.dir, n)
}
// Read some bytes from a file.
// CONTRACT: returns os errors directly without wrapping.
func read(path string) ([]byte, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
d, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return d, nil
}
// Write some bytes to a file.
// CONTRACT: returns os errors directly without wrapping.
func write(path string, d []byte) error {
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm)
if err != nil {
return err
}
defer f.Close()
// fInfo, err := f.Stat()
// if err != nil {
// return err
// }
// if fInfo.Mode() != keyPerm {
// return tmerrors.NewErrPermissionsChanged(f.Name(), keyPerm, fInfo.Mode())
// }
_, err = f.Write(d)
if err != nil {
return err
}
err = f.Sync()
return err
}
// Remove a file.
// CONTRACT: returns os errors directly without wrapping.
func remove(path string) error {
return os.Remove(path)
}
// List keys in a directory, stripping off escape sequences and dir portions.
// CONTRACT: returns os errors directly without wrapping.
func list(dirPath string, start, end []byte) ([]string, error) {
dir, err := os.Open(dirPath)
if err != nil {
return nil, err
}
defer dir.Close()
names, err := dir.Readdirnames(0)
if err != nil {
return nil, err
}
var keys []string
for _, name := range names {
n, err := url.PathUnescape(name)
if err != nil {
return nil, fmt.Errorf("Failed to unescape %s while listing", name)
}
key := unescapeKey([]byte(n))
if IsKeyInDomain(key, start, end) {
keys = append(keys, string(key))
}
}
return keys, nil
}
// Escape keys to support empty or nil keys, since the file system doesn't
// allow empty filenames.
func escapeKey(key []byte) []byte {
return []byte("k_" + string(key))
}
func unescapeKey(escKey []byte) []byte {
if len(escKey) < 2 {
panic(fmt.Sprintf("Invalid esc key: %x", escKey))
}
if string(escKey[:2]) != "k_" {
panic(fmt.Sprintf("Invalid esc key: %x", escKey))
}
return escKey[2:]
}
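A hedged sketch of FSDB usage (the directory name and dbm import alias are illustrative): every key is persisted as its own escaped file under the given directory, which is what makes this backend simple but slow.

package main

import (
    "fmt"
    "io/ioutil"
    "os"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    dir, err := ioutil.TempDir("", "fsdb_example")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    db := dbm.NewFSDB(dir)
    defer db.Close()

    // "some/key" is escaped (a "k_" prefix plus url.PathEscape), so it maps
    // to a single flat file inside dir.
    db.Set([]byte("some/key"), []byte("value"))
    fmt.Printf("%s\n", db.Get([]byte("some/key"))) // value
}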

+ 0
- 333
libs/db/go_level_db.go View File

@ -1,333 +0,0 @@
package db
import (
"bytes"
"fmt"
"path/filepath"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
)
func init() {
dbCreator := func(name string, dir string) (DB, error) {
return NewGoLevelDB(name, dir)
}
registerDBCreator(GoLevelDBBackend, dbCreator, false)
}
var _ DB = (*GoLevelDB)(nil)
type GoLevelDB struct {
db *leveldb.DB
}
func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
return NewGoLevelDBWithOpts(name, dir, nil)
}
func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) {
dbPath := filepath.Join(dir, name+".db")
db, err := leveldb.OpenFile(dbPath, o)
if err != nil {
return nil, err
}
database := &GoLevelDB{
db: db,
}
return database, nil
}
// Implements DB.
func (db *GoLevelDB) Get(key []byte) []byte {
key = nonNilBytes(key)
res, err := db.db.Get(key, nil)
if err != nil {
if err == errors.ErrNotFound {
return nil
}
panic(err)
}
return res
}
// Implements DB.
func (db *GoLevelDB) Has(key []byte) bool {
return db.Get(key) != nil
}
// Implements DB.
func (db *GoLevelDB) Set(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(key, value, nil)
if err != nil {
panic(err)
}
}
// Implements DB.
func (db *GoLevelDB) SetSync(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
err := db.db.Put(key, value, &opt.WriteOptions{Sync: true})
if err != nil {
panic(err)
}
}
// Implements DB.
func (db *GoLevelDB) Delete(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(key, nil)
if err != nil {
panic(err)
}
}
// Implements DB.
func (db *GoLevelDB) DeleteSync(key []byte) {
key = nonNilBytes(key)
err := db.db.Delete(key, &opt.WriteOptions{Sync: true})
if err != nil {
panic(err)
}
}
func (db *GoLevelDB) DB() *leveldb.DB {
return db.db
}
// Implements DB.
func (db *GoLevelDB) Close() {
db.db.Close()
}
// Implements DB.
func (db *GoLevelDB) Print() {
str, _ := db.db.GetProperty("leveldb.stats")
fmt.Printf("%v\n", str)
itr := db.db.NewIterator(nil, nil)
for itr.Next() {
key := itr.Key()
value := itr.Value()
fmt.Printf("[%X]:\t[%X]\n", key, value)
}
}
// Implements DB.
func (db *GoLevelDB) Stats() map[string]string {
keys := []string{
"leveldb.num-files-at-level{n}",
"leveldb.stats",
"leveldb.sstables",
"leveldb.blockpool",
"leveldb.cachedblock",
"leveldb.openedtables",
"leveldb.alivesnaps",
"leveldb.aliveiters",
}
stats := make(map[string]string)
for _, key := range keys {
str, err := db.db.GetProperty(key)
if err == nil {
stats[key] = str
}
}
return stats
}
//----------------------------------------
// Batch
// Implements DB.
func (db *GoLevelDB) NewBatch() Batch {
batch := new(leveldb.Batch)
return &goLevelDBBatch{db, batch}
}
type goLevelDBBatch struct {
db *GoLevelDB
batch *leveldb.Batch
}
// Implements Batch.
func (mBatch *goLevelDBBatch) Set(key, value []byte) {
mBatch.batch.Put(key, value)
}
// Implements Batch.
func (mBatch *goLevelDBBatch) Delete(key []byte) {
mBatch.batch.Delete(key)
}
// Implements Batch.
func (mBatch *goLevelDBBatch) Write() {
err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false})
if err != nil {
panic(err)
}
}
// Implements Batch.
func (mBatch *goLevelDBBatch) WriteSync() {
err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true})
if err != nil {
panic(err)
}
}
// Implements Batch.
// Close is a no-op for goLevelDBBatch.
func (mBatch *goLevelDBBatch) Close() {}
//----------------------------------------
// Iterator
// NOTE: This is almost identical to db/c_level_db.Iterator.
// Before creating a third version, refactor.
// Implements DB.
func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
itr := db.db.NewIterator(nil, nil)
return newGoLevelDBIterator(itr, start, end, false)
}
// Implements DB.
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
itr := db.db.NewIterator(nil, nil)
return newGoLevelDBIterator(itr, start, end, true)
}
type goLevelDBIterator struct {
source iterator.Iterator
start []byte
end []byte
isReverse bool
isInvalid bool
}
var _ Iterator = (*goLevelDBIterator)(nil)
func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
if isReverse {
if end == nil {
source.Last()
} else {
valid := source.Seek(end)
if valid {
eoakey := source.Key() // end or after key
if bytes.Compare(end, eoakey) <= 0 {
source.Prev()
}
} else {
source.Last()
}
}
} else {
if start == nil {
source.First()
} else {
source.Seek(start)
}
}
return &goLevelDBIterator{
source: source,
start: start,
end: end,
isReverse: isReverse,
isInvalid: false,
}
}
// Implements Iterator.
func (itr *goLevelDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
// Implements Iterator.
func (itr *goLevelDBIterator) Valid() bool {
// Once invalid, forever invalid.
if itr.isInvalid {
return false
}
// Panic on DB error. No way to recover.
itr.assertNoError()
// If source is invalid, invalid.
if !itr.source.Valid() {
itr.isInvalid = true
return false
}
// If key is end or past it, invalid.
var start = itr.start
var end = itr.end
var key = itr.source.Key()
if itr.isReverse {
if start != nil && bytes.Compare(key, start) < 0 {
itr.isInvalid = true
return false
}
} else {
if end != nil && bytes.Compare(end, key) <= 0 {
itr.isInvalid = true
return false
}
}
// Valid
return true
}
// Implements Iterator.
func (itr *goLevelDBIterator) Key() []byte {
// Key returns a copy of the current key.
// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
itr.assertNoError()
itr.assertIsValid()
return cp(itr.source.Key())
}
// Implements Iterator.
func (itr *goLevelDBIterator) Value() []byte {
// Value returns a copy of the current value.
// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
itr.assertNoError()
itr.assertIsValid()
return cp(itr.source.Value())
}
// Implements Iterator.
func (itr *goLevelDBIterator) Next() {
itr.assertNoError()
itr.assertIsValid()
if itr.isReverse {
itr.source.Prev()
} else {
itr.source.Next()
}
}
// Implements Iterator.
func (itr *goLevelDBIterator) Close() {
itr.source.Release()
}
func (itr *goLevelDBIterator) assertNoError() {
if err := itr.source.Error(); err != nil {
panic(err)
}
}
func (itr *goLevelDBIterator) assertIsValid() {
if !itr.Valid() {
panic("goLevelDBIterator is invalid")
}
}
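A sketch of the GoLevelDB wrapper above (directory and key names are illustrative, same assumed dbm import alias): the database lives at dir/name.db, SetSync forces a synced write, and nil iterator bounds mean the full key range.

package main

import (
    "fmt"
    "io/ioutil"
    "os"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    dir, err := ioutil.TempDir("", "goleveldb_example")
    if err != nil {
        panic(err)
    }
    defer os.RemoveAll(dir)

    // Creates (or opens) dir/example.db.
    db, err := dbm.NewGoLevelDB("example", dir)
    if err != nil {
        panic(err)
    }
    defer db.Close()

    db.SetSync([]byte("height"), []byte("10")) // written with Sync: true
    itr := db.Iterator(nil, nil)               // nil bounds = full range
    defer itr.Close()
    for ; itr.Valid(); itr.Next() {
        fmt.Printf("%s=%s\n", itr.Key(), itr.Value())
    }
}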

+ 0
- 45
libs/db/go_level_db_test.go View File

@ -1,45 +0,0 @@
package db
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/syndtr/goleveldb/leveldb/opt"
cmn "github.com/tendermint/tendermint/libs/common"
)
func TestGoLevelDBNewGoLevelDB(t *testing.T) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
defer cleanupDBDir("", name)
// Test we can't open the db twice for writing
wr1, err := NewGoLevelDB(name, "")
require.Nil(t, err)
_, err = NewGoLevelDB(name, "")
require.NotNil(t, err)
wr1.Close() // Close the db to release the lock
// Test we can open the db twice for reading only
ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
defer ro1.Close()
require.Nil(t, err)
ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true})
defer ro2.Close()
require.Nil(t, err)
}
func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) {
name := fmt.Sprintf("test_%x", cmn.RandStr(12))
db, err := NewGoLevelDB(name, "")
if err != nil {
b.Fatal(err)
}
defer func() {
db.Close()
cleanupDBDir("", name)
}()
benchmarkRandomReadsWrites(b, db)
}

+ 0
- 74
libs/db/mem_batch.go View File

@ -1,74 +0,0 @@
package db
import "sync"
type atomicSetDeleter interface {
Mutex() *sync.Mutex
SetNoLock(key, value []byte)
SetNoLockSync(key, value []byte)
DeleteNoLock(key []byte)
DeleteNoLockSync(key []byte)
}
type memBatch struct {
db atomicSetDeleter
ops []operation
}
type opType int
const (
opTypeSet opType = 1
opTypeDelete opType = 2
)
type operation struct {
opType
key []byte
value []byte
}
func (mBatch *memBatch) Set(key, value []byte) {
mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value})
}
func (mBatch *memBatch) Delete(key []byte) {
mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil})
}
func (mBatch *memBatch) Write() {
mBatch.write(false)
}
func (mBatch *memBatch) WriteSync() {
mBatch.write(true)
}
func (mBatch *memBatch) Close() {
mBatch.ops = nil
}
func (mBatch *memBatch) write(doSync bool) {
if mtx := mBatch.db.Mutex(); mtx != nil {
mtx.Lock()
defer mtx.Unlock()
}
for i, op := range mBatch.ops {
if doSync && i == (len(mBatch.ops)-1) {
switch op.opType {
case opTypeSet:
mBatch.db.SetNoLockSync(op.key, op.value)
case opTypeDelete:
mBatch.db.DeleteNoLockSync(op.key)
}
break // we're done.
}
switch op.opType {
case opTypeSet:
mBatch.db.SetNoLock(op.key, op.value)
case opTypeDelete:
mBatch.db.DeleteNoLock(op.key)
}
}
}
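A short sketch of the batch semantics implemented above, using MemDB so it runs without extra backends (dbm import alias as in the earlier sketches): operations are queued until Write or WriteSync, and with WriteSync only the final queued operation goes through the *NoLockSync variant, which is exactly what TestDBBatchWrite earlier in this diff asserts.

package main

import (
    "fmt"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    db := dbm.NewMemDB()

    batch := db.NewBatch()
    defer batch.Close()
    batch.Set([]byte("a"), []byte("1"))
    batch.Set([]byte("b"), []byte("2"))
    batch.Delete([]byte("c"))

    // All queued operations are applied under the DB mutex; WriteSync would
    // route only the last operation through SetNoLockSync/DeleteNoLockSync.
    batch.Write()

    fmt.Printf("%s %s\n", db.Get([]byte("a")), db.Get([]byte("b"))) // 1 2
}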

+ 0
- 255
libs/db/mem_db.go View File

@ -1,255 +0,0 @@
package db
import (
"fmt"
"sort"
"sync"
)
func init() {
registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
return NewMemDB(), nil
}, false)
}
var _ DB = (*MemDB)(nil)
type MemDB struct {
mtx sync.Mutex
db map[string][]byte
}
func NewMemDB() *MemDB {
database := &MemDB{
db: make(map[string][]byte),
}
return database
}
// Implements atomicSetDeleter.
func (db *MemDB) Mutex() *sync.Mutex {
return &(db.mtx)
}
// Implements DB.
func (db *MemDB) Get(key []byte) []byte {
db.mtx.Lock()
defer db.mtx.Unlock()
key = nonNilBytes(key)
value := db.db[string(key)]
return value
}
// Implements DB.
func (db *MemDB) Has(key []byte) bool {
db.mtx.Lock()
defer db.mtx.Unlock()
key = nonNilBytes(key)
_, ok := db.db[string(key)]
return ok
}
// Implements DB.
func (db *MemDB) Set(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
// Implements DB.
func (db *MemDB) SetSync(key []byte, value []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.SetNoLock(key, value)
}
// Implements atomicSetDeleter.
func (db *MemDB) SetNoLock(key []byte, value []byte) {
db.SetNoLockSync(key, value)
}
// Implements atomicSetDeleter.
func (db *MemDB) SetNoLockSync(key []byte, value []byte) {
key = nonNilBytes(key)
value = nonNilBytes(value)
db.db[string(key)] = value
}
// Implements DB.
func (db *MemDB) Delete(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
// Implements DB.
func (db *MemDB) DeleteSync(key []byte) {
db.mtx.Lock()
defer db.mtx.Unlock()
db.DeleteNoLock(key)
}
// Implements atomicSetDeleter.
func (db *MemDB) DeleteNoLock(key []byte) {
db.DeleteNoLockSync(key)
}
// Implements atomicSetDeleter.
func (db *MemDB) DeleteNoLockSync(key []byte) {
key = nonNilBytes(key)
delete(db.db, string(key))
}
// Implements DB.
func (db *MemDB) Close() {
// Close is a noop since for an in-memory
// database, we don't have a destination
// to flush contents to nor do we want
// any data loss on invoking Close()
// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56
}
// Implements DB.
func (db *MemDB) Print() {
db.mtx.Lock()
defer db.mtx.Unlock()
for key, value := range db.db {
fmt.Printf("[%X]:\t[%X]\n", []byte(key), value)
}
}
// Implements DB.
func (db *MemDB) Stats() map[string]string {
db.mtx.Lock()
defer db.mtx.Unlock()
stats := make(map[string]string)
stats["database.type"] = "memDB"
stats["database.size"] = fmt.Sprintf("%d", len(db.db))
return stats
}
// Implements DB.
func (db *MemDB) NewBatch() Batch {
db.mtx.Lock()
defer db.mtx.Unlock()
return &memBatch{db, nil}
}
//----------------------------------------
// Iterator
// Implements DB.
func (db *MemDB) Iterator(start, end []byte) Iterator {
db.mtx.Lock()
defer db.mtx.Unlock()
keys := db.getSortedKeys(start, end, false)
return newMemDBIterator(db, keys, start, end)
}
// Implements DB.
func (db *MemDB) ReverseIterator(start, end []byte) Iterator {
db.mtx.Lock()
defer db.mtx.Unlock()
keys := db.getSortedKeys(start, end, true)
return newMemDBIterator(db, keys, start, end)
}
// We need a copy of all of the keys.
// Not the best, but probably not a bottleneck, depending on usage.
type memDBIterator struct {
db DB
cur int
keys []string
start []byte
end []byte
}
var _ Iterator = (*memDBIterator)(nil)
// Keys is expected to be in reverse order for reverse iterators.
func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator {
return &memDBIterator{
db: db,
cur: 0,
keys: keys,
start: start,
end: end,
}
}
// Implements Iterator.
func (itr *memDBIterator) Domain() ([]byte, []byte) {
return itr.start, itr.end
}
// Implements Iterator.
func (itr *memDBIterator) Valid() bool {
return 0 <= itr.cur && itr.cur < len(itr.keys)
}
// Implements Iterator.
func (itr *memDBIterator) Next() {
itr.assertIsValid()
itr.cur++
}
// Implements Iterator.
func (itr *memDBIterator) Key() []byte {
itr.assertIsValid()
return []byte(itr.keys[itr.cur])
}
// Implements Iterator.
func (itr *memDBIterator) Value() []byte {
itr.assertIsValid()
key := []byte(itr.keys[itr.cur])
return itr.db.Get(key)
}
// Implements Iterator.
func (itr *memDBIterator) Close() {
itr.keys = nil
itr.db = nil
}
func (itr *memDBIterator) assertIsValid() {
if !itr.Valid() {
panic("memDBIterator is invalid")
}
}
//----------------------------------------
// Misc.
func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string {
keys := []string{}
for key := range db.db {
inDomain := IsKeyInDomain([]byte(key), start, end)
if inDomain {
keys = append(keys, key)
}
}
sort.Strings(keys)
if reverse {
nkeys := len(keys)
for i := 0; i < nkeys/2; i++ {
temp := keys[i]
keys[i] = keys[nkeys-i-1]
keys[nkeys-i-1] = temp
}
}
return keys
}
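A small sketch of MemDB's reverse iteration (same assumed dbm import alias): the matching keys are snapshotted and sorted up front by getSortedKeys, then walked in descending order.

package main

import (
    "fmt"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    db := dbm.NewMemDB()
    for _, k := range []string{"a", "b", "c"} {
        db.Set([]byte(k), []byte("v"))
    }

    itr := db.ReverseIterator(nil, nil)
    defer itr.Close()
    for ; itr.Valid(); itr.Next() {
        fmt.Printf("%s ", itr.Key()) // c b a
    }
    fmt.Println()
}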

+ 0
- 336
libs/db/prefix_db.go View File

@ -1,336 +0,0 @@
package db
import (
"bytes"
"fmt"
"sync"
)
// IteratePrefix is a convenience function for iterating over a key domain
// restricted by prefix.
func IteratePrefix(db DB, prefix []byte) Iterator {
var start, end []byte
if len(prefix) == 0 {
start = nil
end = nil
} else {
start = cp(prefix)
end = cpIncr(prefix)
}
return db.Iterator(start, end)
}
/*
TODO: Make test, maybe rename.
// Like IteratePrefix but the iterator strips the prefix from the keys.
func IteratePrefixStripped(db DB, prefix []byte) Iterator {
start, end := ...
return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix))
}
*/
//----------------------------------------
// prefixDB
type prefixDB struct {
mtx sync.Mutex
prefix []byte
db DB
}
// NewPrefixDB lets you namespace multiple DBs within a single DB.
func NewPrefixDB(db DB, prefix []byte) *prefixDB {
return &prefixDB{
prefix: prefix,
db: db,
}
}
// Implements atomicSetDeleter.
func (pdb *prefixDB) Mutex() *sync.Mutex {
return &(pdb.mtx)
}
// Implements DB.
func (pdb *prefixDB) Get(key []byte) []byte {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
pkey := pdb.prefixed(key)
value := pdb.db.Get(pkey)
return value
}
// Implements DB.
func (pdb *prefixDB) Has(key []byte) bool {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
return pdb.db.Has(pdb.prefixed(key))
}
// Implements DB.
func (pdb *prefixDB) Set(key []byte, value []byte) {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
pkey := pdb.prefixed(key)
pdb.db.Set(pkey, value)
}
// Implements DB.
func (pdb *prefixDB) SetSync(key []byte, value []byte) {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
pdb.db.SetSync(pdb.prefixed(key), value)
}
// Implements DB.
func (pdb *prefixDB) Delete(key []byte) {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
pdb.db.Delete(pdb.prefixed(key))
}
// Implements DB.
func (pdb *prefixDB) DeleteSync(key []byte) {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
pdb.db.DeleteSync(pdb.prefixed(key))
}
// Implements DB.
func (pdb *prefixDB) Iterator(start, end []byte) Iterator {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
var pstart, pend []byte
pstart = append(cp(pdb.prefix), start...)
if end == nil {
pend = cpIncr(pdb.prefix)
} else {
pend = append(cp(pdb.prefix), end...)
}
return newPrefixIterator(
pdb.prefix,
start,
end,
pdb.db.Iterator(
pstart,
pend,
),
)
}
// Implements DB.
func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
var pstart, pend []byte
pstart = append(cp(pdb.prefix), start...)
if end == nil {
pend = cpIncr(pdb.prefix)
} else {
pend = append(cp(pdb.prefix), end...)
}
ritr := pdb.db.ReverseIterator(pstart, pend)
return newPrefixIterator(
pdb.prefix,
start,
end,
ritr,
)
}
// Implements DB.
// Panics if the underlying DB is not an
// atomicSetDeleter.
func (pdb *prefixDB) NewBatch() Batch {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
return newPrefixBatch(pdb.prefix, pdb.db.NewBatch())
}
/* NOTE: Uncomment to use memBatch instead of prefixBatch
// Implements atomicSetDeleter.
func (pdb *prefixDB) SetNoLock(key []byte, value []byte) {
pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value)
}
// Implements atomicSetDeleter.
func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) {
pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value)
}
// Implements atomicSetDeleter.
func (pdb *prefixDB) DeleteNoLock(key []byte) {
pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key))
}
// Implements atomicSetDeleter.
func (pdb *prefixDB) DeleteNoLockSync(key []byte) {
pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key))
}
*/
// Implements DB.
func (pdb *prefixDB) Close() {
pdb.mtx.Lock()
defer pdb.mtx.Unlock()
pdb.db.Close()
}
// Implements DB.
func (pdb *prefixDB) Print() {
fmt.Printf("prefix: %X\n", pdb.prefix)
itr := pdb.Iterator(nil, nil)
defer itr.Close()
for ; itr.Valid(); itr.Next() {
key := itr.Key()
value := itr.Value()
fmt.Printf("[%X]:\t[%X]\n", key, value)
}
}
// Implements DB.
func (pdb *prefixDB) Stats() map[string]string {
stats := make(map[string]string)
stats["prefixdb.prefix.string"] = string(pdb.prefix)
stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix)
source := pdb.db.Stats()
for key, value := range source {
stats["prefixdb.source."+key] = value
}
return stats
}
func (pdb *prefixDB) prefixed(key []byte) []byte {
return append(cp(pdb.prefix), key...)
}
//----------------------------------------
// prefixBatch
type prefixBatch struct {
prefix []byte
source Batch
}
func newPrefixBatch(prefix []byte, source Batch) prefixBatch {
return prefixBatch{
prefix: prefix,
source: source,
}
}
func (pb prefixBatch) Set(key, value []byte) {
pkey := append(cp(pb.prefix), key...)
pb.source.Set(pkey, value)
}
func (pb prefixBatch) Delete(key []byte) {
pkey := append(cp(pb.prefix), key...)
pb.source.Delete(pkey)
}
func (pb prefixBatch) Write() {
pb.source.Write()
}
func (pb prefixBatch) WriteSync() {
pb.source.WriteSync()
}
func (pb prefixBatch) Close() {
pb.source.Close()
}
//----------------------------------------
// prefixIterator
var _ Iterator = (*prefixIterator)(nil)
// Strips the prefix from keys while iterating over the source Iterator.
type prefixIterator struct {
prefix []byte
start []byte
end []byte
source Iterator
valid bool
}
func newPrefixIterator(prefix, start, end []byte, source Iterator) *prefixIterator {
if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) {
return &prefixIterator{
prefix: prefix,
start: start,
end: end,
source: source,
valid: false,
}
} else {
return &prefixIterator{
prefix: prefix,
start: start,
end: end,
source: source,
valid: true,
}
}
}
func (itr *prefixIterator) Domain() (start []byte, end []byte) {
return itr.start, itr.end
}
func (itr *prefixIterator) Valid() bool {
return itr.valid && itr.source.Valid()
}
func (itr *prefixIterator) Next() {
if !itr.valid {
panic("prefixIterator invalid, cannot call Next()")
}
itr.source.Next()
if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) {
itr.valid = false
return
}
}
func (itr *prefixIterator) Key() (key []byte) {
if !itr.valid {
panic("prefixIterator invalid, cannot call Key()")
}
return stripPrefix(itr.source.Key(), itr.prefix)
}
func (itr *prefixIterator) Value() (value []byte) {
if !itr.valid {
panic("prefixIterator invalid, cannot call Value()")
}
return itr.source.Value()
}
func (itr *prefixIterator) Close() {
itr.source.Close()
}
//----------------------------------------
func stripPrefix(key []byte, prefix []byte) (stripped []byte) {
if len(key) < len(prefix) {
panic("should not happen")
}
if !bytes.Equal(key[:len(prefix)], prefix) {
panic("should not happen")
}
return key[len(prefix):]
}
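A sketch of how prefixDB namespaces one DB inside another (key names are illustrative, dbm import alias as before): writes through the prefixed handle are stored with the prefix prepended, its iterator strips the prefix back off, and IteratePrefix on the raw DB keeps the full keys.

package main

import (
    "fmt"

    dbm "github.com/tendermint/tendermint/libs/db"
)

func main() {
    db := dbm.NewMemDB()
    pdb := dbm.NewPrefixDB(db, []byte("validators/"))

    // Keys written through pdb are stored in db as "validators/"+key.
    pdb.Set([]byte("alice"), []byte("10"))
    pdb.Set([]byte("bob"), []byte("20"))
    db.Set([]byte("other"), []byte("ignored"))

    // The prefix iterator strips the prefix again, so keys come back bare.
    itr := pdb.Iterator(nil, nil)
    defer itr.Close()
    for ; itr.Valid(); itr.Next() {
        fmt.Printf("%s=%s\n", itr.Key(), itr.Value()) // alice=10, bob=20
    }

    // IteratePrefix covers the same domain on the raw db, keeping full keys.
    itr2 := dbm.IteratePrefix(db, []byte("validators/"))
    defer itr2.Close()
    for ; itr2.Valid(); itr2.Next() {
        fmt.Printf("%s\n", itr2.Key()) // validators/alice, validators/bob
    }
}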

+ 0
- 192
libs/db/prefix_db_test.go View File

@ -1,192 +0,0 @@
package db
import "testing"
func mockDBWithStuff() DB {
db := NewMemDB()
// Under "key" prefix
db.Set(bz("key"), bz("value"))
db.Set(bz("key1"), bz("value1"))
db.Set(bz("key2"), bz("value2"))
db.Set(bz("key3"), bz("value3"))
db.Set(bz("something"), bz("else"))
db.Set(bz(""), bz(""))
db.Set(bz("k"), bz("val"))
db.Set(bz("ke"), bz("valu"))
db.Set(bz("kee"), bz("valuu"))
return db
}
func TestPrefixDBSimple(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
checkValue(t, pdb, bz("key"), nil)
checkValue(t, pdb, bz(""), bz("value"))
checkValue(t, pdb, bz("key1"), nil)
checkValue(t, pdb, bz("1"), bz("value1"))
checkValue(t, pdb, bz("key2"), nil)
checkValue(t, pdb, bz("2"), bz("value2"))
checkValue(t, pdb, bz("key3"), nil)
checkValue(t, pdb, bz("3"), bz("value3"))
checkValue(t, pdb, bz("something"), nil)
checkValue(t, pdb, bz("k"), nil)
checkValue(t, pdb, bz("ke"), nil)
checkValue(t, pdb, bz("kee"), nil)
}
func TestPrefixDBIterator1(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.Iterator(nil, nil)
checkDomain(t, itr, nil, nil)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBIterator2(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.Iterator(nil, bz(""))
checkDomain(t, itr, nil, bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBIterator3(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.Iterator(bz(""), nil)
checkDomain(t, itr, bz(""), nil)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBIterator4(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.Iterator(bz(""), bz(""))
checkDomain(t, itr, bz(""), bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator1(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(nil, nil)
checkDomain(t, itr, nil, nil)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator2(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(bz(""), nil)
checkDomain(t, itr, bz(""), nil)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator3(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(nil, bz(""))
checkDomain(t, itr, nil, bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator4(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(bz(""), bz(""))
checkDomain(t, itr, bz(""), bz(""))
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator5(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(bz("1"), nil)
checkDomain(t, itr, bz("1"), nil)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, true)
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator6(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(bz("2"), nil)
checkDomain(t, itr, bz("2"), nil)
checkItem(t, itr, bz("3"), bz("value3"))
checkNext(t, itr, true)
checkItem(t, itr, bz("2"), bz("value2"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}
func TestPrefixDBReverseIterator7(t *testing.T) {
db := mockDBWithStuff()
pdb := NewPrefixDB(db, bz("key"))
itr := pdb.ReverseIterator(nil, bz("2"))
checkDomain(t, itr, nil, bz("2"))
checkItem(t, itr, bz("1"), bz("value1"))
checkNext(t, itr, true)
checkItem(t, itr, bz(""), bz("value"))
checkNext(t, itr, false)
checkInvalid(t, itr)
itr.Close()
}

+ 0
- 37
libs/db/remotedb/doc.go View File

@ -1,37 +0,0 @@
/*
remotedb is a package for connecting to distributed Tendermint db.DB
instances. Its purpose is to detach difficult deployments, such as
CLevelDB which requires gcc, or databases that require custom
configuration such as extra disk space. It also eases the burden and
cost of deploying database dependencies for Tendermint developers.
Most importantly, it is built on the highly performant gRPC transport.
remotedb's RemoteDB implements db.DB, so it can be used like any
other database. One just has to explicitly connect to the
remote database with a client setup such as:
client, err := remotedb.NewRemoteDB(addr, cert)
// Make sure to invoke InitRemote!
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil {
log.Fatalf("Failed to initialize the remote db")
}
client.Set(key1, value)
gv1 := client.SetSync(k2, v2)
client.Delete(k1)
gv2 := client.Get(k1)
for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() {
ik, iv := itr.Key(), itr.Value()
ds, de := itr.Domain()
}
stats := client.Stats()
if !client.Has(dk1) {
client.SetSync(dk1, dv1)
}
*/
package remotedb

+ 0
- 22
libs/db/remotedb/grpcdb/client.go View File

@ -1,22 +0,0 @@
package grpcdb
import (
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)
// NewClient creates a gRPC client connected to the gRPC server bound at serverAddr,
// using TLS credentials loaded from serverCert.
func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {
creds, err := credentials.NewClientTLSFromFile(serverCert, "")
if err != nil {
return nil, err
}
cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
if err != nil {
return nil, err
}
return protodb.NewDBClient(cc), nil
}

+ 0
- 32
libs/db/remotedb/grpcdb/doc.go View File

@ -1,32 +0,0 @@
/*
grpcdb distributes Tendermint's db.DB instances over the gRPC
transport, decoupling local db.DB usage from applications so the
databases can be used over a network in a highly performant manner.
grpcdb allows users to initialize a database server as they would
locally and invoke the respective db.DB methods.
Most users shouldn't use this package directly, but should instead use
remotedb. Only lower-level users and database server deployers
should use it, for functionality such as:
ln, err := net.Listen("tcp", "0.0.0.0:0")
srv := grpcdb.NewServer()
defer srv.Stop()
go func() {
if err := srv.Serve(ln); err != nil {
t.Fatalf("BindServer: %v", err)
}
}()
or
addr := ":8998"
cert := "server.crt"
key := "server.key"
go func() {
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
log.Fatalf("BindServer: %v", err)
}
}()
*/
package grpcdb

+ 0
- 52
libs/db/remotedb/grpcdb/example_test.go View File

@ -1,52 +0,0 @@
package grpcdb_test
import (
"bytes"
"context"
"log"
grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)
func Example() {
addr := ":8998"
cert := "server.crt"
key := "server.key"
go func() {
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil {
log.Fatalf("BindServer: %v", err)
}
}()
client, err := grpcdb.NewClient(addr, cert)
if err != nil {
log.Fatalf("Failed to create grpcDB client: %v", err)
}
ctx := context.Background()
// 1. Initialize the DB
in := &protodb.Init{
Type: "leveldb",
Name: "grpc-uno-test",
Dir: ".",
}
if _, err := client.Init(ctx, in); err != nil {
log.Fatalf("Init error: %v", err)
}
// 2. Now it can be used!
query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")}
if _, err := client.SetSync(ctx, query1); err != nil {
log.Fatalf("SetSync err: %v", err)
}
query2 := &protodb.Entity{Key: []byte("Project")}
read, err := client.Get(ctx, query2)
if err != nil {
log.Fatalf("Get err: %v", err)
}
if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) {
log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w)
}
}

+ 0
- 200
libs/db/remotedb/grpcdb/server.go View File

@ -1,200 +0,0 @@
package grpcdb
import (
"context"
"net"
"sync"
"time"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"github.com/tendermint/tendermint/libs/db"
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)
// ListenAndServe is a blocking function that sets up a gRPC based
// server at the address supplied, with the gRPC options passed in.
// In normal usage, invoke it in a goroutine as you would http.ListenAndServe.
func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error {
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}
srv, err := NewServer(cert, key, opts...)
if err != nil {
return err
}
return srv.Serve(ln)
}
func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) {
creds, err := credentials.NewServerTLSFromFile(cert, key)
if err != nil {
return nil, err
}
opts = append(opts, grpc.Creds(creds))
srv := grpc.NewServer(opts...)
protodb.RegisterDBServer(srv, new(server))
return srv, nil
}
type server struct {
mu sync.Mutex
db db.DB
}
var _ protodb.DBServer = (*server)(nil)
// Init initializes the server's database. Only one type of database
// can be initialized per server.
//
// Dir is the directory on the file system in which the DB will be stored (if backed by disk) (TODO: remove)
//
// Name is the representative filesystem entry's basepath
//
// Type can be either one of:
// * cleveldb (if built with gcc enabled)
// * fsdb
// * memdb
// * leveldb
// See https://godoc.org/github.com/tendermint/tendermint/libs/db#DBBackendType
func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) {
s.mu.Lock()
defer s.mu.Unlock()
s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir)
return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil
}
func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
s.db.Delete(in.Key)
return nothing, nil
}
var nothing = new(protodb.Nothing)
func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
s.db.DeleteSync(in.Key)
return nothing, nil
}
func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
value := s.db.Get(in.Key)
return &protodb.Entity{Value: value}, nil
}
func (s *server) GetStream(ds protodb.DB_GetStreamServer) error {
// Receive routine
responsesChan := make(chan *protodb.Entity)
go func() {
defer close(responsesChan)
ctx := context.Background()
for {
in, err := ds.Recv()
if err != nil {
responsesChan <- &protodb.Entity{Err: err.Error()}
return
}
out, err := s.Get(ctx, in)
if err != nil {
if out == nil {
out = new(protodb.Entity)
out.Key = in.Key
}
out.Err = err.Error()
responsesChan <- out
return
}
// Otherwise continue on
responsesChan <- out
}
}()
// Send routine, block until we return
for out := range responsesChan {
if err := ds.Send(out); err != nil {
return err
}
}
return nil
}
func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) {
exists := s.db.Has(in.Key)
return &protodb.Entity{Exists: exists}, nil
}
func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
s.db.Set(in.Key, in.Value)
return nothing, nil
}
func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
s.db.SetSync(in.Key, in.Value)
return nothing, nil
}
func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error {
it := s.db.Iterator(query.Start, query.End)
defer it.Close()
return s.handleIterator(it, dis.Send)
}
func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error {
for it.Valid() {
start, end := it.Domain()
out := &protodb.Iterator{
Domain: &protodb.Domain{Start: start, End: end},
Valid: it.Valid(),
Key: it.Key(),
Value: it.Value(),
}
if err := sendFunc(out); err != nil {
return err
}
// Finally move the iterator forward
it.Next()
}
return nil
}
func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error {
it := s.db.ReverseIterator(query.Start, query.End)
defer it.Close()
return s.handleIterator(it, dis.Send)
}
func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) {
stats := s.db.Stats()
return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil
}
func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
return s.batchWrite(c, b, false)
}
func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) {
return s.batchWrite(c, b, true)
}
func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) {
bat := s.db.NewBatch()
defer bat.Close()
for _, op := range b.Ops {
switch op.Type {
case protodb.Operation_SET:
bat.Set(op.Entity.Key, op.Entity.Value)
case protodb.Operation_DELETE:
bat.Delete(op.Entity.Key)
}
}
if sync {
bat.WriteSync()
} else {
bat.Write()
}
return nothing, nil
}
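A hedged client-side sketch of the streaming Iterator RPC above (the address, certificate path, and prior state are assumptions: a server must already be running, e.g. via ListenAndServe, with its database initialized through the Init RPC as in the earlier example): each Recv returns one key/value message, and the stream ends with io.EOF once the server-side iterator is exhausted.

package main

import (
    "context"
    "io"
    "log"

    grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
    protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)

func main() {
    client, err := grpcdb.NewClient("localhost:8998", "server.crt")
    if err != nil {
        log.Fatalf("NewClient: %v", err)
    }
    ctx := context.Background()

    // nil Start/End ask the server to iterate over the full key range.
    stream, err := client.Iterator(ctx, &protodb.Entity{Start: nil, End: nil})
    if err != nil {
        log.Fatalf("Iterator: %v", err)
    }
    for {
        kv, err := stream.Recv()
        if err == io.EOF {
            break // server-side iterator exhausted
        }
        if err != nil {
            log.Fatalf("Recv: %v", err)
        }
        log.Printf("%s = %s", kv.Key, kv.Value)
    }
}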

+ 0
- 914
libs/db/remotedb/proto/defs.pb.go View File

@ -1,914 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: defs.proto
/*
Package protodb is a generated protocol buffer package.
It is generated from these files:
defs.proto
It has these top-level messages:
Batch
Operation
Entity
Nothing
Domain
Iterator
Stats
Init
*/
package protodb
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Operation_Type int32
const (
Operation_SET Operation_Type = 0
Operation_DELETE Operation_Type = 1
)
var Operation_Type_name = map[int32]string{
0: "SET",
1: "DELETE",
}
var Operation_Type_value = map[string]int32{
"SET": 0,
"DELETE": 1,
}
func (x Operation_Type) String() string {
return proto.EnumName(Operation_Type_name, int32(x))
}
func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
type Batch struct {
Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"`
}
func (m *Batch) Reset() { *m = Batch{} }
func (m *Batch) String() string { return proto.CompactTextString(m) }
func (*Batch) ProtoMessage() {}
func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Batch) GetOps() []*Operation {
if m != nil {
return m.Ops
}
return nil
}
type Operation struct {
Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"`
Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"`
}
func (m *Operation) Reset() { *m = Operation{} }
func (m *Operation) String() string { return proto.CompactTextString(m) }
func (*Operation) ProtoMessage() {}
func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *Operation) GetEntity() *Entity {
if m != nil {
return m.Entity
}
return nil
}
func (m *Operation) GetType() Operation_Type {
if m != nil {
return m.Type
}
return Operation_SET
}
type Entity struct {
Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"`
Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"`
End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"`
Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"`
CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"`
}
func (m *Entity) Reset() { *m = Entity{} }
func (m *Entity) String() string { return proto.CompactTextString(m) }
func (*Entity) ProtoMessage() {}
func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Entity) GetId() int32 {
if m != nil {
return m.Id
}
return 0
}
func (m *Entity) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *Entity) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
func (m *Entity) GetExists() bool {
if m != nil {
return m.Exists
}
return false
}
func (m *Entity) GetStart() []byte {
if m != nil {
return m.Start
}
return nil
}
func (m *Entity) GetEnd() []byte {
if m != nil {
return m.End
}
return nil
}
func (m *Entity) GetErr() string {
if m != nil {
return m.Err
}
return ""
}
func (m *Entity) GetCreatedAt() int64 {
if m != nil {
return m.CreatedAt
}
return 0
}
type Nothing struct {
}
func (m *Nothing) Reset() { *m = Nothing{} }
func (m *Nothing) String() string { return proto.CompactTextString(m) }
func (*Nothing) ProtoMessage() {}
func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
type Domain struct {
Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"`
End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"`
}
func (m *Domain) Reset() { *m = Domain{} }
func (m *Domain) String() string { return proto.CompactTextString(m) }
func (*Domain) ProtoMessage() {}
func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *Domain) GetStart() []byte {
if m != nil {
return m.Start
}
return nil
}
func (m *Domain) GetEnd() []byte {
if m != nil {
return m.End
}
return nil
}
type Iterator struct {
Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"`
Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"`
Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"`
}
func (m *Iterator) Reset() { *m = Iterator{} }
func (m *Iterator) String() string { return proto.CompactTextString(m) }
func (*Iterator) ProtoMessage() {}
func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *Iterator) GetDomain() *Domain {
if m != nil {
return m.Domain
}
return nil
}
func (m *Iterator) GetValid() bool {
if m != nil {
return m.Valid
}
return false
}
func (m *Iterator) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *Iterator) GetValue() []byte {
if m != nil {
return m.Value
}
return nil
}
type Stats struct {
Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"`
}
func (m *Stats) Reset() { *m = Stats{} }
func (m *Stats) String() string { return proto.CompactTextString(m) }
func (*Stats) ProtoMessage() {}
func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *Stats) GetData() map[string]string {
if m != nil {
return m.Data
}
return nil
}
func (m *Stats) GetTimeAt() int64 {
if m != nil {
return m.TimeAt
}
return 0
}
type Init struct {
Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"`
Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"`
}
func (m *Init) Reset() { *m = Init{} }
func (m *Init) String() string { return proto.CompactTextString(m) }
func (*Init) ProtoMessage() {}
func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Init) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *Init) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Init) GetDir() string {
if m != nil {
return m.Dir
}
return ""
}
func init() {
proto.RegisterType((*Batch)(nil), "protodb.Batch")
proto.RegisterType((*Operation)(nil), "protodb.Operation")
proto.RegisterType((*Entity)(nil), "protodb.Entity")
proto.RegisterType((*Nothing)(nil), "protodb.Nothing")
proto.RegisterType((*Domain)(nil), "protodb.Domain")
proto.RegisterType((*Iterator)(nil), "protodb.Iterator")
proto.RegisterType((*Stats)(nil), "protodb.Stats")
proto.RegisterType((*Init)(nil), "protodb.Init")
proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for DB service
type DBClient interface {
Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error)
Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error)
GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error)
Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error)
Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error)
Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error)
ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error)
// rpc print(Nothing) returns (Entity) {}
Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error)
BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error)
BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error)
}
type dBClient struct {
cc *grpc.ClientConn
}
func NewDBClient(cc *grpc.ClientConn) DBClient {
return &dBClient{cc}
}
func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) {
out := new(Entity)
err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) {
out := new(Entity)
err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) {
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...)
if err != nil {
return nil, err
}
x := &dBGetStreamClient{stream}
return x, nil
}
type DB_GetStreamClient interface {
Send(*Entity) error
Recv() (*Entity, error)
grpc.ClientStream
}
type dBGetStreamClient struct {
grpc.ClientStream
}
func (x *dBGetStreamClient) Send(m *Entity) error {
return x.ClientStream.SendMsg(m)
}
func (x *dBGetStreamClient) Recv() (*Entity, error) {
m := new(Entity)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) {
out := new(Entity)
err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
out := new(Nothing)
err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
out := new(Nothing)
err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
out := new(Nothing)
err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) {
out := new(Nothing)
err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) {
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...)
if err != nil {
return nil, err
}
x := &dBIteratorClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type DB_IteratorClient interface {
Recv() (*Iterator, error)
grpc.ClientStream
}
type dBIteratorClient struct {
grpc.ClientStream
}
func (x *dBIteratorClient) Recv() (*Iterator, error) {
m := new(Iterator)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) {
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...)
if err != nil {
return nil, err
}
x := &dBReverseIteratorClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type DB_ReverseIteratorClient interface {
Recv() (*Iterator, error)
grpc.ClientStream
}
type dBReverseIteratorClient struct {
grpc.ClientStream
}
func (x *dBReverseIteratorClient) Recv() (*Iterator, error) {
m := new(Iterator)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) {
out := new(Stats)
err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) {
out := new(Nothing)
err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) {
out := new(Nothing)
err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for DB service
type DBServer interface {
Init(context.Context, *Init) (*Entity, error)
Get(context.Context, *Entity) (*Entity, error)
GetStream(DB_GetStreamServer) error
Has(context.Context, *Entity) (*Entity, error)
Set(context.Context, *Entity) (*Nothing, error)
SetSync(context.Context, *Entity) (*Nothing, error)
Delete(context.Context, *Entity) (*Nothing, error)
DeleteSync(context.Context, *Entity) (*Nothing, error)
Iterator(*Entity, DB_IteratorServer) error
ReverseIterator(*Entity, DB_ReverseIteratorServer) error
// rpc print(Nothing) returns (Entity) {}
Stats(context.Context, *Nothing) (*Stats, error)
BatchWrite(context.Context, *Batch) (*Nothing, error)
BatchWriteSync(context.Context, *Batch) (*Nothing, error)
}
func RegisterDBServer(s *grpc.Server, srv DBServer) {
s.RegisterService(&_DB_serviceDesc, srv)
}
func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Init)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).Init(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/Init",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).Init(ctx, req.(*Init))
}
return interceptor(ctx, in, info, handler)
}
func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Entity)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).Get(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/Get",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).Get(ctx, req.(*Entity))
}
return interceptor(ctx, in, info, handler)
}
func _DB_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(DBServer).GetStream(&dBGetStreamServer{stream})
}
type DB_GetStreamServer interface {
Send(*Entity) error
Recv() (*Entity, error)
grpc.ServerStream
}
type dBGetStreamServer struct {
grpc.ServerStream
}
func (x *dBGetStreamServer) Send(m *Entity) error {
return x.ServerStream.SendMsg(m)
}
func (x *dBGetStreamServer) Recv() (*Entity, error) {
m := new(Entity)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Entity)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).Has(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/Has",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).Has(ctx, req.(*Entity))
}
return interceptor(ctx, in, info, handler)
}
func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Entity)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).Set(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/Set",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).Set(ctx, req.(*Entity))
}
return interceptor(ctx, in, info, handler)
}
func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Entity)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).SetSync(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/SetSync",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).SetSync(ctx, req.(*Entity))
}
return interceptor(ctx, in, info, handler)
}
func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Entity)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).Delete(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/Delete",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).Delete(ctx, req.(*Entity))
}
return interceptor(ctx, in, info, handler)
}
func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Entity)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).DeleteSync(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/DeleteSync",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).DeleteSync(ctx, req.(*Entity))
}
return interceptor(ctx, in, info, handler)
}
func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(Entity)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(DBServer).Iterator(m, &dBIteratorServer{stream})
}
type DB_IteratorServer interface {
Send(*Iterator) error
grpc.ServerStream
}
type dBIteratorServer struct {
grpc.ServerStream
}
func (x *dBIteratorServer) Send(m *Iterator) error {
return x.ServerStream.SendMsg(m)
}
func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(Entity)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream})
}
type DB_ReverseIteratorServer interface {
Send(*Iterator) error
grpc.ServerStream
}
type dBReverseIteratorServer struct {
grpc.ServerStream
}
func (x *dBReverseIteratorServer) Send(m *Iterator) error {
return x.ServerStream.SendMsg(m)
}
func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Nothing)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).Stats(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/Stats",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).Stats(ctx, req.(*Nothing))
}
return interceptor(ctx, in, info, handler)
}
func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Batch)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).BatchWrite(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/BatchWrite",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).BatchWrite(ctx, req.(*Batch))
}
return interceptor(ctx, in, info, handler)
}
func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Batch)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(DBServer).BatchWriteSync(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/protodb.DB/BatchWriteSync",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch))
}
return interceptor(ctx, in, info, handler)
}
var _DB_serviceDesc = grpc.ServiceDesc{
ServiceName: "protodb.DB",
HandlerType: (*DBServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "init",
Handler: _DB_Init_Handler,
},
{
MethodName: "get",
Handler: _DB_Get_Handler,
},
{
MethodName: "has",
Handler: _DB_Has_Handler,
},
{
MethodName: "set",
Handler: _DB_Set_Handler,
},
{
MethodName: "setSync",
Handler: _DB_SetSync_Handler,
},
{
MethodName: "delete",
Handler: _DB_Delete_Handler,
},
{
MethodName: "deleteSync",
Handler: _DB_DeleteSync_Handler,
},
{
MethodName: "stats",
Handler: _DB_Stats_Handler,
},
{
MethodName: "batchWrite",
Handler: _DB_BatchWrite_Handler,
},
{
MethodName: "batchWriteSync",
Handler: _DB_BatchWriteSync_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "getStream",
Handler: _DB_GetStream_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "iterator",
Handler: _DB_Iterator_Handler,
ServerStreams: true,
},
{
StreamName: "reverseIterator",
Handler: _DB_ReverseIterator_Handler,
ServerStreams: true,
},
},
Metadata: "defs.proto",
}
func init() { proto.RegisterFile("defs.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 606 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e,
0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b,
0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95,
0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8,
0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 0xde, 0x7b, 0xb3, 0xf3,
0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2,
0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef,
0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01,
0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b,
0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b,
0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6,
0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44,
0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17,
0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3,
0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7,
0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43,
0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08,
0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e,
0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a,
0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b,
0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b,
0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58,
0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92,
0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a,
0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e,
0xe1, 0xd8, 0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72,
0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a,
0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0,
0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde,
0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03,
0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72,
0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9,
0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c,
0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 0xd8, 0xd5, 0x74,
0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7,
0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb,
0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75,
0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00,
}
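For orientation, the generated client above was used by dialing the gRPC service and calling the unary methods listed in _DB_serviceDesc. Below is a minimal sketch, assuming the pre-removal import path and the generated NewDBClient constructor; the address and the insecure dial option are illustrative only (the real grpcdb client dials with TLS credentials):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" // pre-removal import path (assumed)
)

func main() {
	// Illustrative only: grpcdb.NewClient normally dials with TLS credentials.
	conn, err := grpc.Dial("localhost:26659", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := protodb.NewDBClient(conn) // generated constructor for the protodb.DB service
	ctx := context.Background()

	// Unary calls map onto the handlers registered in _DB_serviceDesc.
	if _, err := client.Set(ctx, &protodb.Entity{Key: []byte("k"), Value: []byte("v")}); err != nil {
		log.Fatal(err)
	}
	res, err := client.Get(ctx, &protodb.Entity{Key: []byte("k")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\n", res.Value)
}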

+ 0
- 71
libs/db/remotedb/proto/defs.proto View File

@@ -1,71 +0,0 @@
syntax = "proto3";
package protodb;
message Batch {
repeated Operation ops = 1;
}
message Operation {
Entity entity = 1;
enum Type {
SET = 0;
DELETE = 1;
}
Type type = 2;
}
message Entity {
int32 id = 1;
bytes key = 2;
bytes value = 3;
bool exists = 4;
bytes start = 5;
bytes end = 6;
string err = 7;
int64 created_at = 8;
}
message Nothing {
}
message Domain {
bytes start = 1;
bytes end = 2;
}
message Iterator {
Domain domain = 1;
bool valid = 2;
bytes key = 3;
bytes value = 4;
}
message Stats {
map<string, string> data = 1;
int64 time_at = 2;
}
message Init {
string Type = 1;
string Name = 2;
string Dir = 3;
}
service DB {
rpc init(Init) returns (Entity) {}
rpc get(Entity) returns (Entity) {}
rpc getStream(stream Entity) returns (stream Entity) {}
rpc has(Entity) returns (Entity) {}
rpc set(Entity) returns (Nothing) {}
rpc setSync(Entity) returns (Nothing) {}
rpc delete(Entity) returns (Nothing) {}
rpc deleteSync(Entity) returns (Nothing) {}
rpc iterator(Entity) returns (stream Iterator) {}
rpc reverseIterator(Entity) returns (stream Iterator) {}
// rpc print(Nothing) returns (Entity) {}
rpc stats(Nothing) returns (Stats) {}
rpc batchWrite(Batch) returns (Nothing) {}
rpc batchWriteSync(Batch) returns (Nothing) {}
}
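The service definition above is what the generated registration code implements. A minimal sketch of standing up the server side with the repository's grpcdb helper, reusing only calls that appear in the deleted test further down (the certificate and key file names are the test fixtures shipped alongside):

package main

import (
	"log"
	"net"

	"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" // pre-removal import path (assumed)
)

func main() {
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	// test.crt and test.key are the self-signed pair included in this package (see below).
	srv, err := grpcdb.NewServer("test.crt", "test.key")
	if err != nil {
		log.Fatal(err)
	}
	defer srv.Stop()
	if err := srv.Serve(ln); err != nil {
		log.Fatal(err)
	}
}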

+ 0
- 266
libs/db/remotedb/remotedb.go View File

@@ -1,266 +0,0 @@
package remotedb
import (
"context"
"fmt"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto"
)
type RemoteDB struct {
ctx context.Context
dc protodb.DBClient
}
func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) {
return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey))
}
func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) {
if err != nil {
return nil, err
}
return &RemoteDB{dc: gdc, ctx: context.Background()}, nil
}
type Init struct {
Dir string
Name string
Type string
}
func (rd *RemoteDB) InitRemote(in *Init) error {
_, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name})
return err
}
var _ db.DB = (*RemoteDB)(nil)
// Close is a noop currently
func (rd *RemoteDB) Close() {
}
func (rd *RemoteDB) Delete(key []byte) {
if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil {
panic(fmt.Sprintf("RemoteDB.Delete: %v", err))
}
}
func (rd *RemoteDB) DeleteSync(key []byte) {
if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil {
panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err))
}
}
func (rd *RemoteDB) Set(key, value []byte) {
if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
panic(fmt.Sprintf("RemoteDB.Set: %v", err))
}
}
func (rd *RemoteDB) SetSync(key, value []byte) {
if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil {
panic(fmt.Sprintf("RemoteDB.SetSync: %v", err))
}
}
func (rd *RemoteDB) Get(key []byte) []byte {
res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key})
if err != nil {
panic(fmt.Sprintf("RemoteDB.Get error: %v", err))
}
return res.Value
}
func (rd *RemoteDB) Has(key []byte) bool {
res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key})
if err != nil {
panic(fmt.Sprintf("RemoteDB.Has error: %v", err))
}
return res.Exists
}
func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator {
dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end})
if err != nil {
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
}
return makeReverseIterator(dic)
}
func (rd *RemoteDB) NewBatch() db.Batch {
return &batch{
db: rd,
ops: nil,
}
}
// TODO: Implement Print when db.DB implements a method
// to print to a string and not db.Print to stdout.
func (rd *RemoteDB) Print() {
panic("Unimplemented")
}
func (rd *RemoteDB) Stats() map[string]string {
stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{})
if err != nil {
panic(fmt.Sprintf("RemoteDB.Stats error: %v", err))
}
if stats == nil {
return nil
}
return stats.Data
}
func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator {
dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end})
if err != nil {
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err))
}
return makeIterator(dic)
}
func makeIterator(dic protodb.DB_IteratorClient) db.Iterator {
return &iterator{dic: dic}
}
func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator {
return &reverseIterator{dric: dric}
}
type reverseIterator struct {
dric protodb.DB_ReverseIteratorClient
cur *protodb.Iterator
}
var _ db.Iterator = (*reverseIterator)(nil)
func (rItr *reverseIterator) Valid() bool {
return rItr.cur != nil && rItr.cur.Valid
}
func (rItr *reverseIterator) Domain() (start, end []byte) {
if rItr.cur == nil || rItr.cur.Domain == nil {
return nil, nil
}
return rItr.cur.Domain.Start, rItr.cur.Domain.End
}
// Next advances the current reverseIterator
func (rItr *reverseIterator) Next() {
var err error
rItr.cur, err = rItr.dric.Recv()
if err != nil {
panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err))
}
}
func (rItr *reverseIterator) Key() []byte {
if rItr.cur == nil {
return nil
}
return rItr.cur.Key
}
func (rItr *reverseIterator) Value() []byte {
if rItr.cur == nil {
return nil
}
return rItr.cur.Value
}
func (rItr *reverseIterator) Close() {
}
// iterator implements the db.Iterator by retrieving
// streamed iterators from the remote backend as
// needed. It is NOT safe for concurrent usage,
// matching the behavior of other iterators.
type iterator struct {
dic protodb.DB_IteratorClient
cur *protodb.Iterator
}
var _ db.Iterator = (*iterator)(nil)
func (itr *iterator) Valid() bool {
return itr.cur != nil && itr.cur.Valid
}
func (itr *iterator) Domain() (start, end []byte) {
if itr.cur == nil || itr.cur.Domain == nil {
return nil, nil
}
return itr.cur.Domain.Start, itr.cur.Domain.End
}
// Next advances the current iterator
func (itr *iterator) Next() {
var err error
itr.cur, err = itr.dic.Recv()
if err != nil {
panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err))
}
}
func (itr *iterator) Key() []byte {
if itr.cur == nil {
return nil
}
return itr.cur.Key
}
func (itr *iterator) Value() []byte {
if itr.cur == nil {
return nil
}
return itr.cur.Value
}
func (itr *iterator) Close() {
err := itr.dic.CloseSend()
if err != nil {
panic(fmt.Sprintf("Error closing iterator: %v", err))
}
}
type batch struct {
db *RemoteDB
ops []*protodb.Operation
}
var _ db.Batch = (*batch)(nil)
func (bat *batch) Set(key, value []byte) {
op := &protodb.Operation{
Entity: &protodb.Entity{Key: key, Value: value},
Type: protodb.Operation_SET,
}
bat.ops = append(bat.ops, op)
}
func (bat *batch) Delete(key []byte) {
op := &protodb.Operation{
Entity: &protodb.Entity{Key: key},
Type: protodb.Operation_DELETE,
}
bat.ops = append(bat.ops, op)
}
func (bat *batch) Write() {
if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil {
panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err))
}
}
func (bat *batch) WriteSync() {
if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil {
panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err))
}
}
func (bat *batch) Close() {
bat.ops = nil
}

+ 0
- 123
libs/db/remotedb/remotedb_test.go View File

@@ -1,123 +0,0 @@
package remotedb_test
import (
"net"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/libs/db/remotedb"
"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb"
)
func TestRemoteDB(t *testing.T) {
cert := "test.crt"
key := "test.key"
ln, err := net.Listen("tcp", "localhost:0")
require.Nil(t, err, "expecting a port to have been assigned on which we can listen")
srv, err := grpcdb.NewServer(cert, key)
require.Nil(t, err)
defer srv.Stop()
go func() {
if err := srv.Serve(ln); err != nil {
t.Fatalf("BindServer: %v", err)
}
}()
client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert)
require.Nil(t, err, "expecting a successful client creation")
dbName := "test-remote-db"
require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"}))
defer func() {
err := os.RemoveAll(dbName + ".db")
if err != nil {
panic(err)
}
}()
k1 := []byte("key-1")
v1 := client.Get(k1)
require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1)
vv1 := []byte("value-1")
client.Set(k1, vv1)
gv1 := client.Get(k1)
require.Equal(t, gv1, vv1)
// Simple iteration
itr := client.Iterator(nil, nil)
itr.Next()
require.Equal(t, itr.Key(), []byte("key-1"))
require.Equal(t, itr.Value(), []byte("value-1"))
require.Panics(t, itr.Next)
itr.Close()
// Set some more keys
k2 := []byte("key-2")
v2 := []byte("value-2")
client.SetSync(k2, v2)
has := client.Has(k2)
require.True(t, has)
gv2 := client.Get(k2)
require.Equal(t, gv2, v2)
// More iteration
itr = client.Iterator(nil, nil)
itr.Next()
require.Equal(t, itr.Key(), []byte("key-1"))
require.Equal(t, itr.Value(), []byte("value-1"))
itr.Next()
require.Equal(t, itr.Key(), []byte("key-2"))
require.Equal(t, itr.Value(), []byte("value-2"))
require.Panics(t, itr.Next)
itr.Close()
// Deletion
client.Delete(k1)
client.DeleteSync(k2)
gv1 = client.Get(k1)
gv2 = client.Get(k2)
require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore")
require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore")
// Batch tests - set
k3 := []byte("key-3")
k4 := []byte("key-4")
k5 := []byte("key-5")
v3 := []byte("value-3")
v4 := []byte("value-4")
v5 := []byte("value-5")
bat := client.NewBatch()
bat.Set(k3, v3)
bat.Set(k4, v4)
rv3 := client.Get(k3)
require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored")
rv4 := client.Get(k4)
require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored")
bat.Write()
rv3 = client.Get(k3)
require.Equal(t, rv3, v3, "expecting k3 to have been stored")
rv4 = client.Get(k4)
require.Equal(t, rv4, v4, "expecting k4 to have been stored")
// Batch tests - deletion
bat = client.NewBatch()
bat.Delete(k4)
bat.Delete(k3)
bat.WriteSync()
rv3 = client.Get(k3)
require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted")
rv4 = client.Get(k4)
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")
// Batch tests - set and delete
bat = client.NewBatch()
bat.Set(k4, v4)
bat.Set(k5, v5)
bat.Delete(k4)
bat.WriteSync()
rv4 = client.Get(k4)
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted")
rv5 := client.Get(k5)
require.Equal(t, rv5, v5, "expecting k5 to have been stored")
}

+ 0
- 25
libs/db/remotedb/test.crt View File

@@ -1,25 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIEOjCCAiKgAwIBAgIQYO+jRR0Sbs+WzU/hj2aoxzANBgkqhkiG9w0BAQsFADAZ
MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy
MDIxMTAyMDRaMBMxETAPBgNVBAMTCHJlbW90ZWRiMIIBIjANBgkqhkiG9w0BAQEF
AAOCAQ8AMIIBCgKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydp
qYYHYei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7
iZjzAJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+
hCbuwAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7x
tW3/Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHd
A5I4+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABo4GDMIGAMA4GA1Ud
DwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O
BBYEFOA8wzCYhoZmy0WHgnv/0efijUMKMB8GA1UdIwQYMBaAFNSTPe743aIx7rIp
vn5HV3gJ4z1hMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAKZf
EVo0i9nMZv6ZJjbmAlMfo5FH41/oBYC8pyGAnJKl42raXKJAbl45h80iGn3vNggf
7HJjN+znAHDFYjIwK2IV2WhHPyxK6uk+FA5uBR/aAPcw+zhRfXUMYdhNHr6KBlZZ
bvD7Iq4UALg+XFQz/fQkIi7QvTBwkYyPNA2+a/TGf6myMp26hoz73DQXklqm6Zle
myPs1Vp9bTgOv/3l64BMUV37FZ2TyiisBkV1qPEoDxT7Fbi8G1K8gMDLd0wu0jvX
nz96nk30TDnZewV1fhkMJVKKGiLbaIgHcu1lWsWJZ0tdc+MF7R9bLBO5T0cTDgNy
V8/51g+Cxu5SSHKjFkT0vBBONhjPmRqzJpxOQfHjiv8mmHwwiaNNy2VkJHj5GHer
64r67fQTSqAifzgwAbXYK+ObUbx4PnHvSYSF5dbcR1Oj6UTVtGAgdmN2Y03AIc1B
CiaojcMVuMRz/SvmPWl34GBvvT5/h9VCpHEB3vV6bQxJb5U1fLyo4GABA2Ic3DHr
kV5p7CZI06UNbyQyFtnEb5XoXywRa4Df7FzDIv3HL13MtyXrYrJqC1eAbn+3jGdh
bQa510mWYAlQQmzHSf/SLKott4QKR3SmhOGqGKNvquAYJ9XLdYdsPmKKGH6iGUD8
n7yEi0KMD/BHsPQNNLatsR2SxqGDeLhbLR0w2hig
-----END CERTIFICATE-----

+ 0
- 27
libs/db/remotedb/test.key View File

@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEpQIBAAKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydpqYYH
Yei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7iZjz
AJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+hCbu
wAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7xtW3/
Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHdA5I4
+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABAoIBAQCEVFAZ3puc7aIU
NuIXqwmMz+KMFuMr+SL6aYr6LhB2bhpfQSr6LLEu1L6wMm1LnCbLneJVtW+1/6U+
SyNFRmzrmmLNmZx7c0AvZb14DQ4fJ8uOjryje0vptUHT1YJJ4n5R1L7yJjCElsC8
cDBPfO+sOzlaGmBmuxU7NkNp0k/WJc1Wnn5WFCKKk8BCH1AUKvn/vwbRV4zl/Be7
ApywPUouV+GJlTAG5KLb15CWKSqFNJxUJ6K7NnmfDoy7muUUv8MtrTn59XTH4qK7
p/3A8tdNpR/RpEJ8+y3kS9CDZBVnsk0j0ptT//jdt1vSsylXxrf7vjLnyguRZZ5H
Vwe2POotAoGBAOY1UaFjtIz2G5qromaUtrPb5EPWRU8fiLtUXUDKG8KqNAqsGbDz
Stw1mVFyyuaFMReO18djCvcja1xxF3TZbdpV1k7RfcpEZXiFzBAPgeEGdA3Tc3V2
byuJQthWamCBxF/7OGUmH/E/kH0pv5g9+eIitK/CUC2YUhCnubhchGAXAoGBAMxL
O7mnPqDJ2PqxVip/lL6VnchtF1bx1aDNr83rVTf+BEsOgCIFoDEBIVKDnhXlaJu7
8JN4la/esytq4j3nM1cl6mjvw2ixYmwQtKiDuNiyb88hhQ+nxVsbIpYxtbhsj+u5
hOrMN6jKd0GVWsYpdNvY/dXZG1MXhbWwExjRAY+vAoGBAKBu3jHUU5q9VWWIYciN
sXpNL5qbNHg86MRsugSSFaCnj1c0sz7ffvdSn0Pk9USL5Defw/9fpd+wHn0xD4DO
msFDevQ5CSoyWmkRDbLPq9sP7UdJariczkMQCLbOGpqhNSMS6C2N0UsG2oJv2ueV
oZUYTMYEbG4qLl8PFN5IE7UHAoGAGwEq4OyZm7lyxBii8jUxHUw7sh2xgx2uhnYJ
8idUeXVLbfx5tYWW2kNy+yxIvk432LYsI+JBryC6AFg9lb81CyUI6lwfMXyZLP28
U7Ytvf9ARloA88PSk6tvk/j4M2uuTpOUXVEnXll9EB9FA4LBXro9O4JaWU53rz+a
FqKyGSMCgYEAuYCGC+Fz7lIa0aE4tT9mwczQequxGYsL41KR/4pDO3t9QsnzunLY
fvCFhteBOstwTBBdfBaKIwSp3zI2QtA4K0Jx9SAJ9q0ft2ciB9ukUFBhC9+TqzXg
gSz3XpRtI8PhwAxZgCnov+NPQV8IxvD4ZgnnEiRBHrYnSEsaMLoVnkw=
-----END RSA PRIVATE KEY-----

+ 0
- 136
libs/db/types.go View File

@@ -1,136 +0,0 @@
package db
// DBs are goroutine safe.
type DB interface {
// Get returns nil iff key doesn't exist.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key, value readonly []byte
Get([]byte) []byte
// Has checks if a key exists.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key, value readonly []byte
Has(key []byte) bool
// Set sets the key.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key, value readonly []byte
Set([]byte, []byte)
SetSync([]byte, []byte)
// Delete deletes the key.
// A nil key is interpreted as an empty byteslice.
// CONTRACT: key readonly []byte
Delete([]byte)
DeleteSync([]byte)
// Iterate over a domain of keys in ascending order. End is exclusive.
// Start must be less than end, or the Iterator is invalid.
// A nil start is interpreted as an empty byteslice.
// If end is nil, iterates up to the last item (inclusive).
// CONTRACT: No writes may happen within a domain while an iterator exists over it.
// CONTRACT: start, end readonly []byte
Iterator(start, end []byte) Iterator
// Iterate over a domain of keys in descending order. End is exclusive.
// Start must be less than end, or the Iterator is invalid.
// If start is nil, iterates up to the first/least item (inclusive).
// If end is nil, iterates from the last/greatest item (inclusive).
// CONTRACT: No writes may happen within a domain while an iterator exists over it.
// CONTRACT: start, end readonly []byte
ReverseIterator(start, end []byte) Iterator
// Closes the connection.
Close()
// Creates a batch for atomic updates.
NewBatch() Batch
// For debugging
Print()
// Stats returns a map of property values for all keys and the size of the cache.
Stats() map[string]string
}
//----------------------------------------
// Batch
// Batch Close must be called when the program no longer needs the object.
type Batch interface {
SetDeleter
Write()
WriteSync()
Close()
}
type SetDeleter interface {
Set(key, value []byte) // CONTRACT: key, value readonly []byte
Delete(key []byte) // CONTRACT: key readonly []byte
}
//----------------------------------------
// Iterator
/*
Usage:
var itr Iterator = ...
defer itr.Close()
for ; itr.Valid(); itr.Next() {
k, v := itr.Key(), itr.Value()
// ...
}
*/
type Iterator interface {
// The start & end (exclusive) limits to iterate over.
// If end < start, then the Iterator goes in reverse order.
//
// A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate
// over anything with the prefix []byte{12, 13}.
//
// The smallest key is the empty byte array []byte{} - see BeginningKey().
// The largest key is the nil byte array []byte(nil) - see EndingKey().
// CONTRACT: start, end readonly []byte
Domain() (start []byte, end []byte)
// Valid returns whether the current position is valid.
// Once invalid, an Iterator is forever invalid.
Valid() bool
// Next moves the iterator to the next sequential key in the database, as
// defined by order of iteration.
//
// If Valid returns false, this method will panic.
Next()
// Key returns the key of the cursor.
// If Valid returns false, this method will panic.
// CONTRACT: key readonly []byte
Key() (key []byte)
// Value returns the value of the cursor.
// If Valid returns false, this method will panic.
// CONTRACT: value readonly []byte
Value() (value []byte)
// Close releases the Iterator.
Close()
}
// For testing convenience.
func bz(s string) []byte {
return []byte(s)
}
// We defensively turn nil keys or values into []byte{} for
// most operations.
func nonNilBytes(bz []byte) []byte {
if bz == nil {
return []byte{}
}
return bz
}
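The DB, Batch, and Iterator contracts documented above are easiest to see end to end. Below is a minimal sketch, assuming the tm-cmn/db replacement keeps the same constructors and interface as the removed package (the import swaps elsewhere in this diff use the same dbm alias), with NewMemDB taken as an available backend:

package main

import (
	"fmt"

	dbm "github.com/tendermint/tm-cmn/db" // assumed to expose the same API as the removed libs/db
)

func main() {
	db := dbm.NewMemDB()
	defer db.Close()

	// Batch Close must be called when the batch is no longer needed.
	b := db.NewBatch()
	defer b.Close()
	b.Set([]byte("a/1"), []byte("value_1"))
	b.Set([]byte("a/2"), []byte("value_2"))
	b.Delete([]byte("a/2"))
	b.Write() // applies the queued operations atomically

	// Iterate the whole domain in ascending order; end is exclusive.
	itr := db.Iterator(nil, nil)
	defer itr.Close()
	for ; itr.Valid(); itr.Next() {
		fmt.Printf("%s = %s\n", itr.Key(), itr.Value())
	}
}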

+ 0
- 45
libs/db/util.go View File

@@ -1,45 +0,0 @@
package db
import (
"bytes"
)
func cp(bz []byte) (ret []byte) {
ret = make([]byte, len(bz))
copy(ret, bz)
return ret
}
// Returns a slice of the same length (big endian)
// except incremented by one.
// Returns nil on overflow (e.g. if bz bytes are all 0xFF)
// CONTRACT: len(bz) > 0
func cpIncr(bz []byte) (ret []byte) {
if len(bz) == 0 {
panic("cpIncr expects non-zero bz length")
}
ret = cp(bz)
for i := len(bz) - 1; i >= 0; i-- {
if ret[i] < byte(0xFF) {
ret[i]++
return
}
ret[i] = byte(0x00)
if i == 0 {
// Overflow
return nil
}
}
return nil
}
// See DB interface documentation for more information.
func IsKeyInDomain(key, start, end []byte) bool {
if bytes.Compare(key, start) < 0 {
return false
}
if end != nil && bytes.Compare(end, key) <= 0 {
return false
}
return true
}
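cpIncr and IsKeyInDomain are easiest to read with concrete values. The following is a sketch of an in-package test exercising the behaviour described by the comments above; it is not part of the original suite, and the expected values are derived directly from the definitions shown:

package db

import (
	"bytes"
	"testing"
)

// Sketch only: concrete cases for the helpers above.
func TestUtilHelpersSketch(t *testing.T) {
	// Incrementing {0x01, 0xFF} carries into the higher byte.
	if got := cpIncr([]byte{0x01, 0xFF}); !bytes.Equal(got, []byte{0x02, 0x00}) {
		t.Fatalf("cpIncr: got %X", got)
	}
	// An all-0xFF input overflows and yields nil.
	if cpIncr([]byte{0xFF, 0xFF}) != nil {
		t.Fatal("cpIncr: expected nil on overflow")
	}
	// "b" lies in the half-open domain ["a", "c").
	if !IsKeyInDomain([]byte("b"), []byte("a"), []byte("c")) {
		t.Fatal("IsKeyInDomain: expected true")
	}
	// End is exclusive, so "c" itself falls outside ["a", "c").
	if IsKeyInDomain([]byte("c"), []byte("a"), []byte("c")) {
		t.Fatal("IsKeyInDomain: expected false for the end key")
	}
}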

+ 0
- 104
libs/db/util_test.go View File

@@ -1,104 +0,0 @@
package db
import (
"fmt"
"os"
"testing"
)
// Empty iterator for empty db.
func TestPrefixIteratorNoMatchNil(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
itr := IteratePrefix(db, []byte("2"))
checkInvalid(t, itr)
})
}
}
// Empty iterator for db populated after iterator created.
func TestPrefixIteratorNoMatch1(t *testing.T) {
for backend := range backends {
if backend == BoltDBBackend {
t.Log("bolt does not support concurrent writes while iterating")
continue
}
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
itr := IteratePrefix(db, []byte("2"))
db.SetSync(bz("1"), bz("value_1"))
checkInvalid(t, itr)
})
}
}
// Empty iterator for prefix starting after db entry.
func TestPrefixIteratorNoMatch2(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
db.SetSync(bz("3"), bz("value_3"))
itr := IteratePrefix(db, []byte("4"))
checkInvalid(t, itr)
})
}
}
// Iterator with single val for db with single val, starting from that val.
func TestPrefixIteratorMatch1(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
db.SetSync(bz("2"), bz("value_2"))
itr := IteratePrefix(db, bz("2"))
checkValid(t, itr, true)
checkItem(t, itr, bz("2"), bz("value_2"))
checkNext(t, itr, false)
// Once invalid...
checkInvalid(t, itr)
})
}
}
// Iterator with prefix iterates over everything with same prefix.
func TestPrefixIteratorMatches1N(t *testing.T) {
for backend := range backends {
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) {
db, dir := newTempDB(t, backend)
defer os.RemoveAll(dir)
// prefixed
db.SetSync(bz("a/1"), bz("value_1"))
db.SetSync(bz("a/3"), bz("value_3"))
// not
db.SetSync(bz("b/3"), bz("value_3"))
db.SetSync(bz("a-3"), bz("value_3"))
db.SetSync(bz("a.3"), bz("value_3"))
db.SetSync(bz("abcdefg"), bz("value_3"))
itr := IteratePrefix(db, bz("a/"))
checkValid(t, itr, true)
checkItem(t, itr, bz("a/1"), bz("value_1"))
checkNext(t, itr, true)
checkItem(t, itr, bz("a/3"), bz("value_3"))
// Bad!
checkNext(t, itr, false)
// Once invalid...
checkInvalid(t, itr)
})
}
}

+ 1
- 1
lite/dbprovider.go View File

@@ -7,10 +7,10 @@ import (
amino "github.com/tendermint/go-amino"
cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
dbm "github.com/tendermint/tendermint/libs/db"
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
var _ PersistentProvider = (*DBProvider)(nil)


+ 1
- 1
lite/dynamic_verifier_test.go View File

@@ -8,9 +8,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tendermint/libs/db"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestInquirerValidPath(t *testing.T) {


+ 1
- 1
lite/provider_test.go View File

@@ -7,10 +7,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tendermint/libs/db"
log "github.com/tendermint/tendermint/libs/log"
lerr "github.com/tendermint/tendermint/lite/errors"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
// missingProvider doesn't store anything, always a miss.


+ 1
- 1
lite/proxy/verifier.go View File

@@ -2,10 +2,10 @@ package proxy
import (
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/lite"
lclient "github.com/tendermint/tendermint/lite/client"
dbm "github.com/tendermint/tm-cmn/db"
)
func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) {


+ 1
- 1
node/node.go View File

@@ -26,7 +26,6 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/evidence"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
mempl "github.com/tendermint/tendermint/mempool"
@@ -45,6 +44,7 @@ import (
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-cmn/db"
)
// CustomReactorNamePrefix is a prefix for all custom reactors to prevent


+ 1
- 1
node/node_test.go View File

@@ -17,7 +17,6 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/evidence"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
@@ -28,6 +27,7 @@ import (
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
"github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestNodeStartStop(t *testing.T) {


+ 1
- 1
p2p/trust/store.go View File

@@ -10,7 +10,7 @@ import (
"time"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
dbm "github.com/tendermint/tm-cmn/db"
)
const defaultStorePeriodicSaveInterval = 1 * time.Minute


+ 1
- 1
p2p/trust/store_test.go View File

@@ -10,8 +10,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestTrustMetricStoreSaveLoad(t *testing.T) {


+ 1
- 1
rpc/core/pipe.go View File

@@ -6,7 +6,6 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/consensus"
"github.com/tendermint/tendermint/crypto"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/p2p"
@@ -14,6 +13,7 @@ import (
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
const (


+ 1
- 1
state/execution.go View File

@@ -5,12 +5,12 @@ import (
"time"
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/fail"
"github.com/tendermint/tendermint/libs/log"
mempl "github.com/tendermint/tendermint/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
//-----------------------------------------------------------------------------


+ 1
- 1
state/export_test.go View File

@@ -2,8 +2,8 @@ package state
import (
abci "github.com/tendermint/tendermint/abci/types"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
//


+ 1
- 1
state/helpers_test.go View File

@@ -7,11 +7,11 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
dbm "github.com/tendermint/tm-cmn/db"
)
type paramsChangeTestCase struct {


+ 1
- 1
state/state_test.go View File

@@ -13,8 +13,8 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
dbm "github.com/tendermint/tm-cmn/db"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/types"


+ 1
- 1
state/store.go View File

@@ -5,8 +5,8 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
const (


+ 1
- 1
state/store_test.go View File

@@ -9,9 +9,9 @@ import (
"github.com/stretchr/testify/require"
cfg "github.com/tendermint/tendermint/config"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestStoreLoadValidators(t *testing.T) {


+ 1
- 1
state/tx_filter_test.go View File

@@ -8,9 +8,9 @@ import (
"github.com/stretchr/testify/require"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
func TestTxFilter(t *testing.T) {


+ 1
- 1
state/txindex/indexer_service_test.go View File

@@ -8,11 +8,11 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/state/txindex/kv"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tm-cmn/db"
)
func TestIndexerServiceIndexesBlocks(t *testing.T) {


+ 1
- 1
state/txindex/kv/kv.go View File

@@ -12,10 +12,10 @@ import (
"github.com/pkg/errors"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/state/txindex"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
const (


+ 1
- 1
state/txindex/kv/kv_test.go View File

@@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/require"
abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
db "github.com/tendermint/tendermint/libs/db"
db "github.com/tendermint/tm-cmn/db"
"github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/state/txindex"


+ 1
- 1
state/validation.go View File

@@ -6,8 +6,8 @@ import (
"fmt"
"github.com/tendermint/tendermint/crypto"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-cmn/db"
)
//-----------------------------------------------------

