* Remove db from tendermint in favor of tendermint/tm-cmn - remove db from `libs` - update dependencies; there have been no breaking changes in the updated deps - https://github.com/grpc/grpc-go/releases - https://github.com/golang/protobuf/releases Signed-off-by: Marko Baricevic <marbar3778@yahoo.com> * changelog add * gofmt * more gofmt (pull/3926/head)
@ -1,223 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"io/ioutil" | |||
"os" | |||
"path/filepath" | |||
"testing" | |||
"github.com/stretchr/testify/assert" | |||
"github.com/stretchr/testify/require" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
func cleanupDBDir(dir, name string) { | |||
err := os.RemoveAll(filepath.Join(dir, name) + ".db") | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { | |||
// Default | |||
dirname, err := ioutil.TempDir("", fmt.Sprintf("test_backend_%s_", backend)) | |||
require.Nil(t, err) | |||
db := NewDB("testdb", backend, dirname) | |||
defer cleanupDBDir(dirname, "testdb") | |||
// A nonexistent key should return nil, even if the key is empty | |||
require.Nil(t, db.Get([]byte(""))) | |||
// A nonexistent key should return nil, even if the key is nil | |||
require.Nil(t, db.Get(nil)) | |||
// A nonexistent key should return nil. | |||
key := []byte("abc") | |||
require.Nil(t, db.Get(key)) | |||
// Set empty value. | |||
db.Set(key, []byte("")) | |||
require.NotNil(t, db.Get(key)) | |||
require.Empty(t, db.Get(key)) | |||
// Set nil value. | |||
db.Set(key, nil) | |||
require.NotNil(t, db.Get(key)) | |||
require.Empty(t, db.Get(key)) | |||
// Delete. | |||
db.Delete(key) | |||
require.Nil(t, db.Get(key)) | |||
} | |||
func TestBackendsGetSetDelete(t *testing.T) { | |||
for dbType := range backends { | |||
testBackendGetSetDelete(t, dbType) | |||
} | |||
} | |||
func withDB(t *testing.T, creator dbCreator, fn func(DB)) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
dir := os.TempDir() | |||
db, err := creator(name, dir) | |||
require.Nil(t, err) | |||
defer cleanupDBDir(dir, name) | |||
fn(db) | |||
db.Close() | |||
} | |||
func TestBackendsNilKeys(t *testing.T) { | |||
// Test all backends. | |||
for dbType, creator := range backends { | |||
withDB(t, creator, func(db DB) { | |||
t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { | |||
// Nil keys are treated as the empty key for most operations. | |||
expect := func(key, value []byte) { | |||
if len(key) == 0 { // nil or empty | |||
assert.Equal(t, db.Get(nil), db.Get([]byte(""))) | |||
assert.Equal(t, db.Has(nil), db.Has([]byte(""))) | |||
} | |||
assert.Equal(t, db.Get(key), value) | |||
assert.Equal(t, db.Has(key), value != nil) | |||
} | |||
// Not set | |||
expect(nil, nil) | |||
// Set nil value | |||
db.Set(nil, nil) | |||
expect(nil, []byte("")) | |||
// Set empty value | |||
db.Set(nil, []byte("")) | |||
expect(nil, []byte("")) | |||
// Set nil, Delete nil | |||
db.Set(nil, []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.Delete(nil) | |||
expect(nil, nil) | |||
// Set nil, Delete empty | |||
db.Set(nil, []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.Delete([]byte("")) | |||
expect(nil, nil) | |||
// Set empty, Delete nil | |||
db.Set([]byte(""), []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.Delete(nil) | |||
expect(nil, nil) | |||
// Set empty, Delete empty | |||
db.Set([]byte(""), []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.Delete([]byte("")) | |||
expect(nil, nil) | |||
// SetSync nil, DeleteSync nil | |||
db.SetSync(nil, []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.DeleteSync(nil) | |||
expect(nil, nil) | |||
// SetSync nil, DeleteSync empty | |||
db.SetSync(nil, []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.DeleteSync([]byte("")) | |||
expect(nil, nil) | |||
// SetSync empty, DeleteSync nil | |||
db.SetSync([]byte(""), []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.DeleteSync(nil) | |||
expect(nil, nil) | |||
// SetSync empty, DeleteSync empty | |||
db.SetSync([]byte(""), []byte("abc")) | |||
expect(nil, []byte("abc")) | |||
db.DeleteSync([]byte("")) | |||
expect(nil, nil) | |||
}) | |||
}) | |||
} | |||
} | |||
func TestGoLevelDBBackend(t *testing.T) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
db := NewDB(name, GoLevelDBBackend, "") | |||
defer cleanupDBDir("", name) | |||
_, ok := db.(*GoLevelDB) | |||
assert.True(t, ok) | |||
} | |||
func TestDBIterator(t *testing.T) { | |||
for dbType := range backends { | |||
t.Run(fmt.Sprintf("%v", dbType), func(t *testing.T) { | |||
testDBIterator(t, dbType) | |||
}) | |||
} | |||
} | |||
func testDBIterator(t *testing.T, backend DBBackendType) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
dir := os.TempDir() | |||
db := NewDB(name, backend, dir) | |||
defer cleanupDBDir(dir, name) | |||
for i := 0; i < 10; i++ { | |||
if i != 6 { // but skip 6. | |||
db.Set(int642Bytes(int64(i)), nil) | |||
} | |||
} | |||
verifyIterator(t, db.Iterator(nil, nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator") | |||
verifyIterator(t, db.ReverseIterator(nil, nil), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator") | |||
verifyIterator(t, db.Iterator(nil, int642Bytes(0)), []int64(nil), "forward iterator to 0") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(10), nil), []int64(nil), "reverse iterator from 10 (ex)") | |||
verifyIterator(t, db.Iterator(int642Bytes(0), nil), []int64{0, 1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 0") | |||
verifyIterator(t, db.Iterator(int642Bytes(1), nil), []int64{1, 2, 3, 4, 5, 7, 8, 9}, "forward iterator from 1") | |||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(10)), []int64{9, 8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 10 (ex)") | |||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(9)), []int64{8, 7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 9 (ex)") | |||
verifyIterator(t, db.ReverseIterator(nil, int642Bytes(8)), []int64{7, 5, 4, 3, 2, 1, 0}, "reverse iterator from 8 (ex)") | |||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "forward iterator from 5 to 6") | |||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "forward iterator from 5 to 7") | |||
verifyIterator(t, db.Iterator(int642Bytes(5), int642Bytes(8)), []int64{5, 7}, "forward iterator from 5 to 8") | |||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "forward iterator from 6 to 7") | |||
verifyIterator(t, db.Iterator(int642Bytes(6), int642Bytes(8)), []int64{7}, "forward iterator from 6 to 8") | |||
verifyIterator(t, db.Iterator(int642Bytes(7), int642Bytes(8)), []int64{7}, "forward iterator from 7 to 8") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(5)), []int64{4}, "reverse iterator from 5 (ex) to 4") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(6)), []int64{5, 4}, "reverse iterator from 6 (ex) to 4") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(7)), []int64{5, 4}, "reverse iterator from 7 (ex) to 4") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(6)), []int64{5}, "reverse iterator from 6 (ex) to 5") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(5), int642Bytes(7)), []int64{5}, "reverse iterator from 7 (ex) to 5") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(6), int642Bytes(7)), []int64(nil), "reverse iterator from 7 (ex) to 6") | |||
verifyIterator(t, db.Iterator(int642Bytes(0), int642Bytes(1)), []int64{0}, "forward iterator from 0 to 1") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(8), int642Bytes(9)), []int64{8}, "reverse iterator from 9 (ex) to 8") | |||
verifyIterator(t, db.Iterator(int642Bytes(2), int642Bytes(4)), []int64{2, 3}, "forward iterator from 2 to 4") | |||
verifyIterator(t, db.Iterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "forward iterator from 4 to 2") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(2), int642Bytes(4)), []int64{3, 2}, "reverse iterator from 4 (ex) to 2") | |||
verifyIterator(t, db.ReverseIterator(int642Bytes(4), int642Bytes(2)), []int64(nil), "reverse iterator from 2 (ex) to 4") | |||
} | |||
func verifyIterator(t *testing.T, itr Iterator, expected []int64, msg string) { | |||
var list []int64 | |||
for itr.Valid() { | |||
list = append(list, bytes2Int64(itr.Key())) | |||
itr.Next() | |||
} | |||
assert.Equal(t, expected, list, msg) | |||
} |
@ -1,349 +0,0 @@ | |||
// +build boltdb | |||
package db | |||
import ( | |||
"bytes" | |||
"errors" | |||
"fmt" | |||
"os" | |||
"path/filepath" | |||
"github.com/etcd-io/bbolt" | |||
) | |||
var bucket = []byte("tm") | |||
func init() { | |||
registerDBCreator(BoltDBBackend, func(name, dir string) (DB, error) { | |||
return NewBoltDB(name, dir) | |||
}, false) | |||
} | |||
// BoltDB is a wrapper around etcd's fork of bolt | |||
// (https://github.com/etcd-io/bbolt). | |||
// | |||
// NOTE: All operations (including Set, Delete) are synchronous by default. One | |||
// can globally turn it off by using NoSync config option (not recommended). | |||
// | |||
// A single bucket ([]byte("tm")) is used per a database instance. This could | |||
// lead to performance issues when/if there will be lots of keys. | |||
type BoltDB struct { | |||
db *bbolt.DB | |||
} | |||
// NewBoltDB returns a BoltDB with default options. | |||
func NewBoltDB(name, dir string) (DB, error) { | |||
return NewBoltDBWithOpts(name, dir, bbolt.DefaultOptions) | |||
} | |||
// NewBoltDBWithOpts allows you to supply *bbolt.Options. ReadOnly: true is not | |||
// supported because NewBoltDBWithOpts creates a global bucket. | |||
func NewBoltDBWithOpts(name string, dir string, opts *bbolt.Options) (DB, error) { | |||
if opts.ReadOnly { | |||
return nil, errors.New("ReadOnly: true is not supported") | |||
} | |||
dbPath := filepath.Join(dir, name+".db") | |||
db, err := bbolt.Open(dbPath, os.ModePerm, opts) | |||
if err != nil { | |||
return nil, err | |||
} | |||
// create a global bucket | |||
err = db.Update(func(tx *bbolt.Tx) error { | |||
_, err := tx.CreateBucketIfNotExists(bucket) | |||
return err | |||
}) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &BoltDB{db: db}, nil | |||
} | |||
func (bdb *BoltDB) Get(key []byte) (value []byte) { | |||
key = nonEmptyKey(nonNilBytes(key)) | |||
err := bdb.db.View(func(tx *bbolt.Tx) error { | |||
b := tx.Bucket(bucket) | |||
if v := b.Get(key); v != nil { | |||
value = append([]byte{}, v...) | |||
} | |||
return nil | |||
}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return | |||
} | |||
func (bdb *BoltDB) Has(key []byte) bool { | |||
return bdb.Get(key) != nil | |||
} | |||
func (bdb *BoltDB) Set(key, value []byte) { | |||
key = nonEmptyKey(nonNilBytes(key)) | |||
value = nonNilBytes(value) | |||
err := bdb.db.Update(func(tx *bbolt.Tx) error { | |||
b := tx.Bucket(bucket) | |||
return b.Put(key, value) | |||
}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (bdb *BoltDB) SetSync(key, value []byte) { | |||
bdb.Set(key, value) | |||
} | |||
func (bdb *BoltDB) Delete(key []byte) { | |||
key = nonEmptyKey(nonNilBytes(key)) | |||
err := bdb.db.Update(func(tx *bbolt.Tx) error { | |||
return tx.Bucket(bucket).Delete(key) | |||
}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (bdb *BoltDB) DeleteSync(key []byte) { | |||
bdb.Delete(key) | |||
} | |||
func (bdb *BoltDB) Close() { | |||
bdb.db.Close() | |||
} | |||
func (bdb *BoltDB) Print() { | |||
stats := bdb.db.Stats() | |||
fmt.Printf("%v\n", stats) | |||
err := bdb.db.View(func(tx *bbolt.Tx) error { | |||
tx.Bucket(bucket).ForEach(func(k, v []byte) error { | |||
fmt.Printf("[%X]:\t[%X]\n", k, v) | |||
return nil | |||
}) | |||
return nil | |||
}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (bdb *BoltDB) Stats() map[string]string { | |||
stats := bdb.db.Stats() | |||
m := make(map[string]string) | |||
// Freelist stats | |||
m["FreePageN"] = fmt.Sprintf("%v", stats.FreePageN) | |||
m["PendingPageN"] = fmt.Sprintf("%v", stats.PendingPageN) | |||
m["FreeAlloc"] = fmt.Sprintf("%v", stats.FreeAlloc) | |||
m["FreelistInuse"] = fmt.Sprintf("%v", stats.FreelistInuse) | |||
// Transaction stats | |||
m["TxN"] = fmt.Sprintf("%v", stats.TxN) | |||
m["OpenTxN"] = fmt.Sprintf("%v", stats.OpenTxN) | |||
return m | |||
} | |||
// boltDBBatch stores key values in sync.Map and dumps them to the underlying | |||
// DB upon Write call. | |||
type boltDBBatch struct { | |||
db *BoltDB | |||
ops []operation | |||
} | |||
// NewBatch returns a new batch. | |||
func (bdb *BoltDB) NewBatch() Batch { | |||
return &boltDBBatch{ | |||
ops: nil, | |||
db: bdb, | |||
} | |||
} | |||
// It is safe to modify the contents of the argument after Set returns but not | |||
// before. | |||
func (bdb *boltDBBatch) Set(key, value []byte) { | |||
bdb.ops = append(bdb.ops, operation{opTypeSet, key, value}) | |||
} | |||
// It is safe to modify the contents of the argument after Delete returns but | |||
// not before. | |||
func (bdb *boltDBBatch) Delete(key []byte) { | |||
bdb.ops = append(bdb.ops, operation{opTypeDelete, key, nil}) | |||
} | |||
// NOTE: the operation is synchronous (see BoltDB for reasons) | |||
func (bdb *boltDBBatch) Write() { | |||
err := bdb.db.db.Batch(func(tx *bbolt.Tx) error { | |||
b := tx.Bucket(bucket) | |||
for _, op := range bdb.ops { | |||
key := nonEmptyKey(nonNilBytes(op.key)) | |||
switch op.opType { | |||
case opTypeSet: | |||
if putErr := b.Put(key, op.value); putErr != nil { | |||
return putErr | |||
} | |||
case opTypeDelete: | |||
if delErr := b.Delete(key); delErr != nil { | |||
return delErr | |||
} | |||
} | |||
} | |||
return nil | |||
}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (bdb *boltDBBatch) WriteSync() { | |||
bdb.Write() | |||
} | |||
func (bdb *boltDBBatch) Close() {} | |||
// WARNING: Any concurrent writes or reads will block until the iterator is | |||
// closed. | |||
func (bdb *BoltDB) Iterator(start, end []byte) Iterator { | |||
tx, err := bdb.db.Begin(false) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return newBoltDBIterator(tx, start, end, false) | |||
} | |||
// WARNING: Any concurrent writes or reads will block until the iterator is | |||
// closed. | |||
func (bdb *BoltDB) ReverseIterator(start, end []byte) Iterator { | |||
tx, err := bdb.db.Begin(false) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return newBoltDBIterator(tx, start, end, true) | |||
} | |||
// boltDBIterator allows you to iterate on range of keys/values given some | |||
// start / end keys (nil & nil will result in doing full scan). | |||
type boltDBIterator struct { | |||
tx *bbolt.Tx | |||
itr *bbolt.Cursor | |||
start []byte | |||
end []byte | |||
currentKey []byte | |||
currentValue []byte | |||
isInvalid bool | |||
isReverse bool | |||
} | |||
func newBoltDBIterator(tx *bbolt.Tx, start, end []byte, isReverse bool) *boltDBIterator { | |||
itr := tx.Bucket(bucket).Cursor() | |||
var ck, cv []byte | |||
if isReverse { | |||
if end == nil { | |||
ck, cv = itr.Last() | |||
} else { | |||
_, _ = itr.Seek(end) // after key | |||
ck, cv = itr.Prev() // return to end key | |||
} | |||
} else { | |||
if start == nil { | |||
ck, cv = itr.First() | |||
} else { | |||
ck, cv = itr.Seek(start) | |||
} | |||
} | |||
return &boltDBIterator{ | |||
tx: tx, | |||
itr: itr, | |||
start: start, | |||
end: end, | |||
currentKey: ck, | |||
currentValue: cv, | |||
isReverse: isReverse, | |||
isInvalid: false, | |||
} | |||
} | |||
func (itr *boltDBIterator) Domain() ([]byte, []byte) { | |||
return itr.start, itr.end | |||
} | |||
func (itr *boltDBIterator) Valid() bool { | |||
if itr.isInvalid { | |||
return false | |||
} | |||
// iterated to the end of the cursor | |||
if len(itr.currentKey) == 0 { | |||
itr.isInvalid = true | |||
return false | |||
} | |||
if itr.isReverse { | |||
if itr.start != nil && bytes.Compare(itr.currentKey, itr.start) < 0 { | |||
itr.isInvalid = true | |||
return false | |||
} | |||
} else { | |||
if itr.end != nil && bytes.Compare(itr.end, itr.currentKey) <= 0 { | |||
itr.isInvalid = true | |||
return false | |||
} | |||
} | |||
// Valid | |||
return true | |||
} | |||
func (itr *boltDBIterator) Next() { | |||
itr.assertIsValid() | |||
if itr.isReverse { | |||
itr.currentKey, itr.currentValue = itr.itr.Prev() | |||
} else { | |||
itr.currentKey, itr.currentValue = itr.itr.Next() | |||
} | |||
} | |||
func (itr *boltDBIterator) Key() []byte { | |||
itr.assertIsValid() | |||
return append([]byte{}, itr.currentKey...) | |||
} | |||
func (itr *boltDBIterator) Value() []byte { | |||
itr.assertIsValid() | |||
var value []byte | |||
if itr.currentValue != nil { | |||
value = append([]byte{}, itr.currentValue...) | |||
} | |||
return value | |||
} | |||
func (itr *boltDBIterator) Close() { | |||
err := itr.tx.Rollback() | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (itr *boltDBIterator) assertIsValid() { | |||
if !itr.Valid() { | |||
panic("Boltdb-iterator is invalid") | |||
} | |||
} | |||
// nonEmptyKey returns []byte("nil") if key is nil or empty; otherwise it
// returns key unchanged. BoltDB cannot store empty keys, hence the sentinel.
// WARNING: this may collide with a legitimate user key of "nil"!
func nonEmptyKey(key []byte) []byte {
	if len(key) == 0 {
		return []byte("nil")
	}
	return key
}
@ -1,37 +0,0 @@ | |||
// +build boltdb | |||
package db | |||
import ( | |||
"fmt" | |||
"os" | |||
"testing" | |||
"github.com/stretchr/testify/require" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
func TestBoltDBNewBoltDB(t *testing.T) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
dir := os.TempDir() | |||
defer cleanupDBDir(dir, name) | |||
db, err := NewBoltDB(name, dir) | |||
require.NoError(t, err) | |||
db.Close() | |||
} | |||
func BenchmarkBoltDBRandomReadsWrites(b *testing.B) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
db, err := NewBoltDB(name, "") | |||
if err != nil { | |||
b.Fatal(err) | |||
} | |||
defer func() { | |||
db.Close() | |||
cleanupDBDir("", name) | |||
}() | |||
benchmarkRandomReadsWrites(b, db) | |||
} |
@ -1,325 +0,0 @@ | |||
// +build cleveldb | |||
package db | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"path/filepath" | |||
"github.com/jmhodges/levigo" | |||
) | |||
func init() { | |||
dbCreator := func(name string, dir string) (DB, error) { | |||
return NewCLevelDB(name, dir) | |||
} | |||
registerDBCreator(CLevelDBBackend, dbCreator, false) | |||
} | |||
var _ DB = (*CLevelDB)(nil) | |||
type CLevelDB struct { | |||
db *levigo.DB | |||
ro *levigo.ReadOptions | |||
wo *levigo.WriteOptions | |||
woSync *levigo.WriteOptions | |||
} | |||
func NewCLevelDB(name string, dir string) (*CLevelDB, error) { | |||
dbPath := filepath.Join(dir, name+".db") | |||
opts := levigo.NewOptions() | |||
opts.SetCache(levigo.NewLRUCache(1 << 30)) | |||
opts.SetCreateIfMissing(true) | |||
db, err := levigo.Open(dbPath, opts) | |||
if err != nil { | |||
return nil, err | |||
} | |||
ro := levigo.NewReadOptions() | |||
wo := levigo.NewWriteOptions() | |||
woSync := levigo.NewWriteOptions() | |||
woSync.SetSync(true) | |||
database := &CLevelDB{ | |||
db: db, | |||
ro: ro, | |||
wo: wo, | |||
woSync: woSync, | |||
} | |||
return database, nil | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Get(key []byte) []byte { | |||
key = nonNilBytes(key) | |||
res, err := db.db.Get(db.ro, key) | |||
if err != nil { | |||
panic(err) | |||
} | |||
return res | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Has(key []byte) bool { | |||
return db.Get(key) != nil | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Set(key []byte, value []byte) { | |||
key = nonNilBytes(key) | |||
value = nonNilBytes(value) | |||
err := db.db.Put(db.wo, key, value) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) SetSync(key []byte, value []byte) { | |||
key = nonNilBytes(key) | |||
value = nonNilBytes(value) | |||
err := db.db.Put(db.woSync, key, value) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Delete(key []byte) { | |||
key = nonNilBytes(key) | |||
err := db.db.Delete(db.wo, key) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) DeleteSync(key []byte) { | |||
key = nonNilBytes(key) | |||
err := db.db.Delete(db.woSync, key) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (db *CLevelDB) DB() *levigo.DB { | |||
return db.db | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Close() { | |||
db.db.Close() | |||
db.ro.Close() | |||
db.wo.Close() | |||
db.woSync.Close() | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Print() { | |||
itr := db.Iterator(nil, nil) | |||
defer itr.Close() | |||
for ; itr.Valid(); itr.Next() { | |||
key := itr.Key() | |||
value := itr.Value() | |||
fmt.Printf("[%X]:\t[%X]\n", key, value) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *CLevelDB) Stats() map[string]string { | |||
keys := []string{ | |||
"leveldb.aliveiters", | |||
"leveldb.alivesnaps", | |||
"leveldb.blockpool", | |||
"leveldb.cachedblock", | |||
"leveldb.num-files-at-level{n}", | |||
"leveldb.openedtables", | |||
"leveldb.sstables", | |||
"leveldb.stats", | |||
} | |||
stats := make(map[string]string, len(keys)) | |||
for _, key := range keys { | |||
str := db.db.PropertyValue(key) | |||
stats[key] = str | |||
} | |||
return stats | |||
} | |||
//---------------------------------------- | |||
// Batch | |||
// Implements DB. | |||
func (db *CLevelDB) NewBatch() Batch { | |||
batch := levigo.NewWriteBatch() | |||
return &cLevelDBBatch{db, batch} | |||
} | |||
type cLevelDBBatch struct { | |||
db *CLevelDB | |||
batch *levigo.WriteBatch | |||
} | |||
// Implements Batch. | |||
func (mBatch *cLevelDBBatch) Set(key, value []byte) { | |||
mBatch.batch.Put(key, value) | |||
} | |||
// Implements Batch. | |||
func (mBatch *cLevelDBBatch) Delete(key []byte) { | |||
mBatch.batch.Delete(key) | |||
} | |||
// Implements Batch. | |||
func (mBatch *cLevelDBBatch) Write() { | |||
err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements Batch. | |||
func (mBatch *cLevelDBBatch) WriteSync() { | |||
err := mBatch.db.db.Write(mBatch.db.woSync, mBatch.batch) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements Batch. | |||
func (mBatch *cLevelDBBatch) Close() { | |||
mBatch.batch.Close() | |||
} | |||
//---------------------------------------- | |||
// Iterator | |||
// NOTE This is almost identical to db/go_level_db.Iterator | |||
// Before creating a third version, refactor. | |||
func (db *CLevelDB) Iterator(start, end []byte) Iterator { | |||
itr := db.db.NewIterator(db.ro) | |||
return newCLevelDBIterator(itr, start, end, false) | |||
} | |||
func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { | |||
itr := db.db.NewIterator(db.ro) | |||
return newCLevelDBIterator(itr, start, end, true) | |||
} | |||
var _ Iterator = (*cLevelDBIterator)(nil) | |||
type cLevelDBIterator struct { | |||
source *levigo.Iterator | |||
start, end []byte | |||
isReverse bool | |||
isInvalid bool | |||
} | |||
func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { | |||
if isReverse { | |||
if end == nil { | |||
source.SeekToLast() | |||
} else { | |||
source.Seek(end) | |||
if source.Valid() { | |||
eoakey := source.Key() // end or after key | |||
if bytes.Compare(end, eoakey) <= 0 { | |||
source.Prev() | |||
} | |||
} else { | |||
source.SeekToLast() | |||
} | |||
} | |||
} else { | |||
if start == nil { | |||
source.SeekToFirst() | |||
} else { | |||
source.Seek(start) | |||
} | |||
} | |||
return &cLevelDBIterator{ | |||
source: source, | |||
start: start, | |||
end: end, | |||
isReverse: isReverse, | |||
isInvalid: false, | |||
} | |||
} | |||
func (itr cLevelDBIterator) Domain() ([]byte, []byte) { | |||
return itr.start, itr.end | |||
} | |||
func (itr cLevelDBIterator) Valid() bool { | |||
// Once invalid, forever invalid. | |||
if itr.isInvalid { | |||
return false | |||
} | |||
// Panic on DB error. No way to recover. | |||
itr.assertNoError() | |||
// If source is invalid, invalid. | |||
if !itr.source.Valid() { | |||
itr.isInvalid = true | |||
return false | |||
} | |||
// If key is end or past it, invalid. | |||
var start = itr.start | |||
var end = itr.end | |||
var key = itr.source.Key() | |||
if itr.isReverse { | |||
if start != nil && bytes.Compare(key, start) < 0 { | |||
itr.isInvalid = true | |||
return false | |||
} | |||
} else { | |||
if end != nil && bytes.Compare(end, key) <= 0 { | |||
itr.isInvalid = true | |||
return false | |||
} | |||
} | |||
// It's valid. | |||
return true | |||
} | |||
func (itr cLevelDBIterator) Key() []byte { | |||
itr.assertNoError() | |||
itr.assertIsValid() | |||
return itr.source.Key() | |||
} | |||
func (itr cLevelDBIterator) Value() []byte { | |||
itr.assertNoError() | |||
itr.assertIsValid() | |||
return itr.source.Value() | |||
} | |||
func (itr cLevelDBIterator) Next() { | |||
itr.assertNoError() | |||
itr.assertIsValid() | |||
if itr.isReverse { | |||
itr.source.Prev() | |||
} else { | |||
itr.source.Next() | |||
} | |||
} | |||
func (itr cLevelDBIterator) Close() { | |||
itr.source.Close() | |||
} | |||
func (itr cLevelDBIterator) assertNoError() { | |||
if err := itr.source.GetError(); err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (itr cLevelDBIterator) assertIsValid() { | |||
if !itr.Valid() { | |||
panic("cLevelDBIterator is invalid") | |||
} | |||
} |
@ -1,110 +0,0 @@ | |||
// +build cleveldb | |||
package db | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"os" | |||
"testing" | |||
"github.com/stretchr/testify/assert" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
func BenchmarkRandomReadsWrites2(b *testing.B) { | |||
b.StopTimer() | |||
numItems := int64(1000000) | |||
internal := map[int64]int64{} | |||
for i := 0; i < int(numItems); i++ { | |||
internal[int64(i)] = int64(0) | |||
} | |||
db, err := NewCLevelDB(fmt.Sprintf("test_%x", cmn.RandStr(12)), "") | |||
if err != nil { | |||
b.Fatal(err.Error()) | |||
return | |||
} | |||
fmt.Println("ok, starting") | |||
b.StartTimer() | |||
for i := 0; i < b.N; i++ { | |||
// Write something | |||
{ | |||
idx := (int64(cmn.RandInt()) % numItems) | |||
internal[idx]++ | |||
val := internal[idx] | |||
idxBytes := int642Bytes(int64(idx)) | |||
valBytes := int642Bytes(int64(val)) | |||
//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes) | |||
db.Set( | |||
idxBytes, | |||
valBytes, | |||
) | |||
} | |||
// Read something | |||
{ | |||
idx := (int64(cmn.RandInt()) % numItems) | |||
val := internal[idx] | |||
idxBytes := int642Bytes(int64(idx)) | |||
valBytes := db.Get(idxBytes) | |||
//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes) | |||
if val == 0 { | |||
if !bytes.Equal(valBytes, nil) { | |||
b.Errorf("Expected %v for %v, got %X", | |||
nil, idx, valBytes) | |||
break | |||
} | |||
} else { | |||
if len(valBytes) != 8 { | |||
b.Errorf("Expected length 8 for %v, got %X", | |||
idx, valBytes) | |||
break | |||
} | |||
valGot := bytes2Int64(valBytes) | |||
if val != valGot { | |||
b.Errorf("Expected %v for %v, got %v", | |||
val, idx, valGot) | |||
break | |||
} | |||
} | |||
} | |||
} | |||
db.Close() | |||
} | |||
/* | |||
func int642Bytes(i int64) []byte { | |||
buf := make([]byte, 8) | |||
binary.BigEndian.PutUint64(buf, uint64(i)) | |||
return buf | |||
} | |||
func bytes2Int64(buf []byte) int64 { | |||
return int64(binary.BigEndian.Uint64(buf)) | |||
} | |||
*/ | |||
func TestCLevelDBBackend(t *testing.T) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
// Can't use "" (current directory) or "./" here because levigo.Open returns: | |||
// "Error initializing DB: IO error: test_XXX.db: Invalid argument" | |||
dir := os.TempDir() | |||
db := NewDB(name, CLevelDBBackend, dir) | |||
defer cleanupDBDir(dir, name) | |||
_, ok := db.(*CLevelDB) | |||
assert.True(t, ok) | |||
} | |||
func TestCLevelDBStats(t *testing.T) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
dir := os.TempDir() | |||
db := NewDB(name, CLevelDBBackend, dir) | |||
defer cleanupDBDir(dir, name) | |||
assert.NotEmpty(t, db.Stats()) | |||
} |
@ -1,256 +0,0 @@ | |||
package db | |||
import ( | |||
"bytes" | |||
"encoding/binary" | |||
"fmt" | |||
"io/ioutil" | |||
"sync" | |||
"testing" | |||
"github.com/stretchr/testify/assert" | |||
"github.com/stretchr/testify/require" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
//---------------------------------------- | |||
// Helper functions. | |||
func checkValue(t *testing.T, db DB, key []byte, valueWanted []byte) { | |||
valueGot := db.Get(key) | |||
assert.Equal(t, valueWanted, valueGot) | |||
} | |||
func checkValid(t *testing.T, itr Iterator, expected bool) { | |||
valid := itr.Valid() | |||
require.Equal(t, expected, valid) | |||
} | |||
func checkNext(t *testing.T, itr Iterator, expected bool) { | |||
itr.Next() | |||
valid := itr.Valid() | |||
require.Equal(t, expected, valid) | |||
} | |||
func checkNextPanics(t *testing.T, itr Iterator) { | |||
assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") | |||
} | |||
func checkDomain(t *testing.T, itr Iterator, start, end []byte) { | |||
ds, de := itr.Domain() | |||
assert.Equal(t, start, ds, "checkDomain domain start incorrect") | |||
assert.Equal(t, end, de, "checkDomain domain end incorrect") | |||
} | |||
func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { | |||
k, v := itr.Key(), itr.Value() | |||
assert.Exactly(t, key, k) | |||
assert.Exactly(t, value, v) | |||
} | |||
func checkInvalid(t *testing.T, itr Iterator) { | |||
checkValid(t, itr, false) | |||
checkKeyPanics(t, itr) | |||
checkValuePanics(t, itr) | |||
checkNextPanics(t, itr) | |||
} | |||
func checkKeyPanics(t *testing.T, itr Iterator) { | |||
assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't") | |||
} | |||
func checkValuePanics(t *testing.T, itr Iterator) { | |||
assert.Panics(t, func() { itr.Value() }, "checkValuePanics expected panic but didn't") | |||
} | |||
func newTempDB(t *testing.T, backend DBBackendType) (db DB, dbDir string) { | |||
dirname, err := ioutil.TempDir("", "db_common_test") | |||
require.Nil(t, err) | |||
return NewDB("testdb", backend, dirname), dirname | |||
} | |||
//----------------------------------------
// mockDB

// mockDB records how many times each DB method was invoked.
// NOTE: not actually goroutine safe.
// If you want something goroutine safe, maybe you just want a MemDB.
type mockDB struct {
	mtx   sync.Mutex
	calls map[string]int
}

// newMockDB returns a mockDB with an empty call counter.
func newMockDB() *mockDB {
	return &mockDB{calls: make(map[string]int)}
}

// Mutex exposes the (unused-by-default) mutex for callers that want it.
func (mdb *mockDB) Mutex() *sync.Mutex {
	return &mdb.mtx
}
func (mdb *mockDB) Get([]byte) []byte { | |||
mdb.calls["Get"]++ | |||
return nil | |||
} | |||
func (mdb *mockDB) Has([]byte) bool { | |||
mdb.calls["Has"]++ | |||
return false | |||
} | |||
func (mdb *mockDB) Set([]byte, []byte) { | |||
mdb.calls["Set"]++ | |||
} | |||
func (mdb *mockDB) SetSync([]byte, []byte) { | |||
mdb.calls["SetSync"]++ | |||
} | |||
func (mdb *mockDB) SetNoLock([]byte, []byte) { | |||
mdb.calls["SetNoLock"]++ | |||
} | |||
func (mdb *mockDB) SetNoLockSync([]byte, []byte) { | |||
mdb.calls["SetNoLockSync"]++ | |||
} | |||
func (mdb *mockDB) Delete([]byte) { | |||
mdb.calls["Delete"]++ | |||
} | |||
func (mdb *mockDB) DeleteSync([]byte) { | |||
mdb.calls["DeleteSync"]++ | |||
} | |||
func (mdb *mockDB) DeleteNoLock([]byte) { | |||
mdb.calls["DeleteNoLock"]++ | |||
} | |||
func (mdb *mockDB) DeleteNoLockSync([]byte) { | |||
mdb.calls["DeleteNoLockSync"]++ | |||
} | |||
func (mdb *mockDB) Iterator(start, end []byte) Iterator { | |||
mdb.calls["Iterator"]++ | |||
return &mockIterator{} | |||
} | |||
func (mdb *mockDB) ReverseIterator(start, end []byte) Iterator { | |||
mdb.calls["ReverseIterator"]++ | |||
return &mockIterator{} | |||
} | |||
func (mdb *mockDB) Close() { | |||
mdb.calls["Close"]++ | |||
} | |||
func (mdb *mockDB) NewBatch() Batch { | |||
mdb.calls["NewBatch"]++ | |||
return &memBatch{db: mdb} | |||
} | |||
func (mdb *mockDB) Print() { | |||
mdb.calls["Print"]++ | |||
fmt.Printf("mockDB{%v}", mdb.Stats()) | |||
} | |||
func (mdb *mockDB) Stats() map[string]string { | |||
mdb.calls["Stats"]++ | |||
res := make(map[string]string) | |||
for key, count := range mdb.calls { | |||
res[key] = fmt.Sprintf("%d", count) | |||
} | |||
return res | |||
} | |||
//----------------------------------------
// mockIterator

// mockIterator is a permanently-invalid Iterator stub: it has an
// unbounded domain, yields no entries, and every accessor returns nil.
type mockIterator struct{}

// Domain reports an unbounded (nil, nil) range.
func (mockIterator) Domain() (start []byte, end []byte) {
	return nil, nil
}

// Valid always reports false.
func (mockIterator) Valid() bool {
	return false
}

// Next is a no-op.
func (mockIterator) Next() {}

// Key always returns nil.
func (mockIterator) Key() []byte {
	return nil
}

// Value always returns nil.
func (mockIterator) Value() []byte {
	return nil
}

// Close is a no-op.
func (mockIterator) Close() {}
// benchmarkRandomReadsWrites interleaves one random single-key write and
// one random read per iteration, mirroring every write in an in-memory
// map so each read can be checked against the expected value.
func benchmarkRandomReadsWrites(b *testing.B, db DB) {
	b.StopTimer()

	// create dummy data
	const numItems = int64(1000000)
	internal := map[int64]int64{}
	for i := 0; i < int(numItems); i++ {
		internal[int64(i)] = int64(0)
	}

	// fmt.Println("ok, starting")
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		// Write something
		{
			idx := int64(cmn.RandInt()) % numItems
			internal[idx]++
			val := internal[idx]
			idxBytes := int642Bytes(int64(idx))
			valBytes := int642Bytes(int64(val))
			//fmt.Printf("Set %X -> %X\n", idxBytes, valBytes)
			db.Set(idxBytes, valBytes)
		}

		// Read something
		{
			idx := int64(cmn.RandInt()) % numItems
			valExp := internal[idx]
			idxBytes := int642Bytes(int64(idx))
			valBytes := db.Get(idxBytes)
			//fmt.Printf("Get %X -> %X\n", idxBytes, valBytes)
			// A counter still at zero was never written, so the DB must
			// not have the key; otherwise the value must round-trip.
			if valExp == 0 {
				if !bytes.Equal(valBytes, nil) {
					b.Errorf("Expected %v for %v, got %X", nil, idx, valBytes)
					break
				}
			} else {
				if len(valBytes) != 8 {
					b.Errorf("Expected length 8 for %v, got %X", idx, valBytes)
					break
				}
				valGot := bytes2Int64(valBytes)
				if valExp != valGot {
					b.Errorf("Expected %v for %v, got %v", valExp, idx, valGot)
					break
				}
			}
		}
	}
}
func int642Bytes(i int64) []byte { | |||
buf := make([]byte, 8) | |||
binary.BigEndian.PutUint64(buf, uint64(i)) | |||
return buf | |||
} | |||
// bytes2Int64 decodes an 8-byte big-endian buffer back into an int64;
// inverse of int642Bytes.
func bytes2Int64(buf []byte) int64 {
	u := binary.BigEndian.Uint64(buf)
	return int64(u)
}
@ -1,70 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"strings" | |||
) | |||
// DBBackendType names one of the registered key-value store
// implementations that can back a DB.
type DBBackendType string

// These are valid backend types.
const (
	// GoLevelDBBackend represents goleveldb (github.com/syndtr/goleveldb - most
	// popular implementation)
	//   - pure go
	//   - stable
	GoLevelDBBackend DBBackendType = "goleveldb"
	// CLevelDBBackend represents cleveldb (uses levigo wrapper)
	//   - fast
	//   - requires gcc
	//   - use cleveldb build tag (go build -tags cleveldb)
	CLevelDBBackend DBBackendType = "cleveldb"
	// MemDBBackend represents in-memory key value store, which is mostly used
	// for testing.
	MemDBBackend DBBackendType = "memdb"
	// FSDBBackend represents filesystem database
	//   - EXPERIMENTAL
	//   - slow
	FSDBBackend DBBackendType = "fsdb"
	// BoltDBBackend represents bolt (uses etcd's fork of bolt -
	// github.com/etcd-io/bbolt)
	//   - EXPERIMENTAL
	//   - may be faster in some use-cases (random reads - indexer)
	//   - use boltdb build tag (go build -tags boltdb)
	BoltDBBackend DBBackendType = "boltdb"
)
type dbCreator func(name string, dir string) (DB, error) | |||
var backends = map[DBBackendType]dbCreator{} | |||
func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { | |||
_, ok := backends[backend] | |||
if !force && ok { | |||
return | |||
} | |||
backends[backend] = creator | |||
} | |||
// NewDB creates a new database of type backend with the given name. | |||
// NOTE: function panics if: | |||
// - backend is unknown (not registered) | |||
// - creator function, provided during registration, returns error | |||
func NewDB(name string, backend DBBackendType, dir string) DB { | |||
dbCreator, ok := backends[backend] | |||
if !ok { | |||
keys := make([]string, len(backends)) | |||
i := 0 | |||
for k := range backends { | |||
keys[i] = string(k) | |||
i++ | |||
} | |||
panic(fmt.Sprintf("Unknown db_backend %s, expected either %s", backend, strings.Join(keys, " or "))) | |||
} | |||
db, err := dbCreator(name, dir) | |||
if err != nil { | |||
panic(fmt.Sprintf("Error initializing DB: %v", err)) | |||
} | |||
return db | |||
} |
@ -1,194 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"os" | |||
"testing" | |||
"github.com/stretchr/testify/assert" | |||
) | |||
// TestDBIteratorSingleKey checks, for every registered backend, that an
// iterator over a one-key database yields exactly one entry and then
// panics on a further Next.
func TestDBIteratorSingleKey(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			db.SetSync(bz("1"), bz("value_1"))
			itr := db.Iterator(nil, nil)

			checkValid(t, itr, true)
			checkNext(t, itr, false)
			checkValid(t, itr, false)
			checkNextPanics(t, itr)

			// Once invalid...
			checkInvalid(t, itr)
		})
	}
}
// TestDBIteratorTwoKeys checks, for every registered backend, that a
// two-key database iterates exactly twice and that over-stepping with
// Next panics.
func TestDBIteratorTwoKeys(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			db.SetSync(bz("1"), bz("value_1"))
			db.SetSync(bz("2"), bz("value_1"))

			{ // Fail by calling Next too much
				itr := db.Iterator(nil, nil)
				checkValid(t, itr, true)

				checkNext(t, itr, true)
				checkValid(t, itr, true)

				checkNext(t, itr, false)
				checkValid(t, itr, false)

				checkNextPanics(t, itr)

				// Once invalid...
				checkInvalid(t, itr)
			}
		})
	}
}
// TestDBIteratorMany checks, for every registered backend, that a full
// iteration over 100 single-byte keys returns the stored value for each
// visited key.
func TestDBIteratorMany(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			keys := make([][]byte, 100)
			for i := 0; i < 100; i++ {
				keys[i] = []byte{byte(i)}
			}

			value := []byte{5}
			for _, k := range keys {
				db.Set(k, value)
			}

			itr := db.Iterator(nil, nil)
			defer itr.Close()
			for ; itr.Valid(); itr.Next() {
				assert.Equal(t, db.Get(itr.Key()), itr.Value())
			}
		})
	}
}
// TestDBIteratorEmpty checks that an iterator over an empty database
// starts out invalid for every registered backend.
func TestDBIteratorEmpty(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			itr := db.Iterator(nil, nil)

			checkInvalid(t, itr)
		})
	}
}
// TestDBIteratorEmptyBeginAfter checks that iterating an empty database
// from a non-nil start key is immediately invalid.
func TestDBIteratorEmptyBeginAfter(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			itr := db.Iterator(bz("1"), nil)

			checkInvalid(t, itr)
		})
	}
}
// TestDBIteratorNonemptyBeginAfter checks that an iterator whose start
// key lies after every stored key is immediately invalid.
func TestDBIteratorNonemptyBeginAfter(t *testing.T) {
	for backend := range backends {
		t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) {
			db, dir := newTempDB(t, backend)
			defer os.RemoveAll(dir)

			db.SetSync(bz("1"), bz("value_1"))
			itr := db.Iterator(bz("2"), nil)

			checkInvalid(t, itr)
		})
	}
}
// TestDBBatchWrite feeds scripted batches into a mockDB and verifies how
// memBatch routes each op: Write uses only the NoLock variants, while
// WriteSync uses the NoLockSync variant for the final op only.
func TestDBBatchWrite(t *testing.T) {
	testCases := []struct {
		modify func(batch Batch)
		calls  map[string]int
	}{
		// Plain Write: all four ops go through the non-sync calls.
		0: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Delete(bz("3"))
				batch.Set(bz("4"), bz("4"))
				batch.Write()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
			},
		},
		// Same ops, different order; counts are identical for Write.
		1: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Set(bz("4"), bz("4"))
				batch.Delete(bz("3"))
				batch.Write()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
			},
		},
		// WriteSync with a Set as the last op: only that Set is synced.
		2: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Delete(bz("3"))
				batch.Set(bz("4"), bz("4"))
				batch.WriteSync()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 2, "SetNoLockSync": 1,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 1, "DeleteNoLockSync": 0,
			},
		},
		// WriteSync with a Delete as the last op: only the Delete is synced.
		3: {
			func(batch Batch) {
				batch.Set(bz("1"), bz("1"))
				batch.Set(bz("2"), bz("2"))
				batch.Set(bz("4"), bz("4"))
				batch.Delete(bz("3"))
				batch.WriteSync()
			},
			map[string]int{
				"Set": 0, "SetSync": 0, "SetNoLock": 3, "SetNoLockSync": 0,
				"Delete": 0, "DeleteSync": 0, "DeleteNoLock": 0, "DeleteNoLockSync": 1,
			},
		},
	}

	for i, tc := range testCases {
		mdb := newMockDB()
		batch := mdb.NewBatch()

		tc.modify(batch)

		for call, exp := range tc.calls {
			got := mdb.calls[call]
			assert.Equal(t, exp, got, "#%v - key: %s", i, call)
		}
	}
}
@ -1,270 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"io/ioutil" | |||
"net/url" | |||
"os" | |||
"path/filepath" | |||
"sort" | |||
"sync" | |||
"github.com/pkg/errors" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
const (
	keyPerm = os.FileMode(0600) // key files: read/write for owner only
	dirPerm = os.FileMode(0700) // database directory: owner-only access
)
// init registers FSDB under the "fsdb" backend name; the database lives
// in the dir/name.db directory.
func init() {
	registerDBCreator(FSDBBackend, func(name, dir string) (DB, error) {
		dbPath := filepath.Join(dir, name+".db")
		return NewFSDB(dbPath), nil
	}, false)
}
var _ DB = (*FSDB)(nil)

// FSDB is a filesystem-backed DB that stores every key as its own file
// under dir, guarded by a single mutex.
// It's slow.
type FSDB struct {
	mtx sync.Mutex
	dir string
}

// NewFSDB creates dir (and any parents) with dirPerm and returns an FSDB
// rooted there; panics if the directory cannot be created.
func NewFSDB(dir string) *FSDB {
	err := os.MkdirAll(dir, dirPerm)
	if err != nil {
		panic(errors.Wrap(err, "Creating FSDB dir "+dir))
	}
	database := &FSDB{
		dir: dir,
	}
	return database
}
func (db *FSDB) Get(key []byte) []byte { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
key = escapeKey(key) | |||
path := db.nameToPath(key) | |||
value, err := read(path) | |||
if os.IsNotExist(err) { | |||
return nil | |||
} else if err != nil { | |||
panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) | |||
} | |||
return value | |||
} | |||
func (db *FSDB) Has(key []byte) bool { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
key = escapeKey(key) | |||
path := db.nameToPath(key) | |||
return cmn.FileExists(path) | |||
} | |||
func (db *FSDB) Set(key []byte, value []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.SetNoLock(key, value) | |||
} | |||
func (db *FSDB) SetSync(key []byte, value []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.SetNoLock(key, value) | |||
} | |||
// NOTE: Implements atomicSetDeleter. | |||
func (db *FSDB) SetNoLock(key []byte, value []byte) { | |||
key = escapeKey(key) | |||
value = nonNilBytes(value) | |||
path := db.nameToPath(key) | |||
err := write(path, value) | |||
if err != nil { | |||
panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key)) | |||
} | |||
} | |||
func (db *FSDB) Delete(key []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.DeleteNoLock(key) | |||
} | |||
func (db *FSDB) DeleteSync(key []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.DeleteNoLock(key) | |||
} | |||
// NOTE: Implements atomicSetDeleter. | |||
func (db *FSDB) DeleteNoLock(key []byte) { | |||
key = escapeKey(key) | |||
path := db.nameToPath(key) | |||
err := remove(path) | |||
if os.IsNotExist(err) { | |||
return | |||
} else if err != nil { | |||
panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key)) | |||
} | |||
} | |||
// Close is a no-op: every write is synced immediately. Implements DB.
func (db *FSDB) Close() {
	// Nothing to do.
}

// Print is not implemented and panics. Implements DB.
func (db *FSDB) Print() {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	panic("FSDB.Print not yet implemented")
}

// Stats is not implemented and panics. Implements DB.
func (db *FSDB) Stats() map[string]string {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	panic("FSDB.Stats not yet implemented")
}

// NewBatch is not implemented and panics. Implements DB.
func (db *FSDB) NewBatch() Batch {
	db.mtx.Lock()
	defer db.mtx.Unlock()
	// Not sure we would ever want to try...
	// It doesn't seem easy for general filesystems.
	panic("FSDB.NewBatch not yet implemented")
}

// Mutex exposes the DB lock.
func (db *FSDB) Mutex() *sync.Mutex {
	return &(db.mtx)
}
// Iterator returns an ascending iterator over [start, end). Implements DB.
func (db *FSDB) Iterator(start, end []byte) Iterator {
	return db.MakeIterator(start, end, false)
}

// MakeIterator snapshots all keys in the domain, sorts them (descending
// when isReversed), and iterates the snapshot via a memDBIterator that
// reads values back through this FSDB.
func (db *FSDB) MakeIterator(start, end []byte, isReversed bool) Iterator {
	db.mtx.Lock()
	defer db.mtx.Unlock()

	// We need a copy of all of the keys.
	// Not the best, but probably not a bottleneck depending.
	keys, err := list(db.dir, start, end)
	if err != nil {
		panic(errors.Wrapf(err, "Listing keys in %s", db.dir))
	}
	if isReversed {
		sort.Sort(sort.Reverse(sort.StringSlice(keys)))
	} else {
		sort.Strings(keys)
	}
	return newMemDBIterator(db, keys, start, end)
}

// ReverseIterator returns a descending iterator over [start, end).
// Implements DB.
func (db *FSDB) ReverseIterator(start, end []byte) Iterator {
	return db.MakeIterator(start, end, true)
}
func (db *FSDB) nameToPath(name []byte) string { | |||
n := url.PathEscape(string(name)) | |||
return filepath.Join(db.dir, n) | |||
} | |||
// Read some bytes to a file. | |||
// CONTRACT: returns os errors directly without wrapping. | |||
func read(path string) ([]byte, error) { | |||
f, err := os.Open(path) | |||
if err != nil { | |||
return nil, err | |||
} | |||
defer f.Close() | |||
d, err := ioutil.ReadAll(f) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return d, nil | |||
} | |||
// Write some bytes from a file. | |||
// CONTRACT: returns os errors directly without wrapping. | |||
func write(path string, d []byte) error { | |||
f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm) | |||
if err != nil { | |||
return err | |||
} | |||
defer f.Close() | |||
// fInfo, err := f.Stat() | |||
// if err != nil { | |||
// return err | |||
// } | |||
// if fInfo.Mode() != keyPerm { | |||
// return tmerrors.NewErrPermissionsChanged(f.Name(), keyPerm, fInfo.Mode()) | |||
// } | |||
_, err = f.Write(d) | |||
if err != nil { | |||
return err | |||
} | |||
err = f.Sync() | |||
return err | |||
} | |||
// Remove a file. Used by DeleteNoLock; a missing file surfaces as an
// os.IsNotExist error for the caller to interpret.
// CONTRACT: returns os errors directly without wrapping.
func remove(path string) error {
	return os.Remove(path)
}
// List keys in a directory, stripping of escape sequences and dir portions.
// Only keys inside the [start, end) domain are returned.
// CONTRACT: returns os errors directly without wrapping.
func list(dirPath string, start, end []byte) ([]string, error) {
	dir, err := os.Open(dirPath)
	if err != nil {
		return nil, err
	}
	defer dir.Close()

	names, err := dir.Readdirnames(0)
	if err != nil {
		return nil, err
	}
	var keys []string
	for _, name := range names {
		// File names were url.PathEscape-d (see nameToPath); undo that first.
		n, err := url.PathUnescape(name)
		if err != nil {
			return nil, fmt.Errorf("Failed to unescape %s while listing", name)
		}
		// Then strip the "k_" key prefix and filter by domain.
		key := unescapeKey([]byte(n))
		if IsKeyInDomain(key, start, end) {
			keys = append(keys, string(key))
		}
	}
	return keys, nil
}
// escapeKey prefixes every key with "k_" so empty and nil keys still map
// to valid, non-empty filenames.
func escapeKey(key []byte) []byte {
	escaped := append([]byte("k_"), key...)
	return escaped
}

// unescapeKey strips the "k_" prefix added by escapeKey; panics on input
// that escapeKey could not have produced.
func unescapeKey(escKey []byte) []byte {
	if len(escKey) < 2 || string(escKey[:2]) != "k_" {
		panic(fmt.Sprintf("Invalid esc key: %x", escKey))
	}
	return escKey[2:]
}
@ -1,333 +0,0 @@ | |||
package db | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"path/filepath" | |||
"github.com/syndtr/goleveldb/leveldb" | |||
"github.com/syndtr/goleveldb/leveldb/errors" | |||
"github.com/syndtr/goleveldb/leveldb/iterator" | |||
"github.com/syndtr/goleveldb/leveldb/opt" | |||
) | |||
// init registers GoLevelDB under the "goleveldb" backend name.
func init() {
	dbCreator := func(name string, dir string) (DB, error) {
		return NewGoLevelDB(name, dir)
	}
	registerDBCreator(GoLevelDBBackend, dbCreator, false)
}
var _ DB = (*GoLevelDB)(nil)

// GoLevelDB is a DB backed by an on-disk goleveldb store.
type GoLevelDB struct {
	db *leveldb.DB
}

// NewGoLevelDB opens (or creates) dir/name.db with default options.
func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) {
	return NewGoLevelDBWithOpts(name, dir, nil)
}

// NewGoLevelDBWithOpts opens (or creates) dir/name.db with the given
// goleveldb options; nil means defaults.
func NewGoLevelDBWithOpts(name string, dir string, o *opt.Options) (*GoLevelDB, error) {
	dbPath := filepath.Join(dir, name+".db")
	db, err := leveldb.OpenFile(dbPath, o)
	if err != nil {
		return nil, err
	}
	database := &GoLevelDB{
		db: db,
	}
	return database, nil
}
// Implements DB. | |||
func (db *GoLevelDB) Get(key []byte) []byte { | |||
key = nonNilBytes(key) | |||
res, err := db.db.Get(key, nil) | |||
if err != nil { | |||
if err == errors.ErrNotFound { | |||
return nil | |||
} | |||
panic(err) | |||
} | |||
return res | |||
} | |||
// Implements DB. | |||
func (db *GoLevelDB) Has(key []byte) bool { | |||
return db.Get(key) != nil | |||
} | |||
// Implements DB. | |||
func (db *GoLevelDB) Set(key []byte, value []byte) { | |||
key = nonNilBytes(key) | |||
value = nonNilBytes(value) | |||
err := db.db.Put(key, value, nil) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *GoLevelDB) SetSync(key []byte, value []byte) { | |||
key = nonNilBytes(key) | |||
value = nonNilBytes(value) | |||
err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *GoLevelDB) Delete(key []byte) { | |||
key = nonNilBytes(key) | |||
err := db.db.Delete(key, nil) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *GoLevelDB) DeleteSync(key []byte) { | |||
key = nonNilBytes(key) | |||
err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) | |||
if err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (db *GoLevelDB) DB() *leveldb.DB { | |||
return db.db | |||
} | |||
// Close closes the underlying goleveldb handle. Implements DB.
func (db *GoLevelDB) Close() {
	db.db.Close()
}

// Print dumps the "leveldb.stats" property and every key/value pair in
// hex to stdout; the property fetch is best-effort. Implements DB.
// NOTE(review): itr is never Released here — confirm whether that leaks
// iterator resources.
func (db *GoLevelDB) Print() {
	str, _ := db.db.GetProperty("leveldb.stats")
	fmt.Printf("%v\n", str)

	itr := db.db.NewIterator(nil, nil)
	for itr.Next() {
		key := itr.Key()
		value := itr.Value()
		fmt.Printf("[%X]:\t[%X]\n", key, value)
	}
}
// Implements DB. | |||
func (db *GoLevelDB) Stats() map[string]string { | |||
keys := []string{ | |||
"leveldb.num-files-at-level{n}", | |||
"leveldb.stats", | |||
"leveldb.sstables", | |||
"leveldb.blockpool", | |||
"leveldb.cachedblock", | |||
"leveldb.openedtables", | |||
"leveldb.alivesnaps", | |||
"leveldb.aliveiters", | |||
} | |||
stats := make(map[string]string) | |||
for _, key := range keys { | |||
str, err := db.db.GetProperty(key) | |||
if err == nil { | |||
stats[key] = str | |||
} | |||
} | |||
return stats | |||
} | |||
//----------------------------------------
// Batch

// NewBatch returns a fresh write batch for this DB. Implements DB.
func (db *GoLevelDB) NewBatch() Batch {
	batch := new(leveldb.Batch)
	return &goLevelDBBatch{db, batch}
}

// goLevelDBBatch buffers operations in a leveldb.Batch until Write or
// WriteSync applies them to the owning DB.
type goLevelDBBatch struct {
	db    *GoLevelDB
	batch *leveldb.Batch
}

// Set queues a put. Implements Batch.
func (mBatch *goLevelDBBatch) Set(key, value []byte) {
	mBatch.batch.Put(key, value)
}

// Delete queues a delete. Implements Batch.
func (mBatch *goLevelDBBatch) Delete(key []byte) {
	mBatch.batch.Delete(key)
}

// Write applies the batch without fsync; panics on error. Implements Batch.
func (mBatch *goLevelDBBatch) Write() {
	err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: false})
	if err != nil {
		panic(err)
	}
}

// WriteSync applies the batch with fsync; panics on error. Implements Batch.
func (mBatch *goLevelDBBatch) WriteSync() {
	err := mBatch.db.db.Write(mBatch.batch, &opt.WriteOptions{Sync: true})
	if err != nil {
		panic(err)
	}
}

// Implements Batch.
// Close is no-op for goLevelDBBatch.
func (mBatch *goLevelDBBatch) Close() {}
//----------------------------------------
// Iterator
// NOTE This is almost identical to db/c_level_db.Iterator
// Before creating a third version, refactor.

// Iterator returns an ascending iterator over [start, end). Implements DB.
func (db *GoLevelDB) Iterator(start, end []byte) Iterator {
	itr := db.db.NewIterator(nil, nil)
	return newGoLevelDBIterator(itr, start, end, false)
}

// ReverseIterator returns a descending iterator over [start, end).
// Implements DB.
func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator {
	itr := db.db.NewIterator(nil, nil)
	return newGoLevelDBIterator(itr, start, end, true)
}
// goLevelDBIterator adapts a raw goleveldb iterator to the Iterator
// interface, clamping it to the [start, end) domain.
type goLevelDBIterator struct {
	source    iterator.Iterator
	start     []byte // inclusive lower bound; nil means unbounded
	end       []byte // exclusive upper bound; nil means unbounded
	isReverse bool
	isInvalid bool // memoized by Valid() once iteration leaves the domain
}

var _ Iterator = (*goLevelDBIterator)(nil)
// newGoLevelDBIterator positions source at the first entry of the domain
// (the last entry for reverse iteration) and wraps it.
func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator {
	if isReverse {
		if end == nil {
			source.Last()
		} else {
			valid := source.Seek(end)
			if valid {
				// Seek landed on the first key >= end; since end is
				// exclusive, step back when we are at or past it.
				eoakey := source.Key() // end or after key
				if bytes.Compare(end, eoakey) <= 0 {
					source.Prev()
				}
			} else {
				// Every key is < end, so start from the very last one.
				source.Last()
			}
		}
	} else {
		if start == nil {
			source.First()
		} else {
			source.Seek(start)
		}
	}
	return &goLevelDBIterator{
		source:    source,
		start:     start,
		end:       end,
		isReverse: isReverse,
		isInvalid: false,
	}
}
// Domain returns the iterator's [start, end) range. Implements Iterator.
func (itr *goLevelDBIterator) Domain() ([]byte, []byte) {
	return itr.start, itr.end
}

// Valid reports whether the iterator points at a key inside the domain;
// invalidity is memoized in isInvalid. Implements Iterator.
func (itr *goLevelDBIterator) Valid() bool {
	// Once invalid, forever invalid.
	if itr.isInvalid {
		return false
	}

	// Panic on DB error. No way to recover.
	itr.assertNoError()

	// If source is invalid, invalid.
	if !itr.source.Valid() {
		itr.isInvalid = true
		return false
	}

	// If key is end or past it, invalid.
	var start = itr.start
	var end = itr.end
	var key = itr.source.Key()
	if itr.isReverse {
		// Reverse iteration ends once we pass below the inclusive start.
		if start != nil && bytes.Compare(key, start) < 0 {
			itr.isInvalid = true
			return false
		}
	} else {
		// Forward iteration ends at the exclusive end bound.
		if end != nil && bytes.Compare(end, key) <= 0 {
			itr.isInvalid = true
			return false
		}
	}

	// Valid
	return true
}
// Implements Iterator.
func (itr *goLevelDBIterator) Key() []byte {
	// Key returns a copy of the current key; panics if the iterator is
	// invalid or the source reported an error.
	// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
	itr.assertNoError()
	itr.assertIsValid()
	return cp(itr.source.Key())
}

// Implements Iterator.
func (itr *goLevelDBIterator) Value() []byte {
	// Value returns a copy of the current value; panics if the iterator
	// is invalid or the source reported an error.
	// See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88
	itr.assertNoError()
	itr.assertIsValid()
	return cp(itr.source.Value())
}

// Next advances the cursor, moving backward for reverse iterators;
// panics if the iterator is invalid. Implements Iterator.
func (itr *goLevelDBIterator) Next() {
	itr.assertNoError()
	itr.assertIsValid()
	if itr.isReverse {
		itr.source.Prev()
	} else {
		itr.source.Next()
	}
}

// Close releases the underlying goleveldb iterator. Implements Iterator.
func (itr *goLevelDBIterator) Close() {
	itr.source.Release()
}
func (itr *goLevelDBIterator) assertNoError() { | |||
if err := itr.source.Error(); err != nil { | |||
panic(err) | |||
} | |||
} | |||
func (itr goLevelDBIterator) assertIsValid() { | |||
if !itr.Valid() { | |||
panic("goLevelDBIterator is invalid") | |||
} | |||
} |
@ -1,45 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"testing" | |||
"github.com/stretchr/testify/require" | |||
"github.com/syndtr/goleveldb/leveldb/opt" | |||
cmn "github.com/tendermint/tendermint/libs/common" | |||
) | |||
func TestGoLevelDBNewGoLevelDB(t *testing.T) { | |||
name := fmt.Sprintf("test_%x", cmn.RandStr(12)) | |||
defer cleanupDBDir("", name) | |||
// Test we can't open the db twice for writing | |||
wr1, err := NewGoLevelDB(name, "") | |||
require.Nil(t, err) | |||
_, err = NewGoLevelDB(name, "") | |||
require.NotNil(t, err) | |||
wr1.Close() // Close the db to release the lock | |||
// Test we can open the db twice for reading only | |||
ro1, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) | |||
defer ro1.Close() | |||
require.Nil(t, err) | |||
ro2, err := NewGoLevelDBWithOpts(name, "", &opt.Options{ReadOnly: true}) | |||
defer ro2.Close() | |||
require.Nil(t, err) | |||
} | |||
// BenchmarkGoLevelDBRandomReadsWrites measures interleaved random
// single-key reads and writes against an on-disk goleveldb store.
func BenchmarkGoLevelDBRandomReadsWrites(b *testing.B) {
	name := fmt.Sprintf("test_%x", cmn.RandStr(12))
	db, err := NewGoLevelDB(name, "")
	if err != nil {
		b.Fatal(err)
	}
	defer func() {
		db.Close()
		cleanupDBDir("", name)
	}()

	benchmarkRandomReadsWrites(b, db)
}
@ -1,74 +0,0 @@ | |||
package db | |||
import "sync" | |||
// atomicSetDeleter is the write surface a memBatch needs from its backing
// store: the unlocked mutators plus the mutex that guards them, so a
// whole batch can be applied under one lock acquisition.
type atomicSetDeleter interface {
	Mutex() *sync.Mutex
	SetNoLock(key, value []byte)
	SetNoLockSync(key, value []byte)
	DeleteNoLock(key []byte)
	DeleteNoLockSync(key []byte)
}

// memBatch queues set/delete operations and applies them all to db on
// Write or WriteSync.
type memBatch struct {
	db  atomicSetDeleter
	ops []operation
}

// opType discriminates the queued batch operations.
type opType int

const (
	opTypeSet    opType = 1
	opTypeDelete opType = 2
)

// operation is one queued set (key, value) or delete (key only) entry.
type operation struct {
	opType
	key   []byte
	value []byte
}
func (mBatch *memBatch) Set(key, value []byte) { | |||
mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) | |||
} | |||
func (mBatch *memBatch) Delete(key []byte) { | |||
mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) | |||
} | |||
func (mBatch *memBatch) Write() { | |||
mBatch.write(false) | |||
} | |||
func (mBatch *memBatch) WriteSync() { | |||
mBatch.write(true) | |||
} | |||
func (mBatch *memBatch) Close() { | |||
mBatch.ops = nil | |||
} | |||
// write applies every queued op while holding the store's mutex (when it
// provides one). With doSync set, only the final op uses the Sync
// variant — syncing once at the end persists the whole batch.
func (mBatch *memBatch) write(doSync bool) {
	if mtx := mBatch.db.Mutex(); mtx != nil {
		mtx.Lock()
		defer mtx.Unlock()
	}
	for i, op := range mBatch.ops {
		if doSync && i == (len(mBatch.ops)-1) {
			switch op.opType {
			case opTypeSet:
				mBatch.db.SetNoLockSync(op.key, op.value)
			case opTypeDelete:
				mBatch.db.DeleteNoLockSync(op.key)
			}
			break // we're done.
		}
		switch op.opType {
		case opTypeSet:
			mBatch.db.SetNoLock(op.key, op.value)
		case opTypeDelete:
			mBatch.db.DeleteNoLock(op.key)
		}
	}
}
@ -1,255 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"sort" | |||
"sync" | |||
) | |||
// init registers MemDB under the "memdb" backend name; name and dir are
// ignored for an in-memory store.
func init() {
	registerDBCreator(MemDBBackend, func(name, dir string) (DB, error) {
		return NewMemDB(), nil
	}, false)
}
var _ DB = (*MemDB)(nil) | |||
type MemDB struct { | |||
mtx sync.Mutex | |||
db map[string][]byte | |||
} | |||
func NewMemDB() *MemDB { | |||
database := &MemDB{ | |||
db: make(map[string][]byte), | |||
} | |||
return database | |||
} | |||
// Implements atomicSetDeleter. | |||
func (db *MemDB) Mutex() *sync.Mutex { | |||
return &(db.mtx) | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Get(key []byte) []byte { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
key = nonNilBytes(key) | |||
value := db.db[string(key)] | |||
return value | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Has(key []byte) bool { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
key = nonNilBytes(key) | |||
_, ok := db.db[string(key)] | |||
return ok | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Set(key []byte, value []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.SetNoLock(key, value) | |||
} | |||
// Implements DB. | |||
func (db *MemDB) SetSync(key []byte, value []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.SetNoLock(key, value) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (db *MemDB) SetNoLock(key []byte, value []byte) { | |||
db.SetNoLockSync(key, value) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (db *MemDB) SetNoLockSync(key []byte, value []byte) { | |||
key = nonNilBytes(key) | |||
value = nonNilBytes(value) | |||
db.db[string(key)] = value | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Delete(key []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.DeleteNoLock(key) | |||
} | |||
// Implements DB. | |||
func (db *MemDB) DeleteSync(key []byte) { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
db.DeleteNoLock(key) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (db *MemDB) DeleteNoLock(key []byte) { | |||
db.DeleteNoLockSync(key) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (db *MemDB) DeleteNoLockSync(key []byte) { | |||
key = nonNilBytes(key) | |||
delete(db.db, string(key)) | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Close() { | |||
// Close is a noop since for an in-memory | |||
// database, we don't have a destination | |||
// to flush contents to nor do we want | |||
// any data loss on invoking Close() | |||
// See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Print() { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
for key, value := range db.db { | |||
fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) | |||
} | |||
} | |||
// Implements DB. | |||
func (db *MemDB) Stats() map[string]string { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
stats := make(map[string]string) | |||
stats["database.type"] = "memDB" | |||
stats["database.size"] = fmt.Sprintf("%d", len(db.db)) | |||
return stats | |||
} | |||
// Implements DB. | |||
func (db *MemDB) NewBatch() Batch { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
return &memBatch{db, nil} | |||
} | |||
//---------------------------------------- | |||
// Iterator | |||
// Implements DB. | |||
func (db *MemDB) Iterator(start, end []byte) Iterator { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
keys := db.getSortedKeys(start, end, false) | |||
return newMemDBIterator(db, keys, start, end) | |||
} | |||
// Implements DB. | |||
func (db *MemDB) ReverseIterator(start, end []byte) Iterator { | |||
db.mtx.Lock() | |||
defer db.mtx.Unlock() | |||
keys := db.getSortedKeys(start, end, true) | |||
return newMemDBIterator(db, keys, start, end) | |||
} | |||
// memDBIterator walks a snapshot of keys taken at creation time, looking
// values up in the live DB on demand.
// We need a copy of all of the keys.
// Not the best, but probably not a bottleneck depending.
type memDBIterator struct {
	db    DB
	cur   int      // index into keys; valid while 0 <= cur < len(keys)
	keys  []string // domain-filtered key snapshot, in iteration order
	start []byte
	end   []byte
}

var _ Iterator = (*memDBIterator)(nil)

// newMemDBIterator wraps a prepared key snapshot.
// Keys is expected to be in reverse order for reverse iterators.
func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator {
	return &memDBIterator{
		db:    db,
		cur:   0,
		keys:  keys,
		start: start,
		end:   end,
	}
}
// Implements Iterator. | |||
func (itr *memDBIterator) Domain() ([]byte, []byte) { | |||
return itr.start, itr.end | |||
} | |||
// Implements Iterator. | |||
func (itr *memDBIterator) Valid() bool { | |||
return 0 <= itr.cur && itr.cur < len(itr.keys) | |||
} | |||
// Implements Iterator. | |||
func (itr *memDBIterator) Next() { | |||
itr.assertIsValid() | |||
itr.cur++ | |||
} | |||
// Implements Iterator. | |||
func (itr *memDBIterator) Key() []byte { | |||
itr.assertIsValid() | |||
return []byte(itr.keys[itr.cur]) | |||
} | |||
// Implements Iterator. | |||
func (itr *memDBIterator) Value() []byte { | |||
itr.assertIsValid() | |||
key := []byte(itr.keys[itr.cur]) | |||
return itr.db.Get(key) | |||
} | |||
// Implements Iterator. | |||
func (itr *memDBIterator) Close() { | |||
itr.keys = nil | |||
itr.db = nil | |||
} | |||
func (itr *memDBIterator) assertIsValid() { | |||
if !itr.Valid() { | |||
panic("memDBIterator is invalid") | |||
} | |||
} | |||
//---------------------------------------- | |||
// Misc. | |||
func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { | |||
keys := []string{} | |||
for key := range db.db { | |||
inDomain := IsKeyInDomain([]byte(key), start, end) | |||
if inDomain { | |||
keys = append(keys, key) | |||
} | |||
} | |||
sort.Strings(keys) | |||
if reverse { | |||
nkeys := len(keys) | |||
for i := 0; i < nkeys/2; i++ { | |||
temp := keys[i] | |||
keys[i] = keys[nkeys-i-1] | |||
keys[nkeys-i-1] = temp | |||
} | |||
} | |||
return keys | |||
} |
@ -1,336 +0,0 @@ | |||
package db | |||
import ( | |||
"bytes" | |||
"fmt" | |||
"sync" | |||
) | |||
// IteratePrefix is a convenience function for iterating over a key domain | |||
// restricted by prefix. | |||
func IteratePrefix(db DB, prefix []byte) Iterator { | |||
var start, end []byte | |||
if len(prefix) == 0 { | |||
start = nil | |||
end = nil | |||
} else { | |||
start = cp(prefix) | |||
end = cpIncr(prefix) | |||
} | |||
return db.Iterator(start, end) | |||
} | |||
/* | |||
TODO: Make test, maybe rename. | |||
// Like IteratePrefix but the iterator strips the prefix from the keys. | |||
func IteratePrefixStripped(db DB, prefix []byte) Iterator { | |||
start, end := ... | |||
return newPrefixIterator(prefix, start, end, IteratePrefix(db, prefix)) | |||
} | |||
*/ | |||
//----------------------------------------
// prefixDB

// prefixDB wraps another DB, transparently prepending a fixed prefix to
// every key so that multiple logical stores can share one physical DB.
type prefixDB struct {
	mtx    sync.Mutex // serializes access to the wrapped DB
	prefix []byte     // namespace prepended to every key
	db     DB         // the wrapped database
}
// NewPrefixDB lets you namespace multiple DBs within a single DB. | |||
func NewPrefixDB(db DB, prefix []byte) *prefixDB { | |||
return &prefixDB{ | |||
prefix: prefix, | |||
db: db, | |||
} | |||
} | |||
// Implements atomicSetDeleter. | |||
func (pdb *prefixDB) Mutex() *sync.Mutex { | |||
return &(pdb.mtx) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Get(key []byte) []byte { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
pkey := pdb.prefixed(key) | |||
value := pdb.db.Get(pkey) | |||
return value | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Has(key []byte) bool { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
return pdb.db.Has(pdb.prefixed(key)) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Set(key []byte, value []byte) { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
pkey := pdb.prefixed(key) | |||
pdb.db.Set(pkey, value) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) SetSync(key []byte, value []byte) { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
pdb.db.SetSync(pdb.prefixed(key), value) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Delete(key []byte) { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
pdb.db.Delete(pdb.prefixed(key)) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) DeleteSync(key []byte) { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
pdb.db.DeleteSync(pdb.prefixed(key)) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Iterator(start, end []byte) Iterator { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
var pstart, pend []byte | |||
pstart = append(cp(pdb.prefix), start...) | |||
if end == nil { | |||
pend = cpIncr(pdb.prefix) | |||
} else { | |||
pend = append(cp(pdb.prefix), end...) | |||
} | |||
return newPrefixIterator( | |||
pdb.prefix, | |||
start, | |||
end, | |||
pdb.db.Iterator( | |||
pstart, | |||
pend, | |||
), | |||
) | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) ReverseIterator(start, end []byte) Iterator { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
var pstart, pend []byte | |||
pstart = append(cp(pdb.prefix), start...) | |||
if end == nil { | |||
pend = cpIncr(pdb.prefix) | |||
} else { | |||
pend = append(cp(pdb.prefix), end...) | |||
} | |||
ritr := pdb.db.ReverseIterator(pstart, pend) | |||
return newPrefixIterator( | |||
pdb.prefix, | |||
start, | |||
end, | |||
ritr, | |||
) | |||
} | |||
// Implements DB. | |||
// Panics if the underlying DB is not an | |||
// atomicSetDeleter. | |||
func (pdb *prefixDB) NewBatch() Batch { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
return newPrefixBatch(pdb.prefix, pdb.db.NewBatch()) | |||
} | |||
/* NOTE: Uncomment to use memBatch instead of prefixBatch | |||
// Implements atomicSetDeleter. | |||
func (pdb *prefixDB) SetNoLock(key []byte, value []byte) { | |||
pdb.db.(atomicSetDeleter).SetNoLock(pdb.prefixed(key), value) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (pdb *prefixDB) SetNoLockSync(key []byte, value []byte) { | |||
pdb.db.(atomicSetDeleter).SetNoLockSync(pdb.prefixed(key), value) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (pdb *prefixDB) DeleteNoLock(key []byte) { | |||
pdb.db.(atomicSetDeleter).DeleteNoLock(pdb.prefixed(key)) | |||
} | |||
// Implements atomicSetDeleter. | |||
func (pdb *prefixDB) DeleteNoLockSync(key []byte) { | |||
pdb.db.(atomicSetDeleter).DeleteNoLockSync(pdb.prefixed(key)) | |||
} | |||
*/ | |||
// Implements DB. | |||
func (pdb *prefixDB) Close() { | |||
pdb.mtx.Lock() | |||
defer pdb.mtx.Unlock() | |||
pdb.db.Close() | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Print() { | |||
fmt.Printf("prefix: %X\n", pdb.prefix) | |||
itr := pdb.Iterator(nil, nil) | |||
defer itr.Close() | |||
for ; itr.Valid(); itr.Next() { | |||
key := itr.Key() | |||
value := itr.Value() | |||
fmt.Printf("[%X]:\t[%X]\n", key, value) | |||
} | |||
} | |||
// Implements DB. | |||
func (pdb *prefixDB) Stats() map[string]string { | |||
stats := make(map[string]string) | |||
stats["prefixdb.prefix.string"] = string(pdb.prefix) | |||
stats["prefixdb.prefix.hex"] = fmt.Sprintf("%X", pdb.prefix) | |||
source := pdb.db.Stats() | |||
for key, value := range source { | |||
stats["prefixdb.source."+key] = value | |||
} | |||
return stats | |||
} | |||
func (pdb *prefixDB) prefixed(key []byte) []byte { | |||
return append(cp(pdb.prefix), key...) | |||
} | |||
//----------------------------------------
// prefixBatch

// prefixBatch decorates a Batch, prepending a fixed prefix to the key of
// every queued operation before handing it to the source batch.
type prefixBatch struct {
	prefix []byte // namespace prepended to every op key
	source Batch  // the underlying batch receiving the prefixed ops
}
func newPrefixBatch(prefix []byte, source Batch) prefixBatch { | |||
return prefixBatch{ | |||
prefix: prefix, | |||
source: source, | |||
} | |||
} | |||
func (pb prefixBatch) Set(key, value []byte) { | |||
pkey := append(cp(pb.prefix), key...) | |||
pb.source.Set(pkey, value) | |||
} | |||
func (pb prefixBatch) Delete(key []byte) { | |||
pkey := append(cp(pb.prefix), key...) | |||
pb.source.Delete(pkey) | |||
} | |||
func (pb prefixBatch) Write() { | |||
pb.source.Write() | |||
} | |||
func (pb prefixBatch) WriteSync() { | |||
pb.source.WriteSync() | |||
} | |||
func (pb prefixBatch) Close() { | |||
pb.source.Close() | |||
} | |||
//----------------------------------------
// prefixIterator

var _ Iterator = (*prefixIterator)(nil)

// Strips prefix while iterating from Iterator.
type prefixIterator struct {
	prefix []byte   // namespace being stripped from every key
	start  []byte   // caller-visible (unprefixed) lower bound
	end    []byte   // caller-visible (unprefixed) upper bound
	source Iterator // underlying iterator over prefixed keys
	valid  bool     // false once source is exhausted or leaves the prefix range
}
func newPrefixIterator(prefix, start, end []byte, source Iterator) *prefixIterator { | |||
if !source.Valid() || !bytes.HasPrefix(source.Key(), prefix) { | |||
return &prefixIterator{ | |||
prefix: prefix, | |||
start: start, | |||
end: end, | |||
source: source, | |||
valid: false, | |||
} | |||
} else { | |||
return &prefixIterator{ | |||
prefix: prefix, | |||
start: start, | |||
end: end, | |||
source: source, | |||
valid: true, | |||
} | |||
} | |||
} | |||
func (itr *prefixIterator) Domain() (start []byte, end []byte) { | |||
return itr.start, itr.end | |||
} | |||
func (itr *prefixIterator) Valid() bool { | |||
return itr.valid && itr.source.Valid() | |||
} | |||
func (itr *prefixIterator) Next() { | |||
if !itr.valid { | |||
panic("prefixIterator invalid, cannot call Next()") | |||
} | |||
itr.source.Next() | |||
if !itr.source.Valid() || !bytes.HasPrefix(itr.source.Key(), itr.prefix) { | |||
itr.valid = false | |||
return | |||
} | |||
} | |||
func (itr *prefixIterator) Key() (key []byte) { | |||
if !itr.valid { | |||
panic("prefixIterator invalid, cannot call Key()") | |||
} | |||
return stripPrefix(itr.source.Key(), itr.prefix) | |||
} | |||
func (itr *prefixIterator) Value() (value []byte) { | |||
if !itr.valid { | |||
panic("prefixIterator invalid, cannot call Value()") | |||
} | |||
return itr.source.Value() | |||
} | |||
func (itr *prefixIterator) Close() { | |||
itr.source.Close() | |||
} | |||
//----------------------------------------

// stripPrefix returns key with prefix removed. It panics when key is
// shorter than prefix or does not begin with it — both indicate a
// programmer error, since callers are expected to pass only prefixed keys.
func stripPrefix(key []byte, prefix []byte) (stripped []byte) {
	if len(key) < len(prefix) {
		panic("should not happen")
	}
	// Fixes the original's typo'd panic message ("happne").
	if !bytes.Equal(key[:len(prefix)], prefix) {
		panic("should not happen")
	}
	return key[len(prefix):]
}
@ -1,192 +0,0 @@ | |||
package db | |||
import "testing" | |||
func mockDBWithStuff() DB { | |||
db := NewMemDB() | |||
// Under "key" prefix | |||
db.Set(bz("key"), bz("value")) | |||
db.Set(bz("key1"), bz("value1")) | |||
db.Set(bz("key2"), bz("value2")) | |||
db.Set(bz("key3"), bz("value3")) | |||
db.Set(bz("something"), bz("else")) | |||
db.Set(bz(""), bz("")) | |||
db.Set(bz("k"), bz("val")) | |||
db.Set(bz("ke"), bz("valu")) | |||
db.Set(bz("kee"), bz("valuu")) | |||
return db | |||
} | |||
func TestPrefixDBSimple(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
checkValue(t, pdb, bz("key"), nil) | |||
checkValue(t, pdb, bz(""), bz("value")) | |||
checkValue(t, pdb, bz("key1"), nil) | |||
checkValue(t, pdb, bz("1"), bz("value1")) | |||
checkValue(t, pdb, bz("key2"), nil) | |||
checkValue(t, pdb, bz("2"), bz("value2")) | |||
checkValue(t, pdb, bz("key3"), nil) | |||
checkValue(t, pdb, bz("3"), bz("value3")) | |||
checkValue(t, pdb, bz("something"), nil) | |||
checkValue(t, pdb, bz("k"), nil) | |||
checkValue(t, pdb, bz("ke"), nil) | |||
checkValue(t, pdb, bz("kee"), nil) | |||
} | |||
func TestPrefixDBIterator1(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.Iterator(nil, nil) | |||
checkDomain(t, itr, nil, nil) | |||
checkItem(t, itr, bz(""), bz("value")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("1"), bz("value1")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value2")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("3"), bz("value3")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBIterator2(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.Iterator(nil, bz("")) | |||
checkDomain(t, itr, nil, bz("")) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBIterator3(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.Iterator(bz(""), nil) | |||
checkDomain(t, itr, bz(""), nil) | |||
checkItem(t, itr, bz(""), bz("value")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("1"), bz("value1")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value2")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("3"), bz("value3")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBIterator4(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.Iterator(bz(""), bz("")) | |||
checkDomain(t, itr, bz(""), bz("")) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator1(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(nil, nil) | |||
checkDomain(t, itr, nil, nil) | |||
checkItem(t, itr, bz("3"), bz("value3")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value2")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("1"), bz("value1")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz(""), bz("value")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator2(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(bz(""), nil) | |||
checkDomain(t, itr, bz(""), nil) | |||
checkItem(t, itr, bz("3"), bz("value3")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value2")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("1"), bz("value1")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz(""), bz("value")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator3(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(nil, bz("")) | |||
checkDomain(t, itr, nil, bz("")) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator4(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(bz(""), bz("")) | |||
checkDomain(t, itr, bz(""), bz("")) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator5(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(bz("1"), nil) | |||
checkDomain(t, itr, bz("1"), nil) | |||
checkItem(t, itr, bz("3"), bz("value3")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value2")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("1"), bz("value1")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator6(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(bz("2"), nil) | |||
checkDomain(t, itr, bz("2"), nil) | |||
checkItem(t, itr, bz("3"), bz("value3")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value2")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} | |||
func TestPrefixDBReverseIterator7(t *testing.T) { | |||
db := mockDBWithStuff() | |||
pdb := NewPrefixDB(db, bz("key")) | |||
itr := pdb.ReverseIterator(nil, bz("2")) | |||
checkDomain(t, itr, nil, bz("2")) | |||
checkItem(t, itr, bz("1"), bz("value1")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz(""), bz("value")) | |||
checkNext(t, itr, false) | |||
checkInvalid(t, itr) | |||
itr.Close() | |||
} |
@ -1,37 +0,0 @@ | |||
/* | |||
remotedb is a package for connecting to distributed Tendermint db.DB | |||
instances. The purpose is to detach difficult deployments, such as
CLevelDB, which requires gcc, or databases that require custom
configurations such as extra disk space. It also eases the burden
and cost of deploying database dependencies for Tendermint
developers. Most importantly, it is built over the highly
performant gRPC transport.
remotedb's RemoteDB implements db.DB so can be used normally | |||
like other databases. One just has to explicitly connect to the | |||
remote database with a client setup such as: | |||
client, err := remotedb.NewRemoteDB(addr, cert) | |||
// Make sure to invoke InitRemote! | |||
if err := client.InitRemote(&remotedb.Init{Name: "test-remote-db", Type: "leveldb"}); err != nil { | |||
log.Fatalf("Failed to initialize the remote db") | |||
} | |||
client.Set(key1, value) | |||
gv1 := client.SetSync(k2, v2) | |||
client.Delete(k1) | |||
gv2 := client.Get(k1) | |||
for itr := client.Iterator(k1, k9); itr.Valid(); itr.Next() { | |||
ik, iv := itr.Key(), itr.Value() | |||
ds, de := itr.Domain() | |||
} | |||
stats := client.Stats() | |||
if !client.Has(dk1) { | |||
client.SetSync(dk1, dv1) | |||
} | |||
*/ | |||
package remotedb |
@ -1,22 +0,0 @@ | |||
package grpcdb | |||
import ( | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/credentials" | |||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" | |||
) | |||
// NewClient creates a gRPC client connected to the bound gRPC server at
// serverAddr. The connection is always secured with TLS, verified against
// the server certificate at serverCert; there is no insecure mode.
// (An earlier revision's mention of a `kind` security selector does not
// correspond to any parameter of this function.)
func NewClient(serverAddr, serverCert string) (protodb.DBClient, error) {
	creds, err := credentials.NewClientTLSFromFile(serverCert, "")
	if err != nil {
		return nil, err
	}
	cc, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(creds))
	if err != nil {
		return nil, err
	}
	return protodb.NewDBClient(cc), nil
}
@ -1,32 +0,0 @@ | |||
/* | |||
grpcdb distributes Tendermint's db.DB instances over the gRPC
transport, decoupling applications from local db.DB usage so the
databases can be used over a network in a highly performant manner.
grpcdb allows users to initialize a database's server like | |||
they would locally and invoke the respective methods of db.DB. | |||
Most users shouldn't use this package, but should instead use | |||
remotedb. Only the lower level users and database server deployers | |||
should use it, for functionality such as: | |||
ln, err := net.Listen("tcp", "0.0.0.0:0") | |||
srv := grpcdb.NewServer() | |||
defer srv.Stop() | |||
go func() { | |||
if err := srv.Serve(ln); err != nil { | |||
t.Fatalf("BindServer: %v", err) | |||
} | |||
}() | |||
or | |||
addr := ":8998" | |||
cert := "server.crt" | |||
key := "server.key" | |||
go func() { | |||
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { | |||
log.Fatalf("BindServer: %v", err) | |||
} | |||
}() | |||
*/ | |||
package grpcdb |
@ -1,52 +0,0 @@ | |||
package grpcdb_test | |||
import ( | |||
"bytes" | |||
"context" | |||
"log" | |||
grpcdb "github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" | |||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" | |||
) | |||
func Example() { | |||
addr := ":8998" | |||
cert := "server.crt" | |||
key := "server.key" | |||
go func() { | |||
if err := grpcdb.ListenAndServe(addr, cert, key); err != nil { | |||
log.Fatalf("BindServer: %v", err) | |||
} | |||
}() | |||
client, err := grpcdb.NewClient(addr, cert) | |||
if err != nil { | |||
log.Fatalf("Failed to create grpcDB client: %v", err) | |||
} | |||
ctx := context.Background() | |||
// 1. Initialize the DB | |||
in := &protodb.Init{ | |||
Type: "leveldb", | |||
Name: "grpc-uno-test", | |||
Dir: ".", | |||
} | |||
if _, err := client.Init(ctx, in); err != nil { | |||
log.Fatalf("Init error: %v", err) | |||
} | |||
// 2. Now it can be used! | |||
query1 := &protodb.Entity{Key: []byte("Project"), Value: []byte("Tmlibs-on-gRPC")} | |||
if _, err := client.SetSync(ctx, query1); err != nil { | |||
log.Fatalf("SetSync err: %v", err) | |||
} | |||
query2 := &protodb.Entity{Key: []byte("Project")} | |||
read, err := client.Get(ctx, query2) | |||
if err != nil { | |||
log.Fatalf("Get err: %v", err) | |||
} | |||
if g, w := read.Value, []byte("Tmlibs-on-gRPC"); !bytes.Equal(g, w) { | |||
log.Fatalf("got= (%q ==> % X)\nwant=(%q ==> % X)", g, g, w, w) | |||
} | |||
} |
@ -1,200 +0,0 @@ | |||
package grpcdb | |||
import ( | |||
"context" | |||
"net" | |||
"sync" | |||
"time" | |||
"google.golang.org/grpc" | |||
"google.golang.org/grpc/credentials" | |||
"github.com/tendermint/tendermint/libs/db" | |||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" | |||
) | |||
// ListenAndServe is a blocking function that sets up a gRPC based | |||
// server at the address supplied, with the gRPC options passed in. | |||
// Normally in usage, invoke it in a goroutine like you would for http.ListenAndServe. | |||
func ListenAndServe(addr, cert, key string, opts ...grpc.ServerOption) error { | |||
ln, err := net.Listen("tcp", addr) | |||
if err != nil { | |||
return err | |||
} | |||
srv, err := NewServer(cert, key, opts...) | |||
if err != nil { | |||
return err | |||
} | |||
return srv.Serve(ln) | |||
} | |||
// NewServer builds a *grpc.Server exposing the protodb DB service, secured
// with TLS credentials loaded from the cert/key file pair. Any opts are
// applied in addition to the TLS credentials.
func NewServer(cert, key string, opts ...grpc.ServerOption) (*grpc.Server, error) {
	creds, err := credentials.NewServerTLSFromFile(cert, key)
	if err != nil {
		return nil, err
	}
	opts = append(opts, grpc.Creds(creds))
	srv := grpc.NewServer(opts...)
	protodb.RegisterDBServer(srv, new(server))
	return srv, nil
}
// server implements protodb.DBServer over a single local db.DB instance.
type server struct {
	mu sync.Mutex // guards db, which is (re)assigned by Init
	db db.DB      // nil until Init has been called
}
var _ protodb.DBServer = (*server)(nil)

// Init initializes the server's database. Only one type of database
// can be initialized per server.
//
// Dir is the directory on the file system in which the DB will be stored (if backed by disk) (TODO: remove)
//
// Name is the representative filesystem entry's basepath
//
// Type can be either one of:
//   - cleveldb (if built with gcc enabled)
//   - fsdb
//   - memdb
//   - leveldb
//
// See https://godoc.org/github.com/tendermint/tendermint/libs/db#DBBackendType
func (s *server) Init(ctx context.Context, in *protodb.Init) (*protodb.Entity, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	// NOTE(review): a previously initialized DB is replaced without being
	// closed — confirm callers invoke Init at most once per server.
	s.db = db.NewDB(in.Name, db.DBBackendType(in.Type), in.Dir)
	return &protodb.Entity{CreatedAt: time.Now().Unix()}, nil
}
// Delete implements protodb.DBServer, removing the entity's key without
// forcing a durable flush.
func (s *server) Delete(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
	s.db.Delete(in.Key)
	return nothing, nil
}

// nothing is the shared empty reply returned by all mutating RPCs.
var nothing = new(protodb.Nothing)

// DeleteSync implements protodb.DBServer, removing the entity's key via
// the DB's synchronous delete path.
func (s *server) DeleteSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) {
	s.db.DeleteSync(in.Key)
	return nothing, nil
}
func (s *server) Get(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { | |||
value := s.db.Get(in.Key) | |||
return &protodb.Entity{Value: value}, nil | |||
} | |||
// GetStream implements protodb.DBServer as a bidirectional stream: each
// received Entity's key is looked up and the result (or an Entity carrying
// the error string) is sent back. The stream ends after the first receive
// or lookup error.
func (s *server) GetStream(ds protodb.DB_GetStreamServer) error {
	// Receive routine
	responsesChan := make(chan *protodb.Entity)
	go func() {
		defer close(responsesChan)
		ctx := context.Background()
		for {
			in, err := ds.Recv()
			if err != nil {
				// A receive failure (including EOF) is reported to the
				// client as an Entity with Err set, then the stream ends.
				responsesChan <- &protodb.Entity{Err: err.Error()}
				return
			}
			out, err := s.Get(ctx, in)
			if err != nil {
				if out == nil {
					out = new(protodb.Entity)
					out.Key = in.Key
				}
				out.Err = err.Error()
				responsesChan <- out
				return
			}
			// Otherwise continue on
			responsesChan <- out
		}
	}()
	// Send routine, block until we return
	// NOTE(review): if Send fails we return while the receive goroutine may
	// still be blocked sending on responsesChan, leaking it — confirm
	// whether gRPC unblocks Recv on stream teardown in that case.
	for out := range responsesChan {
		if err := ds.Send(out); err != nil {
			return err
		}
	}
	return nil
}
func (s *server) Has(ctx context.Context, in *protodb.Entity) (*protodb.Entity, error) { | |||
exists := s.db.Has(in.Key) | |||
return &protodb.Entity{Exists: exists}, nil | |||
} | |||
func (s *server) Set(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { | |||
s.db.Set(in.Key, in.Value) | |||
return nothing, nil | |||
} | |||
func (s *server) SetSync(ctx context.Context, in *protodb.Entity) (*protodb.Nothing, error) { | |||
s.db.SetSync(in.Key, in.Value) | |||
return nothing, nil | |||
} | |||
func (s *server) Iterator(query *protodb.Entity, dis protodb.DB_IteratorServer) error { | |||
it := s.db.Iterator(query.Start, query.End) | |||
defer it.Close() | |||
return s.handleIterator(it, dis.Send) | |||
} | |||
func (s *server) handleIterator(it db.Iterator, sendFunc func(*protodb.Iterator) error) error { | |||
for it.Valid() { | |||
start, end := it.Domain() | |||
out := &protodb.Iterator{ | |||
Domain: &protodb.Domain{Start: start, End: end}, | |||
Valid: it.Valid(), | |||
Key: it.Key(), | |||
Value: it.Value(), | |||
} | |||
if err := sendFunc(out); err != nil { | |||
return err | |||
} | |||
// Finally move the iterator forward | |||
it.Next() | |||
} | |||
return nil | |||
} | |||
func (s *server) ReverseIterator(query *protodb.Entity, dis protodb.DB_ReverseIteratorServer) error { | |||
it := s.db.ReverseIterator(query.Start, query.End) | |||
defer it.Close() | |||
return s.handleIterator(it, dis.Send) | |||
} | |||
func (s *server) Stats(context.Context, *protodb.Nothing) (*protodb.Stats, error) { | |||
stats := s.db.Stats() | |||
return &protodb.Stats{Data: stats, TimeAt: time.Now().Unix()}, nil | |||
} | |||
func (s *server) BatchWrite(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { | |||
return s.batchWrite(c, b, false) | |||
} | |||
func (s *server) BatchWriteSync(c context.Context, b *protodb.Batch) (*protodb.Nothing, error) { | |||
return s.batchWrite(c, b, true) | |||
} | |||
// batchWrite replays the batch's operations onto a fresh underlying batch
// and commits it — synchronously when sync is true. Operations with an
// unknown Type are silently skipped.
func (s *server) batchWrite(c context.Context, b *protodb.Batch, sync bool) (*protodb.Nothing, error) {
	bat := s.db.NewBatch()
	defer bat.Close()
	for _, op := range b.Ops {
		switch op.Type {
		case protodb.Operation_SET:
			bat.Set(op.Entity.Key, op.Entity.Value)
		case protodb.Operation_DELETE:
			bat.Delete(op.Entity.Key)
		}
	}
	if sync {
		bat.WriteSync()
	} else {
		bat.Write()
	}
	return nothing, nil
}
@ -1,914 +0,0 @@ | |||
// Code generated by protoc-gen-go. DO NOT EDIT. | |||
// source: defs.proto | |||
/* | |||
Package protodb is a generated protocol buffer package. | |||
It is generated from these files: | |||
defs.proto | |||
It has these top-level messages: | |||
Batch | |||
Operation | |||
Entity | |||
Nothing | |||
Domain | |||
Iterator | |||
Stats | |||
Init | |||
*/ | |||
package protodb | |||
import proto "github.com/golang/protobuf/proto" | |||
import fmt "fmt" | |||
import math "math" | |||
import ( | |||
context "golang.org/x/net/context" | |||
grpc "google.golang.org/grpc" | |||
) | |||
// Reference imports to suppress errors if they are not otherwise used. | |||
var _ = proto.Marshal | |||
var _ = fmt.Errorf | |||
var _ = math.Inf | |||
// This is a compile-time assertion to ensure that this generated file | |||
// is compatible with the proto package it is being compiled against. | |||
// A compilation error at this line likely means your copy of the | |||
// proto package needs to be updated. | |||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package | |||
type Operation_Type int32 | |||
const ( | |||
Operation_SET Operation_Type = 0 | |||
Operation_DELETE Operation_Type = 1 | |||
) | |||
var Operation_Type_name = map[int32]string{ | |||
0: "SET", | |||
1: "DELETE", | |||
} | |||
var Operation_Type_value = map[string]int32{ | |||
"SET": 0, | |||
"DELETE": 1, | |||
} | |||
func (x Operation_Type) String() string { | |||
return proto.EnumName(Operation_Type_name, int32(x)) | |||
} | |||
func (Operation_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } | |||
type Batch struct { | |||
Ops []*Operation `protobuf:"bytes,1,rep,name=ops" json:"ops,omitempty"` | |||
} | |||
func (m *Batch) Reset() { *m = Batch{} } | |||
func (m *Batch) String() string { return proto.CompactTextString(m) } | |||
func (*Batch) ProtoMessage() {} | |||
func (*Batch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } | |||
func (m *Batch) GetOps() []*Operation { | |||
if m != nil { | |||
return m.Ops | |||
} | |||
return nil | |||
} | |||
type Operation struct { | |||
Entity *Entity `protobuf:"bytes,1,opt,name=entity" json:"entity,omitempty"` | |||
Type Operation_Type `protobuf:"varint,2,opt,name=type,enum=protodb.Operation_Type" json:"type,omitempty"` | |||
} | |||
func (m *Operation) Reset() { *m = Operation{} } | |||
func (m *Operation) String() string { return proto.CompactTextString(m) } | |||
func (*Operation) ProtoMessage() {} | |||
func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } | |||
func (m *Operation) GetEntity() *Entity { | |||
if m != nil { | |||
return m.Entity | |||
} | |||
return nil | |||
} | |||
func (m *Operation) GetType() Operation_Type { | |||
if m != nil { | |||
return m.Type | |||
} | |||
return Operation_SET | |||
} | |||
type Entity struct { | |||
Id int32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` | |||
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` | |||
Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` | |||
Exists bool `protobuf:"varint,4,opt,name=exists" json:"exists,omitempty"` | |||
Start []byte `protobuf:"bytes,5,opt,name=start,proto3" json:"start,omitempty"` | |||
End []byte `protobuf:"bytes,6,opt,name=end,proto3" json:"end,omitempty"` | |||
Err string `protobuf:"bytes,7,opt,name=err" json:"err,omitempty"` | |||
CreatedAt int64 `protobuf:"varint,8,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` | |||
} | |||
func (m *Entity) Reset() { *m = Entity{} } | |||
func (m *Entity) String() string { return proto.CompactTextString(m) } | |||
func (*Entity) ProtoMessage() {} | |||
func (*Entity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } | |||
func (m *Entity) GetId() int32 { | |||
if m != nil { | |||
return m.Id | |||
} | |||
return 0 | |||
} | |||
func (m *Entity) GetKey() []byte { | |||
if m != nil { | |||
return m.Key | |||
} | |||
return nil | |||
} | |||
func (m *Entity) GetValue() []byte { | |||
if m != nil { | |||
return m.Value | |||
} | |||
return nil | |||
} | |||
func (m *Entity) GetExists() bool { | |||
if m != nil { | |||
return m.Exists | |||
} | |||
return false | |||
} | |||
func (m *Entity) GetStart() []byte { | |||
if m != nil { | |||
return m.Start | |||
} | |||
return nil | |||
} | |||
func (m *Entity) GetEnd() []byte { | |||
if m != nil { | |||
return m.End | |||
} | |||
return nil | |||
} | |||
func (m *Entity) GetErr() string { | |||
if m != nil { | |||
return m.Err | |||
} | |||
return "" | |||
} | |||
func (m *Entity) GetCreatedAt() int64 { | |||
if m != nil { | |||
return m.CreatedAt | |||
} | |||
return 0 | |||
} | |||
type Nothing struct { | |||
} | |||
func (m *Nothing) Reset() { *m = Nothing{} } | |||
func (m *Nothing) String() string { return proto.CompactTextString(m) } | |||
func (*Nothing) ProtoMessage() {} | |||
func (*Nothing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } | |||
type Domain struct { | |||
Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` | |||
End []byte `protobuf:"bytes,2,opt,name=end,proto3" json:"end,omitempty"` | |||
} | |||
func (m *Domain) Reset() { *m = Domain{} } | |||
func (m *Domain) String() string { return proto.CompactTextString(m) } | |||
func (*Domain) ProtoMessage() {} | |||
func (*Domain) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } | |||
func (m *Domain) GetStart() []byte { | |||
if m != nil { | |||
return m.Start | |||
} | |||
return nil | |||
} | |||
func (m *Domain) GetEnd() []byte { | |||
if m != nil { | |||
return m.End | |||
} | |||
return nil | |||
} | |||
type Iterator struct { | |||
Domain *Domain `protobuf:"bytes,1,opt,name=domain" json:"domain,omitempty"` | |||
Valid bool `protobuf:"varint,2,opt,name=valid" json:"valid,omitempty"` | |||
Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"` | |||
Value []byte `protobuf:"bytes,4,opt,name=value,proto3" json:"value,omitempty"` | |||
} | |||
func (m *Iterator) Reset() { *m = Iterator{} } | |||
func (m *Iterator) String() string { return proto.CompactTextString(m) } | |||
func (*Iterator) ProtoMessage() {} | |||
func (*Iterator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } | |||
func (m *Iterator) GetDomain() *Domain { | |||
if m != nil { | |||
return m.Domain | |||
} | |||
return nil | |||
} | |||
func (m *Iterator) GetValid() bool { | |||
if m != nil { | |||
return m.Valid | |||
} | |||
return false | |||
} | |||
func (m *Iterator) GetKey() []byte { | |||
if m != nil { | |||
return m.Key | |||
} | |||
return nil | |||
} | |||
func (m *Iterator) GetValue() []byte { | |||
if m != nil { | |||
return m.Value | |||
} | |||
return nil | |||
} | |||
type Stats struct { | |||
Data map[string]string `protobuf:"bytes,1,rep,name=data" json:"data,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` | |||
TimeAt int64 `protobuf:"varint,2,opt,name=time_at,json=timeAt" json:"time_at,omitempty"` | |||
} | |||
func (m *Stats) Reset() { *m = Stats{} } | |||
func (m *Stats) String() string { return proto.CompactTextString(m) } | |||
func (*Stats) ProtoMessage() {} | |||
func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } | |||
func (m *Stats) GetData() map[string]string { | |||
if m != nil { | |||
return m.Data | |||
} | |||
return nil | |||
} | |||
func (m *Stats) GetTimeAt() int64 { | |||
if m != nil { | |||
return m.TimeAt | |||
} | |||
return 0 | |||
} | |||
type Init struct { | |||
Type string `protobuf:"bytes,1,opt,name=Type" json:"Type,omitempty"` | |||
Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"` | |||
Dir string `protobuf:"bytes,3,opt,name=Dir" json:"Dir,omitempty"` | |||
} | |||
func (m *Init) Reset() { *m = Init{} } | |||
func (m *Init) String() string { return proto.CompactTextString(m) } | |||
func (*Init) ProtoMessage() {} | |||
func (*Init) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } | |||
func (m *Init) GetType() string { | |||
if m != nil { | |||
return m.Type | |||
} | |||
return "" | |||
} | |||
func (m *Init) GetName() string { | |||
if m != nil { | |||
return m.Name | |||
} | |||
return "" | |||
} | |||
func (m *Init) GetDir() string { | |||
if m != nil { | |||
return m.Dir | |||
} | |||
return "" | |||
} | |||
// init registers every generated message type and the Operation_Type
// enum with the proto type registry under the "protodb" package name.
func init() {
	proto.RegisterType((*Batch)(nil), "protodb.Batch")
	proto.RegisterType((*Operation)(nil), "protodb.Operation")
	proto.RegisterType((*Entity)(nil), "protodb.Entity")
	proto.RegisterType((*Nothing)(nil), "protodb.Nothing")
	proto.RegisterType((*Domain)(nil), "protodb.Domain")
	proto.RegisterType((*Iterator)(nil), "protodb.Iterator")
	proto.RegisterType((*Stats)(nil), "protodb.Stats")
	proto.RegisterType((*Init)(nil), "protodb.Init")
	proto.RegisterEnum("protodb.Operation_Type", Operation_Type_name, Operation_Type_value)
}
// Reference imports to suppress errors if they are not otherwise used. | |||
var _ context.Context | |||
var _ grpc.ClientConn | |||
// This is a compile-time assertion to ensure that this generated file | |||
// is compatible with the grpc package it is being compiled against. | |||
const _ = grpc.SupportPackageIsVersion4 | |||
// Client API for DB service | |||
type DBClient interface { | |||
Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) | |||
Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) | |||
GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) | |||
Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) | |||
Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) | |||
SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) | |||
Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) | |||
DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) | |||
Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) | |||
ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) | |||
// rpc print(Nothing) returns (Entity) {} | |||
Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) | |||
BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) | |||
BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) | |||
} | |||
type dBClient struct { | |||
cc *grpc.ClientConn | |||
} | |||
func NewDBClient(cc *grpc.ClientConn) DBClient { | |||
return &dBClient{cc} | |||
} | |||
func (c *dBClient) Init(ctx context.Context, in *Init, opts ...grpc.CallOption) (*Entity, error) { | |||
out := new(Entity) | |||
err := grpc.Invoke(ctx, "/protodb.DB/init", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) Get(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { | |||
out := new(Entity) | |||
err := grpc.Invoke(ctx, "/protodb.DB/get", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) GetStream(ctx context.Context, opts ...grpc.CallOption) (DB_GetStreamClient, error) { | |||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[0], c.cc, "/protodb.DB/getStream", opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
x := &dBGetStreamClient{stream} | |||
return x, nil | |||
} | |||
type DB_GetStreamClient interface { | |||
Send(*Entity) error | |||
Recv() (*Entity, error) | |||
grpc.ClientStream | |||
} | |||
type dBGetStreamClient struct { | |||
grpc.ClientStream | |||
} | |||
func (x *dBGetStreamClient) Send(m *Entity) error { | |||
return x.ClientStream.SendMsg(m) | |||
} | |||
func (x *dBGetStreamClient) Recv() (*Entity, error) { | |||
m := new(Entity) | |||
if err := x.ClientStream.RecvMsg(m); err != nil { | |||
return nil, err | |||
} | |||
return m, nil | |||
} | |||
func (c *dBClient) Has(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Entity, error) { | |||
out := new(Entity) | |||
err := grpc.Invoke(ctx, "/protodb.DB/has", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) Set(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { | |||
out := new(Nothing) | |||
err := grpc.Invoke(ctx, "/protodb.DB/set", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) SetSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { | |||
out := new(Nothing) | |||
err := grpc.Invoke(ctx, "/protodb.DB/setSync", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) Delete(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { | |||
out := new(Nothing) | |||
err := grpc.Invoke(ctx, "/protodb.DB/delete", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) DeleteSync(ctx context.Context, in *Entity, opts ...grpc.CallOption) (*Nothing, error) { | |||
out := new(Nothing) | |||
err := grpc.Invoke(ctx, "/protodb.DB/deleteSync", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) Iterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_IteratorClient, error) { | |||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[1], c.cc, "/protodb.DB/iterator", opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
x := &dBIteratorClient{stream} | |||
if err := x.ClientStream.SendMsg(in); err != nil { | |||
return nil, err | |||
} | |||
if err := x.ClientStream.CloseSend(); err != nil { | |||
return nil, err | |||
} | |||
return x, nil | |||
} | |||
type DB_IteratorClient interface { | |||
Recv() (*Iterator, error) | |||
grpc.ClientStream | |||
} | |||
type dBIteratorClient struct { | |||
grpc.ClientStream | |||
} | |||
func (x *dBIteratorClient) Recv() (*Iterator, error) { | |||
m := new(Iterator) | |||
if err := x.ClientStream.RecvMsg(m); err != nil { | |||
return nil, err | |||
} | |||
return m, nil | |||
} | |||
func (c *dBClient) ReverseIterator(ctx context.Context, in *Entity, opts ...grpc.CallOption) (DB_ReverseIteratorClient, error) { | |||
stream, err := grpc.NewClientStream(ctx, &_DB_serviceDesc.Streams[2], c.cc, "/protodb.DB/reverseIterator", opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
x := &dBReverseIteratorClient{stream} | |||
if err := x.ClientStream.SendMsg(in); err != nil { | |||
return nil, err | |||
} | |||
if err := x.ClientStream.CloseSend(); err != nil { | |||
return nil, err | |||
} | |||
return x, nil | |||
} | |||
type DB_ReverseIteratorClient interface { | |||
Recv() (*Iterator, error) | |||
grpc.ClientStream | |||
} | |||
type dBReverseIteratorClient struct { | |||
grpc.ClientStream | |||
} | |||
func (x *dBReverseIteratorClient) Recv() (*Iterator, error) { | |||
m := new(Iterator) | |||
if err := x.ClientStream.RecvMsg(m); err != nil { | |||
return nil, err | |||
} | |||
return m, nil | |||
} | |||
func (c *dBClient) Stats(ctx context.Context, in *Nothing, opts ...grpc.CallOption) (*Stats, error) { | |||
out := new(Stats) | |||
err := grpc.Invoke(ctx, "/protodb.DB/stats", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) BatchWrite(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { | |||
out := new(Nothing) | |||
err := grpc.Invoke(ctx, "/protodb.DB/batchWrite", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
func (c *dBClient) BatchWriteSync(ctx context.Context, in *Batch, opts ...grpc.CallOption) (*Nothing, error) { | |||
out := new(Nothing) | |||
err := grpc.Invoke(ctx, "/protodb.DB/batchWriteSync", in, out, c.cc, opts...) | |||
if err != nil { | |||
return nil, err | |||
} | |||
return out, nil | |||
} | |||
// Server API for DB service | |||
type DBServer interface { | |||
Init(context.Context, *Init) (*Entity, error) | |||
Get(context.Context, *Entity) (*Entity, error) | |||
GetStream(DB_GetStreamServer) error | |||
Has(context.Context, *Entity) (*Entity, error) | |||
Set(context.Context, *Entity) (*Nothing, error) | |||
SetSync(context.Context, *Entity) (*Nothing, error) | |||
Delete(context.Context, *Entity) (*Nothing, error) | |||
DeleteSync(context.Context, *Entity) (*Nothing, error) | |||
Iterator(*Entity, DB_IteratorServer) error | |||
ReverseIterator(*Entity, DB_ReverseIteratorServer) error | |||
// rpc print(Nothing) returns (Entity) {} | |||
Stats(context.Context, *Nothing) (*Stats, error) | |||
BatchWrite(context.Context, *Batch) (*Nothing, error) | |||
BatchWriteSync(context.Context, *Batch) (*Nothing, error) | |||
} | |||
func RegisterDBServer(s *grpc.Server, srv DBServer) { | |||
s.RegisterService(&_DB_serviceDesc, srv) | |||
} | |||
func _DB_Init_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Init) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).Init(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/Init", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).Init(ctx, req.(*Init)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Entity) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).Get(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/Get", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).Get(ctx, req.(*Entity)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_GetStream_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
return srv.(DBServer).GetStream(&dBGetStreamServer{stream}) | |||
} | |||
type DB_GetStreamServer interface { | |||
Send(*Entity) error | |||
Recv() (*Entity, error) | |||
grpc.ServerStream | |||
} | |||
type dBGetStreamServer struct { | |||
grpc.ServerStream | |||
} | |||
func (x *dBGetStreamServer) Send(m *Entity) error { | |||
return x.ServerStream.SendMsg(m) | |||
} | |||
func (x *dBGetStreamServer) Recv() (*Entity, error) { | |||
m := new(Entity) | |||
if err := x.ServerStream.RecvMsg(m); err != nil { | |||
return nil, err | |||
} | |||
return m, nil | |||
} | |||
func _DB_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Entity) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).Has(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/Has", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).Has(ctx, req.(*Entity)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Entity) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).Set(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/Set", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).Set(ctx, req.(*Entity)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_SetSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Entity) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).SetSync(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/SetSync", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).SetSync(ctx, req.(*Entity)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Entity) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).Delete(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/Delete", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).Delete(ctx, req.(*Entity)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_DeleteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Entity) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).DeleteSync(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/DeleteSync", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).DeleteSync(ctx, req.(*Entity)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_Iterator_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
m := new(Entity) | |||
if err := stream.RecvMsg(m); err != nil { | |||
return err | |||
} | |||
return srv.(DBServer).Iterator(m, &dBIteratorServer{stream}) | |||
} | |||
type DB_IteratorServer interface { | |||
Send(*Iterator) error | |||
grpc.ServerStream | |||
} | |||
type dBIteratorServer struct { | |||
grpc.ServerStream | |||
} | |||
func (x *dBIteratorServer) Send(m *Iterator) error { | |||
return x.ServerStream.SendMsg(m) | |||
} | |||
func _DB_ReverseIterator_Handler(srv interface{}, stream grpc.ServerStream) error { | |||
m := new(Entity) | |||
if err := stream.RecvMsg(m); err != nil { | |||
return err | |||
} | |||
return srv.(DBServer).ReverseIterator(m, &dBReverseIteratorServer{stream}) | |||
} | |||
type DB_ReverseIteratorServer interface { | |||
Send(*Iterator) error | |||
grpc.ServerStream | |||
} | |||
type dBReverseIteratorServer struct { | |||
grpc.ServerStream | |||
} | |||
func (x *dBReverseIteratorServer) Send(m *Iterator) error { | |||
return x.ServerStream.SendMsg(m) | |||
} | |||
func _DB_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Nothing) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).Stats(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/Stats", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).Stats(ctx, req.(*Nothing)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_BatchWrite_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Batch) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).BatchWrite(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/BatchWrite", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).BatchWrite(ctx, req.(*Batch)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
func _DB_BatchWriteSync_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { | |||
in := new(Batch) | |||
if err := dec(in); err != nil { | |||
return nil, err | |||
} | |||
if interceptor == nil { | |||
return srv.(DBServer).BatchWriteSync(ctx, in) | |||
} | |||
info := &grpc.UnaryServerInfo{ | |||
Server: srv, | |||
FullMethod: "/protodb.DB/BatchWriteSync", | |||
} | |||
handler := func(ctx context.Context, req interface{}) (interface{}, error) { | |||
return srv.(DBServer).BatchWriteSync(ctx, req.(*Batch)) | |||
} | |||
return interceptor(ctx, in, info, handler) | |||
} | |||
var _DB_serviceDesc = grpc.ServiceDesc{ | |||
ServiceName: "protodb.DB", | |||
HandlerType: (*DBServer)(nil), | |||
Methods: []grpc.MethodDesc{ | |||
{ | |||
MethodName: "init", | |||
Handler: _DB_Init_Handler, | |||
}, | |||
{ | |||
MethodName: "get", | |||
Handler: _DB_Get_Handler, | |||
}, | |||
{ | |||
MethodName: "has", | |||
Handler: _DB_Has_Handler, | |||
}, | |||
{ | |||
MethodName: "set", | |||
Handler: _DB_Set_Handler, | |||
}, | |||
{ | |||
MethodName: "setSync", | |||
Handler: _DB_SetSync_Handler, | |||
}, | |||
{ | |||
MethodName: "delete", | |||
Handler: _DB_Delete_Handler, | |||
}, | |||
{ | |||
MethodName: "deleteSync", | |||
Handler: _DB_DeleteSync_Handler, | |||
}, | |||
{ | |||
MethodName: "stats", | |||
Handler: _DB_Stats_Handler, | |||
}, | |||
{ | |||
MethodName: "batchWrite", | |||
Handler: _DB_BatchWrite_Handler, | |||
}, | |||
{ | |||
MethodName: "batchWriteSync", | |||
Handler: _DB_BatchWriteSync_Handler, | |||
}, | |||
}, | |||
Streams: []grpc.StreamDesc{ | |||
{ | |||
StreamName: "getStream", | |||
Handler: _DB_GetStream_Handler, | |||
ServerStreams: true, | |||
ClientStreams: true, | |||
}, | |||
{ | |||
StreamName: "iterator", | |||
Handler: _DB_Iterator_Handler, | |||
ServerStreams: true, | |||
}, | |||
{ | |||
StreamName: "reverseIterator", | |||
Handler: _DB_ReverseIterator_Handler, | |||
ServerStreams: true, | |||
}, | |||
}, | |||
Metadata: "defs.proto", | |||
} | |||
func init() { proto.RegisterFile("defs.proto", fileDescriptor0) } | |||
var fileDescriptor0 = []byte{ | |||
// 606 bytes of a gzipped FileDescriptorProto | |||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x4f, 0x6f, 0xd3, 0x4e, | |||
0x10, 0xcd, 0xda, 0x8e, 0x13, 0x4f, 0x7f, 0xbf, 0x34, 0x8c, 0x10, 0xb5, 0x8a, 0x90, 0x22, 0x0b, | |||
0x09, 0x43, 0x69, 0x14, 0x52, 0x24, 0xfe, 0x9c, 0x68, 0x95, 0x1c, 0x2a, 0xa1, 0x22, 0x39, 0x95, | |||
0x38, 0xa2, 0x6d, 0x3d, 0x34, 0x2b, 0x1a, 0x3b, 0xac, 0x87, 0x8a, 0x5c, 0xb8, 0xf2, 0x79, 0xf8, | |||
0x7c, 0x5c, 0xd0, 0xae, 0x1d, 0x87, 0x36, 0x39, 0x84, 0x53, 0x76, 0x66, 0xde, 0x7b, 0xb3, 0xf3, | |||
0x32, 0x5e, 0x80, 0x94, 0x3e, 0x17, 0xfd, 0xb9, 0xce, 0x39, 0xc7, 0x96, 0xfd, 0x49, 0x2f, 0xa2, | |||
0x43, 0x68, 0x9e, 0x48, 0xbe, 0x9c, 0xe2, 0x63, 0x70, 0xf3, 0x79, 0x11, 0x8a, 0x9e, 0x1b, 0xef, | |||
0x0c, 0xb1, 0x5f, 0xd5, 0xfb, 0x1f, 0xe6, 0xa4, 0x25, 0xab, 0x3c, 0x4b, 0x4c, 0x39, 0xfa, 0x01, | |||
0x41, 0x9d, 0xc1, 0x27, 0xe0, 0x53, 0xc6, 0x8a, 0x17, 0xa1, 0xe8, 0x89, 0x78, 0x67, 0xb8, 0x5b, | |||
0xb3, 0xc6, 0x36, 0x9d, 0x54, 0x65, 0x3c, 0x00, 0x8f, 0x17, 0x73, 0x0a, 0x9d, 0x9e, 0x88, 0x3b, | |||
0xc3, 0xbd, 0x75, 0xf1, 0xfe, 0xf9, 0x62, 0x4e, 0x89, 0x05, 0x45, 0x0f, 0xc1, 0x33, 0x11, 0xb6, | |||
0xc0, 0x9d, 0x8c, 0xcf, 0xbb, 0x0d, 0x04, 0xf0, 0x47, 0xe3, 0xf7, 0xe3, 0xf3, 0x71, 0x57, 0x44, | |||
0xbf, 0x04, 0xf8, 0xa5, 0x38, 0x76, 0xc0, 0x51, 0xa9, 0xed, 0xdc, 0x4c, 0x1c, 0x95, 0x62, 0x17, | |||
0xdc, 0x2f, 0xb4, 0xb0, 0x3d, 0xfe, 0x4b, 0xcc, 0x11, 0xef, 0x43, 0xf3, 0x46, 0x5e, 0x7f, 0xa3, | |||
0xd0, 0xb5, 0xb9, 0x32, 0xc0, 0x07, 0xe0, 0xd3, 0x77, 0x55, 0x70, 0x11, 0x7a, 0x3d, 0x11, 0xb7, | |||
0x93, 0x2a, 0x32, 0xe8, 0x82, 0xa5, 0xe6, 0xb0, 0x59, 0xa2, 0x6d, 0x60, 0x54, 0x29, 0x4b, 0x43, | |||
0xbf, 0x54, 0xa5, 0xcc, 0xf6, 0x21, 0xad, 0xc3, 0x56, 0x4f, 0xc4, 0x41, 0x62, 0x8e, 0xf8, 0x08, | |||
0xe0, 0x52, 0x93, 0x64, 0x4a, 0x3f, 0x49, 0x0e, 0xdb, 0x3d, 0x11, 0xbb, 0x49, 0x50, 0x65, 0x8e, | |||
0x39, 0x0a, 0xa0, 0x75, 0x96, 0xf3, 0x54, 0x65, 0x57, 0xd1, 0x00, 0xfc, 0x51, 0x3e, 0x93, 0x2a, | |||
0x5b, 0x75, 0x13, 0x1b, 0xba, 0x39, 0x75, 0xb7, 0xe8, 0x2b, 0xb4, 0x4f, 0xd9, 0xb8, 0x94, 0x6b, | |||
0xe3, 0x77, 0x6a, 0xd9, 0x6b, 0x7e, 0x97, 0xa2, 0x49, 0x55, 0xae, 0x06, 0x57, 0xa5, 0x50, 0x3b, | |||
0x29, 0x83, 0xa5, 0x41, 0xee, 0x06, 0x83, 0xbc, 0xbf, 0x0c, 0x8a, 0x7e, 0x0a, 0x68, 0x4e, 0x58, | |||
0x72, 0x81, 0xcf, 0xc1, 0x4b, 0x25, 0xcb, 0x6a, 0x29, 0xc2, 0xba, 0x9d, 0xad, 0xf6, 0x47, 0x92, | |||
0xe5, 0x38, 0x63, 0xbd, 0x48, 0x2c, 0x0a, 0xf7, 0xa0, 0xc5, 0x6a, 0x46, 0xc6, 0x03, 0xc7, 0x7a, | |||
0xe0, 0x9b, 0xf0, 0x98, 0xf7, 0x5f, 0x41, 0x50, 0x63, 0x97, 0xb7, 0x10, 0xa5, 0x7d, 0xb7, 0x6e, | |||
0xe1, 0xd8, 0x5c, 0x19, 0xbc, 0x75, 0x5e, 0x8b, 0xe8, 0x1d, 0x78, 0xa7, 0x99, 0x62, 0xc4, 0x72, | |||
0x25, 0x2a, 0x52, 0xb9, 0x1e, 0x08, 0xde, 0x99, 0x9c, 0x2d, 0x49, 0xf6, 0x6c, 0xb4, 0x47, 0x4a, | |||
0xdb, 0x09, 0x83, 0xc4, 0x1c, 0x87, 0xbf, 0x3d, 0x70, 0x46, 0x27, 0x18, 0x83, 0xa7, 0x8c, 0xd0, | |||
0xff, 0xf5, 0x08, 0x46, 0x77, 0xff, 0xee, 0xc2, 0x46, 0x0d, 0x7c, 0x0a, 0xee, 0x15, 0x31, 0xde, | |||
0xad, 0x6c, 0x82, 0x1e, 0x41, 0x70, 0x45, 0x3c, 0x61, 0x4d, 0x72, 0xb6, 0x0d, 0x21, 0x16, 0x03, | |||
0x61, 0xf4, 0xa7, 0xb2, 0xd8, 0x4a, 0xff, 0x19, 0xb8, 0xc5, 0xa6, 0xab, 0x74, 0xeb, 0xc4, 0x72, | |||
0xad, 0x1a, 0xd8, 0x87, 0x56, 0x41, 0x3c, 0x59, 0x64, 0x97, 0xdb, 0xe1, 0x0f, 0xc1, 0x4f, 0xe9, | |||
0x9a, 0x98, 0xb6, 0x83, 0xbf, 0x30, 0x8f, 0x87, 0x81, 0x6f, 0xdf, 0x61, 0x08, 0x6d, 0xb5, 0x5c, | |||
0xdc, 0x35, 0xc2, 0xbd, 0xd5, 0xff, 0x50, 0x61, 0xa2, 0xc6, 0x40, 0xe0, 0x1b, 0xd8, 0xd5, 0x74, | |||
0x43, 0xba, 0xa0, 0xd3, 0x7f, 0xa5, 0x1e, 0xd8, 0xef, 0x89, 0x0b, 0x5c, 0xbb, 0xcb, 0x7e, 0xe7, | |||
0xf6, 0xde, 0x46, 0x0d, 0x1c, 0x00, 0x5c, 0x98, 0x47, 0xef, 0xa3, 0x56, 0x4c, 0xb8, 0xaa, 0xdb, | |||
0x97, 0x70, 0xe3, 0x34, 0x2f, 0xa1, 0xb3, 0x62, 0x58, 0x13, 0xb6, 0x60, 0x5d, 0xf8, 0x36, 0x75, | |||
0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x95, 0xf4, 0xe3, 0x82, 0x7a, 0x05, 0x00, 0x00, | |||
} |
@ -1,71 +0,0 @@ | |||
syntax = "proto3"; | |||
package protodb; | |||
// Batch is an ordered list of write operations, applied by the
// batchWrite/batchWriteSync RPCs.
message Batch {
// Operations are applied in the order they appear.
repeated Operation ops = 1;
}
// Operation is one element of a Batch: a SET or DELETE of an Entity.
message Operation {
// Entity carries the key (and, for SET, the value) this operation targets.
Entity entity = 1;
enum Type {
SET = 0;
DELETE = 1;
}
Type type = 2;
}
// Entity is the general-purpose request/response message; each RPC
// uses the subset of fields it needs (key/value for get/set, exists
// for has, start/end for the iterator RPCs).
message Entity {
int32 id = 1;
bytes key = 2;
bytes value = 3;
bool exists = 4;
bytes start = 5;
bytes end = 6;
string err = 7;
int64 created_at = 8;
}
// Nothing is an empty message for RPCs with no payload in one direction.
message Nothing {
}
// Domain is the key range an iterator is traversing.
message Domain {
bytes start = 1;
bytes end = 2;
}
// Iterator is one streamed iteration result: the current key/value,
// whether the iterator is still valid, and the iteration domain.
message Iterator {
Domain domain = 1;
bool valid = 2;
bytes key = 3;
bytes value = 4;
}
// Stats is the response of the stats RPC: a free-form string map of
// backend statistics plus a timestamp.
message Stats {
map<string, string> data = 1;
// NOTE(review): epoch/units of time_at are defined by the server — confirm.
int64 time_at = 2;
}
// Init describes the database the server should open: its backend
// type, name, and on-disk directory.
message Init {
string Type = 1;
string Name = 2;
string Dir = 3;
}
// DB exposes a key-value database over gRPC: point reads and writes
// (with sync variants), server-streamed forward/reverse iterators,
// and atomic batched writes.
service DB {
rpc init(Init) returns (Entity) {}
rpc get(Entity) returns (Entity) {}
rpc getStream(stream Entity) returns (stream Entity) {}
rpc has(Entity) returns (Entity) {}
rpc set(Entity) returns (Nothing) {}
rpc setSync(Entity) returns (Nothing) {}
rpc delete(Entity) returns (Nothing) {}
rpc deleteSync(Entity) returns (Nothing) {}
rpc iterator(Entity) returns (stream Iterator) {}
rpc reverseIterator(Entity) returns (stream Iterator) {}
// rpc print(Nothing) returns (Entity) {}
rpc stats(Nothing) returns (Stats) {}
rpc batchWrite(Batch) returns (Nothing) {}
rpc batchWriteSync(Batch) returns (Nothing) {}
}
@ -1,266 +0,0 @@ | |||
package remotedb | |||
import ( | |||
"context" | |||
"fmt" | |||
"github.com/tendermint/tendermint/libs/db" | |||
"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" | |||
protodb "github.com/tendermint/tendermint/libs/db/remotedb/proto" | |||
) | |||
// RemoteDB proxies every db.DB operation over gRPC to a remote
// database server. NOTE(review): the shared ctx/dc are not guarded;
// confirm callers serialize access if used concurrently.
type RemoteDB struct {
	ctx context.Context  // context passed to every RPC
	dc  protodb.DBClient // gRPC client stub for the protodb.DB service
}
func NewRemoteDB(serverAddr string, serverKey string) (*RemoteDB, error) { | |||
return newRemoteDB(grpcdb.NewClient(serverAddr, serverKey)) | |||
} | |||
func newRemoteDB(gdc protodb.DBClient, err error) (*RemoteDB, error) { | |||
if err != nil { | |||
return nil, err | |||
} | |||
return &RemoteDB{dc: gdc, ctx: context.Background()}, nil | |||
} | |||
// Init describes the database the remote server should open; it is
// translated field-for-field into a protodb.Init by InitRemote.
type Init struct {
	Dir  string // server-side directory holding the database files
	Name string // database name
	Type string // backend type identifier, interpreted by the server
}
func (rd *RemoteDB) InitRemote(in *Init) error { | |||
_, err := rd.dc.Init(rd.ctx, &protodb.Init{Dir: in.Dir, Type: in.Type, Name: in.Name}) | |||
return err | |||
} | |||
// Compile-time check that RemoteDB satisfies the db.DB interface.
var _ db.DB = (*RemoteDB)(nil)

// Close is a noop currently
// TODO(review): consider closing the underlying gRPC connection here.
func (rd *RemoteDB) Close() {
}
func (rd *RemoteDB) Delete(key []byte) { | |||
if _, err := rd.dc.Delete(rd.ctx, &protodb.Entity{Key: key}); err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Delete: %v", err)) | |||
} | |||
} | |||
func (rd *RemoteDB) DeleteSync(key []byte) { | |||
if _, err := rd.dc.DeleteSync(rd.ctx, &protodb.Entity{Key: key}); err != nil { | |||
panic(fmt.Sprintf("RemoteDB.DeleteSync: %v", err)) | |||
} | |||
} | |||
func (rd *RemoteDB) Set(key, value []byte) { | |||
if _, err := rd.dc.Set(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Set: %v", err)) | |||
} | |||
} | |||
func (rd *RemoteDB) SetSync(key, value []byte) { | |||
if _, err := rd.dc.SetSync(rd.ctx, &protodb.Entity{Key: key, Value: value}); err != nil { | |||
panic(fmt.Sprintf("RemoteDB.SetSync: %v", err)) | |||
} | |||
} | |||
func (rd *RemoteDB) Get(key []byte) []byte { | |||
res, err := rd.dc.Get(rd.ctx, &protodb.Entity{Key: key}) | |||
if err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Get error: %v", err)) | |||
} | |||
return res.Value | |||
} | |||
func (rd *RemoteDB) Has(key []byte) bool { | |||
res, err := rd.dc.Has(rd.ctx, &protodb.Entity{Key: key}) | |||
if err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Has error: %v", err)) | |||
} | |||
return res.Exists | |||
} | |||
func (rd *RemoteDB) ReverseIterator(start, end []byte) db.Iterator { | |||
dic, err := rd.dc.ReverseIterator(rd.ctx, &protodb.Entity{Start: start, End: end}) | |||
if err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err)) | |||
} | |||
return makeReverseIterator(dic) | |||
} | |||
func (rd *RemoteDB) NewBatch() db.Batch { | |||
return &batch{ | |||
db: rd, | |||
ops: nil, | |||
} | |||
} | |||
// TODO: Implement Print when db.DB implements a method
// to print to a string and not db.Print to stdout.
// Print is unimplemented for RemoteDB and always panics.
func (rd *RemoteDB) Print() {
	panic("Unimplemented")
}
func (rd *RemoteDB) Stats() map[string]string { | |||
stats, err := rd.dc.Stats(rd.ctx, &protodb.Nothing{}) | |||
if err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Stats error: %v", err)) | |||
} | |||
if stats == nil { | |||
return nil | |||
} | |||
return stats.Data | |||
} | |||
func (rd *RemoteDB) Iterator(start, end []byte) db.Iterator { | |||
dic, err := rd.dc.Iterator(rd.ctx, &protodb.Entity{Start: start, End: end}) | |||
if err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Iterator error: %v", err)) | |||
} | |||
return makeIterator(dic) | |||
} | |||
func makeIterator(dic protodb.DB_IteratorClient) db.Iterator { | |||
return &iterator{dic: dic} | |||
} | |||
func makeReverseIterator(dric protodb.DB_ReverseIteratorClient) db.Iterator { | |||
return &reverseIterator{dric: dric} | |||
} | |||
type reverseIterator struct { | |||
dric protodb.DB_ReverseIteratorClient | |||
cur *protodb.Iterator | |||
} | |||
var _ db.Iterator = (*iterator)(nil) | |||
func (rItr *reverseIterator) Valid() bool { | |||
return rItr.cur != nil && rItr.cur.Valid | |||
} | |||
func (rItr *reverseIterator) Domain() (start, end []byte) { | |||
if rItr.cur == nil || rItr.cur.Domain == nil { | |||
return nil, nil | |||
} | |||
return rItr.cur.Domain.Start, rItr.cur.Domain.End | |||
} | |||
// Next advances the current reverseIterator
// by receiving the next item from the stream, panicking on any Recv
// error. NOTE(review): a normal end-of-stream (io.EOF) would also
// land in the panic — confirm the server signals termination via
// Valid=false rather than by closing the stream.
func (rItr *reverseIterator) Next() {
	var err error
	rItr.cur, err = rItr.dric.Recv()
	if err != nil {
		panic(fmt.Sprintf("RemoteDB.ReverseIterator.Next error: %v", err))
	}
}
func (rItr *reverseIterator) Key() []byte { | |||
if rItr.cur == nil { | |||
return nil | |||
} | |||
return rItr.cur.Key | |||
} | |||
func (rItr *reverseIterator) Value() []byte { | |||
if rItr.cur == nil { | |||
return nil | |||
} | |||
return rItr.cur.Value | |||
} | |||
// Close is a noop for the reverse iterator.
// TODO(review): the forward iterator calls CloseSend here — consider
// doing the same for symmetry.
func (rItr *reverseIterator) Close() {
}
// iterator implements the db.Iterator by retrieving
// streamed iterators from the remote backend as
// needed. It is NOT safe for concurrent usage,
// matching the behavior of other iterators.
type iterator struct {
	dic protodb.DB_IteratorClient // forward iteration stream
	cur *protodb.Iterator         // latest item received; nil before the first Next
}

// Compile-time check that iterator satisfies the db.Iterator interface.
var _ db.Iterator = (*iterator)(nil)
func (itr *iterator) Valid() bool { | |||
return itr.cur != nil && itr.cur.Valid | |||
} | |||
func (itr *iterator) Domain() (start, end []byte) { | |||
if itr.cur == nil || itr.cur.Domain == nil { | |||
return nil, nil | |||
} | |||
return itr.cur.Domain.Start, itr.cur.Domain.End | |||
} | |||
// Next advances the current iterator | |||
func (itr *iterator) Next() { | |||
var err error | |||
itr.cur, err = itr.dic.Recv() | |||
if err != nil { | |||
panic(fmt.Sprintf("RemoteDB.Iterator.Next error: %v", err)) | |||
} | |||
} | |||
func (itr *iterator) Key() []byte { | |||
if itr.cur == nil { | |||
return nil | |||
} | |||
return itr.cur.Key | |||
} | |||
func (itr *iterator) Value() []byte { | |||
if itr.cur == nil { | |||
return nil | |||
} | |||
return itr.cur.Value | |||
} | |||
func (itr *iterator) Close() { | |||
err := itr.dic.CloseSend() | |||
if err != nil { | |||
panic(fmt.Sprintf("Error closing iterator: %v", err)) | |||
} | |||
} | |||
// batch accumulates set/delete operations locally and ships them to the
// remote database in a single RPC on Write/WriteSync.
type batch struct {
	db  *RemoteDB            // connection the batch is flushed through
	ops []*protodb.Operation // queued operations, in insertion order
}

var _ db.Batch = (*batch)(nil)
func (bat *batch) Set(key, value []byte) { | |||
op := &protodb.Operation{ | |||
Entity: &protodb.Entity{Key: key, Value: value}, | |||
Type: protodb.Operation_SET, | |||
} | |||
bat.ops = append(bat.ops, op) | |||
} | |||
func (bat *batch) Delete(key []byte) { | |||
op := &protodb.Operation{ | |||
Entity: &protodb.Entity{Key: key}, | |||
Type: protodb.Operation_DELETE, | |||
} | |||
bat.ops = append(bat.ops, op) | |||
} | |||
func (bat *batch) Write() { | |||
if _, err := bat.db.dc.BatchWrite(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { | |||
panic(fmt.Sprintf("RemoteDB.BatchWrite: %v", err)) | |||
} | |||
} | |||
func (bat *batch) WriteSync() { | |||
if _, err := bat.db.dc.BatchWriteSync(bat.db.ctx, &protodb.Batch{Ops: bat.ops}); err != nil { | |||
panic(fmt.Sprintf("RemoteDB.BatchWriteSync: %v", err)) | |||
} | |||
} | |||
// Close discards any queued-but-unwritten operations.
func (bat *batch) Close() {
	bat.ops = nil
}
@ -1,123 +0,0 @@ | |||
package remotedb_test | |||
import ( | |||
"net" | |||
"os" | |||
"testing" | |||
"github.com/stretchr/testify/require" | |||
"github.com/tendermint/tendermint/libs/db/remotedb" | |||
"github.com/tendermint/tendermint/libs/db/remotedb/grpcdb" | |||
) | |||
func TestRemoteDB(t *testing.T) { | |||
cert := "test.crt" | |||
key := "test.key" | |||
ln, err := net.Listen("tcp", "localhost:0") | |||
require.Nil(t, err, "expecting a port to have been assigned on which we can listen") | |||
srv, err := grpcdb.NewServer(cert, key) | |||
require.Nil(t, err) | |||
defer srv.Stop() | |||
go func() { | |||
if err := srv.Serve(ln); err != nil { | |||
t.Fatalf("BindServer: %v", err) | |||
} | |||
}() | |||
client, err := remotedb.NewRemoteDB(ln.Addr().String(), cert) | |||
require.Nil(t, err, "expecting a successful client creation") | |||
dbName := "test-remote-db" | |||
require.Nil(t, client.InitRemote(&remotedb.Init{Name: dbName, Type: "goleveldb"})) | |||
defer func() { | |||
err := os.RemoveAll(dbName + ".db") | |||
if err != nil { | |||
panic(err) | |||
} | |||
}() | |||
k1 := []byte("key-1") | |||
v1 := client.Get(k1) | |||
require.Equal(t, 0, len(v1), "expecting no key1 to have been stored, got %X (%s)", v1, v1) | |||
vv1 := []byte("value-1") | |||
client.Set(k1, vv1) | |||
gv1 := client.Get(k1) | |||
require.Equal(t, gv1, vv1) | |||
// Simple iteration | |||
itr := client.Iterator(nil, nil) | |||
itr.Next() | |||
require.Equal(t, itr.Key(), []byte("key-1")) | |||
require.Equal(t, itr.Value(), []byte("value-1")) | |||
require.Panics(t, itr.Next) | |||
itr.Close() | |||
// Set some more keys | |||
k2 := []byte("key-2") | |||
v2 := []byte("value-2") | |||
client.SetSync(k2, v2) | |||
has := client.Has(k2) | |||
require.True(t, has) | |||
gv2 := client.Get(k2) | |||
require.Equal(t, gv2, v2) | |||
// More iteration | |||
itr = client.Iterator(nil, nil) | |||
itr.Next() | |||
require.Equal(t, itr.Key(), []byte("key-1")) | |||
require.Equal(t, itr.Value(), []byte("value-1")) | |||
itr.Next() | |||
require.Equal(t, itr.Key(), []byte("key-2")) | |||
require.Equal(t, itr.Value(), []byte("value-2")) | |||
require.Panics(t, itr.Next) | |||
itr.Close() | |||
// Deletion | |||
client.Delete(k1) | |||
client.DeleteSync(k2) | |||
gv1 = client.Get(k1) | |||
gv2 = client.Get(k2) | |||
require.Equal(t, len(gv2), 0, "after deletion, not expecting the key to exist anymore") | |||
require.Equal(t, len(gv1), 0, "after deletion, not expecting the key to exist anymore") | |||
// Batch tests - set | |||
k3 := []byte("key-3") | |||
k4 := []byte("key-4") | |||
k5 := []byte("key-5") | |||
v3 := []byte("value-3") | |||
v4 := []byte("value-4") | |||
v5 := []byte("value-5") | |||
bat := client.NewBatch() | |||
bat.Set(k3, v3) | |||
bat.Set(k4, v4) | |||
rv3 := client.Get(k3) | |||
require.Equal(t, 0, len(rv3), "expecting no k3 to have been stored") | |||
rv4 := client.Get(k4) | |||
require.Equal(t, 0, len(rv4), "expecting no k4 to have been stored") | |||
bat.Write() | |||
rv3 = client.Get(k3) | |||
require.Equal(t, rv3, v3, "expecting k3 to have been stored") | |||
rv4 = client.Get(k4) | |||
require.Equal(t, rv4, v4, "expecting k4 to have been stored") | |||
// Batch tests - deletion | |||
bat = client.NewBatch() | |||
bat.Delete(k4) | |||
bat.Delete(k3) | |||
bat.WriteSync() | |||
rv3 = client.Get(k3) | |||
require.Equal(t, 0, len(rv3), "expecting k3 to have been deleted") | |||
rv4 = client.Get(k4) | |||
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") | |||
// Batch tests - set and delete | |||
bat = client.NewBatch() | |||
bat.Set(k4, v4) | |||
bat.Set(k5, v5) | |||
bat.Delete(k4) | |||
bat.WriteSync() | |||
rv4 = client.Get(k4) | |||
require.Equal(t, 0, len(rv4), "expecting k4 to have been deleted") | |||
rv5 := client.Get(k5) | |||
require.Equal(t, rv5, v5, "expecting k5 to have been stored") | |||
} |
@ -1,25 +0,0 @@ | |||
-----BEGIN CERTIFICATE----- | |||
MIIEOjCCAiKgAwIBAgIQYO+jRR0Sbs+WzU/hj2aoxzANBgkqhkiG9w0BAQsFADAZ | |||
MRcwFQYDVQQDEw50ZW5kZXJtaW50LmNvbTAeFw0xOTA2MDIxMTAyMDdaFw0yMDEy | |||
MDIxMTAyMDRaMBMxETAPBgNVBAMTCHJlbW90ZWRiMIIBIjANBgkqhkiG9w0BAQEF | |||
AAOCAQ8AMIIBCgKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydp | |||
qYYHYei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7 | |||
iZjzAJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+ | |||
hCbuwAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7x | |||
tW3/Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHd | |||
A5I4+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABo4GDMIGAMA4GA1Ud | |||
DwEB/wQEAwIDuDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0O | |||
BBYEFOA8wzCYhoZmy0WHgnv/0efijUMKMB8GA1UdIwQYMBaAFNSTPe743aIx7rIp | |||
vn5HV3gJ4z1hMA8GA1UdEQQIMAaHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAKZf | |||
EVo0i9nMZv6ZJjbmAlMfo5FH41/oBYC8pyGAnJKl42raXKJAbl45h80iGn3vNggf | |||
7HJjN+znAHDFYjIwK2IV2WhHPyxK6uk+FA5uBR/aAPcw+zhRfXUMYdhNHr6KBlZZ | |||
bvD7Iq4UALg+XFQz/fQkIi7QvTBwkYyPNA2+a/TGf6myMp26hoz73DQXklqm6Zle | |||
myPs1Vp9bTgOv/3l64BMUV37FZ2TyiisBkV1qPEoDxT7Fbi8G1K8gMDLd0wu0jvX | |||
nz96nk30TDnZewV1fhkMJVKKGiLbaIgHcu1lWsWJZ0tdc+MF7R9bLBO5T0cTDgNy | |||
V8/51g+Cxu5SSHKjFkT0vBBONhjPmRqzJpxOQfHjiv8mmHwwiaNNy2VkJHj5GHer | |||
64r67fQTSqAifzgwAbXYK+ObUbx4PnHvSYSF5dbcR1Oj6UTVtGAgdmN2Y03AIc1B | |||
CiaojcMVuMRz/SvmPWl34GBvvT5/h9VCpHEB3vV6bQxJb5U1fLyo4GABA2Ic3DHr | |||
kV5p7CZI06UNbyQyFtnEb5XoXywRa4Df7FzDIv3HL13MtyXrYrJqC1eAbn+3jGdh | |||
bQa510mWYAlQQmzHSf/SLKott4QKR3SmhOGqGKNvquAYJ9XLdYdsPmKKGH6iGUD8 | |||
n7yEi0KMD/BHsPQNNLatsR2SxqGDeLhbLR0w2hig | |||
-----END CERTIFICATE----- |
@ -1,27 +0,0 @@ | |||
-----BEGIN RSA PRIVATE KEY----- | |||
MIIEpQIBAAKCAQEAt7YkYMJ5X5X3MT1tWG1KFO3uyZl962fInl+43xVESydpqYYH | |||
Yei7b3T8c/3Ww6f3aKkkCHrvPtqHZjU6o+wp/AQMNlyUoyRN89+6Oj67u2C7iZjz | |||
AJ+Pk87jMaStubvmZ9J+uk4op4rv5Rt4ns/Kg70RaMvqYR8tGqPcy3o8fWS+hCbu | |||
wAS8b65yp+AgbnThDEBUnieN3OFLfDV//45qw2OlqlM/gHOVT2JMRbl14Y7xtW3/ | |||
Xe+lsB7B3+OC6NQ2Nu7DEA1X+TBNyItIGnQH6DwK2ZBRtyQEk26FAWVj8fHdA5I4 | |||
+RcGWXz4T6gJmDZN7+47WHO0ProjARbUV0GIuQIDAQABAoIBAQCEVFAZ3puc7aIU | |||
NuIXqwmMz+KMFuMr+SL6aYr6LhB2bhpfQSr6LLEu1L6wMm1LnCbLneJVtW+1/6U+ | |||
SyNFRmzrmmLNmZx7c0AvZb14DQ4fJ8uOjryje0vptUHT1YJJ4n5R1L7yJjCElsC8 | |||
cDBPfO+sOzlaGmBmuxU7NkNp0k/WJc1Wnn5WFCKKk8BCH1AUKvn/vwbRV4zl/Be7 | |||
ApywPUouV+GJlTAG5KLb15CWKSqFNJxUJ6K7NnmfDoy7muUUv8MtrTn59XTH4qK7 | |||
p/3A8tdNpR/RpEJ8+y3kS9CDZBVnsk0j0ptT//jdt1vSsylXxrf7vjLnyguRZZ5H | |||
Vwe2POotAoGBAOY1UaFjtIz2G5qromaUtrPb5EPWRU8fiLtUXUDKG8KqNAqsGbDz | |||
Stw1mVFyyuaFMReO18djCvcja1xxF3TZbdpV1k7RfcpEZXiFzBAPgeEGdA3Tc3V2 | |||
byuJQthWamCBxF/7OGUmH/E/kH0pv5g9+eIitK/CUC2YUhCnubhchGAXAoGBAMxL | |||
O7mnPqDJ2PqxVip/lL6VnchtF1bx1aDNr83rVTf+BEsOgCIFoDEBIVKDnhXlaJu7 | |||
8JN4la/esytq4j3nM1cl6mjvw2ixYmwQtKiDuNiyb88hhQ+nxVsbIpYxtbhsj+u5 | |||
hOrMN6jKd0GVWsYpdNvY/dXZG1MXhbWwExjRAY+vAoGBAKBu3jHUU5q9VWWIYciN | |||
sXpNL5qbNHg86MRsugSSFaCnj1c0sz7ffvdSn0Pk9USL5Defw/9fpd+wHn0xD4DO | |||
msFDevQ5CSoyWmkRDbLPq9sP7UdJariczkMQCLbOGpqhNSMS6C2N0UsG2oJv2ueV | |||
oZUYTMYEbG4qLl8PFN5IE7UHAoGAGwEq4OyZm7lyxBii8jUxHUw7sh2xgx2uhnYJ | |||
8idUeXVLbfx5tYWW2kNy+yxIvk432LYsI+JBryC6AFg9lb81CyUI6lwfMXyZLP28 | |||
U7Ytvf9ARloA88PSk6tvk/j4M2uuTpOUXVEnXll9EB9FA4LBXro9O4JaWU53rz+a | |||
FqKyGSMCgYEAuYCGC+Fz7lIa0aE4tT9mwczQequxGYsL41KR/4pDO3t9QsnzunLY | |||
fvCFhteBOstwTBBdfBaKIwSp3zI2QtA4K0Jx9SAJ9q0ft2ciB9ukUFBhC9+TqzXg | |||
gSz3XpRtI8PhwAxZgCnov+NPQV8IxvD4ZgnnEiRBHrYnSEsaMLoVnkw= | |||
-----END RSA PRIVATE KEY----- |
@ -1,136 +0,0 @@ | |||
package db | |||
// DB is the key-value store abstraction implemented by each backend.
// DBs are goroutine safe.
type DB interface {
	// Get returns nil iff key doesn't exist.
	// A nil key is interpreted as an empty byteslice.
	// CONTRACT: key, value readonly []byte
	Get([]byte) []byte

	// Has checks if a key exists.
	// A nil key is interpreted as an empty byteslice.
	// CONTRACT: key, value readonly []byte
	Has(key []byte) bool

	// Set sets the key.
	// A nil key is interpreted as an empty byteslice.
	// SetSync additionally flushes before returning — presumably to stable
	// storage; verify per backend.
	// CONTRACT: key, value readonly []byte
	Set([]byte, []byte)
	SetSync([]byte, []byte)

	// Delete deletes the key.
	// A nil key is interpreted as an empty byteslice.
	// CONTRACT: key readonly []byte
	Delete([]byte)
	DeleteSync([]byte)

	// Iterate over a domain of keys in ascending order. End is exclusive.
	// Start must be less than end, or the Iterator is invalid.
	// A nil start is interpreted as an empty byteslice.
	// If end is nil, iterates up to the last item (inclusive).
	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
	// CONTRACT: start, end readonly []byte
	Iterator(start, end []byte) Iterator

	// Iterate over a domain of keys in descending order. End is exclusive.
	// Start must be less than end, or the Iterator is invalid.
	// If start is nil, iterates up to the first/least item (inclusive).
	// If end is nil, iterates from the last/greatest item (inclusive).
	// CONTRACT: No writes may happen within a domain while an iterator exists over it.
	// CONTRACT: start, end readonly []byte
	ReverseIterator(start, end []byte) Iterator

	// Closes the connection.
	Close()

	// Creates a batch for atomic updates.
	NewBatch() Batch

	// For debugging
	Print()

	// Stats returns a map of property values for all keys and the size of the cache.
	Stats() map[string]string
}
//---------------------------------------- | |||
// Batch | |||
// Batch queues Set/Delete operations (via the embedded SetDeleter) and
// applies them all at once with Write or WriteSync.
// Batch Close must be called when the program no longer needs the object.
type Batch interface {
	SetDeleter
	Write()
	WriteSync() // presumably the durable/synchronous variant of Write — verify per backend
	Close()
}
// SetDeleter is the mutation subset shared by DB and Batch.
type SetDeleter interface {
	Set(key, value []byte) // CONTRACT: key, value readonly []byte
	Delete(key []byte)     // CONTRACT: key readonly []byte
}
//---------------------------------------- | |||
// Iterator | |||
/* | |||
Usage: | |||
var itr Iterator = ... | |||
defer itr.Close() | |||
for ; itr.Valid(); itr.Next() { | |||
		k, v := itr.Key(), itr.Value()
// ... | |||
} | |||
*/ | |||
// Iterator walks a key range of a DB; see the Usage comment above.
type Iterator interface {
	// The start & end (exclusive) limits to iterate over.
	// If end < start, then the Iterator goes in reverse order.
	//
	// A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate
	// over anything with the prefix []byte{12, 13}.
	//
	// The smallest key is the empty byte array []byte{};
	// the largest is represented by the nil byte array []byte(nil).
	// CONTRACT: start, end readonly []byte
	Domain() (start []byte, end []byte)

	// Valid returns whether the current position is valid.
	// Once invalid, an Iterator is forever invalid.
	Valid() bool

	// Next moves the iterator to the next sequential key in the database, as
	// defined by order of iteration.
	//
	// If Valid returns false, this method will panic.
	Next()

	// Key returns the key of the cursor.
	// If Valid returns false, this method will panic.
	// CONTRACT: key readonly []byte
	Key() (key []byte)

	// Value returns the value of the cursor.
	// If Valid returns false, this method will panic.
	// CONTRACT: value readonly []byte
	Value() (value []byte)

	// Close releases the Iterator.
	Close()
}
// bz is a test-convenience helper converting a string to its byte slice.
func bz(s string) []byte {
	b := []byte(s)
	return b
}
// We defensively turn nil keys or values into []byte{} for
// most operations: a nil input yields a non-nil empty slice,
// anything else is passed through untouched.
func nonNilBytes(bz []byte) []byte {
	if bz != nil {
		return bz
	}
	return []byte{}
}
@ -1,45 +0,0 @@ | |||
package db | |||
import ( | |||
"bytes" | |||
) | |||
// cp returns a fresh copy of bz with its own backing array
// (a nil input yields a non-nil empty slice).
func cp(bz []byte) (ret []byte) {
	dst := make([]byte, len(bz))
	copy(dst, bz)
	return dst
}
// Returns a slice of the same length (big endian) | |||
// except incremented by one. | |||
// Returns nil on overflow (e.g. if bz bytes are all 0xFF) | |||
// CONTRACT: len(bz) > 0 | |||
func cpIncr(bz []byte) (ret []byte) { | |||
if len(bz) == 0 { | |||
panic("cpIncr expects non-zero bz length") | |||
} | |||
ret = cp(bz) | |||
for i := len(bz) - 1; i >= 0; i-- { | |||
if ret[i] < byte(0xFF) { | |||
ret[i]++ | |||
return | |||
} | |||
ret[i] = byte(0x00) | |||
if i == 0 { | |||
// Overflow | |||
return nil | |||
} | |||
} | |||
return nil | |||
} | |||
// IsKeyInDomain reports whether key lies in [start, end): start is
// inclusive and end exclusive, with a nil end meaning "unbounded above".
// See DB interface documentation for more information.
func IsKeyInDomain(key, start, end []byte) bool {
	aboveStart := bytes.Compare(key, start) >= 0
	belowEnd := end == nil || bytes.Compare(key, end) < 0
	return aboveStart && belowEnd
}
@ -1,104 +0,0 @@ | |||
package db | |||
import ( | |||
"fmt" | |||
"os" | |||
"testing" | |||
) | |||
// Empty iterator for empty db. | |||
func TestPrefixIteratorNoMatchNil(t *testing.T) { | |||
for backend := range backends { | |||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { | |||
db, dir := newTempDB(t, backend) | |||
defer os.RemoveAll(dir) | |||
itr := IteratePrefix(db, []byte("2")) | |||
checkInvalid(t, itr) | |||
}) | |||
} | |||
} | |||
// Empty iterator for db populated after iterator created. | |||
func TestPrefixIteratorNoMatch1(t *testing.T) { | |||
for backend := range backends { | |||
if backend == BoltDBBackend { | |||
t.Log("bolt does not support concurrent writes while iterating") | |||
continue | |||
} | |||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { | |||
db, dir := newTempDB(t, backend) | |||
defer os.RemoveAll(dir) | |||
itr := IteratePrefix(db, []byte("2")) | |||
db.SetSync(bz("1"), bz("value_1")) | |||
checkInvalid(t, itr) | |||
}) | |||
} | |||
} | |||
// Empty iterator for prefix starting after db entry. | |||
func TestPrefixIteratorNoMatch2(t *testing.T) { | |||
for backend := range backends { | |||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { | |||
db, dir := newTempDB(t, backend) | |||
defer os.RemoveAll(dir) | |||
db.SetSync(bz("3"), bz("value_3")) | |||
itr := IteratePrefix(db, []byte("4")) | |||
checkInvalid(t, itr) | |||
}) | |||
} | |||
} | |||
// Iterator with single val for db with single val, starting from that val. | |||
func TestPrefixIteratorMatch1(t *testing.T) { | |||
for backend := range backends { | |||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { | |||
db, dir := newTempDB(t, backend) | |||
defer os.RemoveAll(dir) | |||
db.SetSync(bz("2"), bz("value_2")) | |||
itr := IteratePrefix(db, bz("2")) | |||
checkValid(t, itr, true) | |||
checkItem(t, itr, bz("2"), bz("value_2")) | |||
checkNext(t, itr, false) | |||
// Once invalid... | |||
checkInvalid(t, itr) | |||
}) | |||
} | |||
} | |||
// Iterator with prefix iterates over everything with same prefix. | |||
func TestPrefixIteratorMatches1N(t *testing.T) { | |||
for backend := range backends { | |||
t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { | |||
db, dir := newTempDB(t, backend) | |||
defer os.RemoveAll(dir) | |||
// prefixed | |||
db.SetSync(bz("a/1"), bz("value_1")) | |||
db.SetSync(bz("a/3"), bz("value_3")) | |||
// not | |||
db.SetSync(bz("b/3"), bz("value_3")) | |||
db.SetSync(bz("a-3"), bz("value_3")) | |||
db.SetSync(bz("a.3"), bz("value_3")) | |||
db.SetSync(bz("abcdefg"), bz("value_3")) | |||
itr := IteratePrefix(db, bz("a/")) | |||
checkValid(t, itr, true) | |||
checkItem(t, itr, bz("a/1"), bz("value_1")) | |||
checkNext(t, itr, true) | |||
checkItem(t, itr, bz("a/3"), bz("value_3")) | |||
// Bad! | |||
checkNext(t, itr, false) | |||
//Once invalid... | |||
checkInvalid(t, itr) | |||
}) | |||
} | |||
} |