From 8481c49c824e2d71f9c2d00ff5a8d1ee7ad045d0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 9 Nov 2017 17:42:32 -0500 Subject: [PATCH 01/68] CacheDB (#67) * Add CacheDB & SimpleMap * Generic memBatch; Fix cLevelDB tests * CacheWrap() for CacheDB and MemDB * Change Iterator to match LeviGo Iterator * Fixes from review * cacheWrapWriteMutex and some race fixes * Use tmlibs/common * NewCWWMutex is exposed. DB can be CacheWrap'd * Remove GetOK, not needed * Fsdb (#72) * Add FSDB * Review fixes from Anton * Review changes * Fixes from review --- .gitignore | 2 +- Makefile | 19 ++-- db/backend_test.go | 43 +++++++ db/c_level_db.go | 103 +++++++++++++---- db/c_level_db_test.go | 8 +- db/cache_db.go | 230 +++++++++++++++++++++++++++++++++++++ db/cache_db_test.go | 83 ++++++++++++++ db/common_test.go | 172 ++++++++++++++++++++++++++++ db/db.go | 57 +++++++++- db/fsdb.go | 231 ++++++++++++++++++++++++++++++++++++++ db/go_level_db.go | 115 +++++++++++++------ db/go_level_db_test.go | 8 +- db/mem_batch.go | 50 +++++++++ db/mem_db.go | 160 ++++++++++++++------------ db/mem_db_test.go | 2 +- db/stats.go | 7 ++ db/util.go | 82 ++++++++++++++ db/util_test.go | 209 ++++++++++++++++++++++++++++++++++ merkle/kvpairs.go | 48 ++++++++ merkle/simple_map.go | 26 +++++ merkle/simple_map_test.go | 47 ++++++++ merkle/simple_proof.go | 131 +++++++++++++++++++++ merkle/simple_tree.go | 184 ------------------------------ 23 files changed, 1681 insertions(+), 336 deletions(-) create mode 100644 db/backend_test.go create mode 100644 db/cache_db.go create mode 100644 db/cache_db_test.go create mode 100644 db/common_test.go create mode 100644 db/fsdb.go create mode 100644 db/mem_batch.go create mode 100644 db/stats.go create mode 100644 db/util.go create mode 100644 db/util_test.go create mode 100644 merkle/kvpairs.go create mode 100644 merkle/simple_map.go create mode 100644 merkle/simple_map_test.go create mode 100644 merkle/simple_proof.go diff --git a/.gitignore b/.gitignore index e0a06eaf6..a2ebfde29 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,4 @@ -*.swp +*.sw[opqr] vendor .glide diff --git a/Makefile b/Makefile index 25773ed36..a24306f32 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ all: test NOVENDOR = go list github.com/tendermint/tmlibs/... | grep -v /vendor/ test: - go test `glide novendor` + go test -tags gcc `glide novendor` get_vendor_deps: ensure_tools @rm -rf vendor/ @@ -32,20 +32,19 @@ metalinter_test: ensure_tools --enable=gas \ --enable=goconst \ --enable=gosimple \ - --enable=ineffassign \ - --enable=interfacer \ + --enable=ineffassign \ + --enable=interfacer \ --enable=megacheck \ - --enable=misspell \ - --enable=staticcheck \ + --enable=misspell \ + --enable=staticcheck \ --enable=safesql \ - --enable=structcheck \ - --enable=unconvert \ + --enable=structcheck \ + --enable=unconvert \ --enable=unused \ - --enable=varcheck \ + --enable=varcheck \ --enable=vetshadow \ --enable=vet \ ./... 
- #--enable=aligncheck \ #--enable=dupl \ #--enable=errcheck \ @@ -53,4 +52,4 @@ metalinter_test: ensure_tools #--enable=goimports \ #--enable=golint \ <== comments on anything exported #--enable=gotype \ - #--enable=unparam \ + #--enable=unparam \ diff --git a/db/backend_test.go b/db/backend_test.go new file mode 100644 index 000000000..b4ffecdc6 --- /dev/null +++ b/db/backend_test.go @@ -0,0 +1,43 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + cmn "github.com/tendermint/tmlibs/common" +) + +func testBackend(t *testing.T, backend string) { + // Default + dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) + defer dir.Close() + db := NewDB("testdb", backend, dirname) + require.Nil(t, db.Get([]byte(""))) + require.Nil(t, db.Get(nil)) + + // Set empty ("") + db.Set([]byte(""), []byte("")) + require.NotNil(t, db.Get([]byte(""))) + require.NotNil(t, db.Get(nil)) + require.Empty(t, db.Get([]byte(""))) + require.Empty(t, db.Get(nil)) + + // Set empty (nil) + db.Set([]byte(""), nil) + require.NotNil(t, db.Get([]byte(""))) + require.NotNil(t, db.Get(nil)) + require.Empty(t, db.Get([]byte(""))) + require.Empty(t, db.Get(nil)) + + // Delete + db.Delete([]byte("")) + require.Nil(t, db.Get([]byte(""))) + require.Nil(t, db.Get(nil)) +} + +func TestBackends(t *testing.T) { + testBackend(t, CLevelDBBackendStr) + testBackend(t, GoLevelDBBackendStr) + testBackend(t, MemDBBackendStr) +} diff --git a/db/c_level_db.go b/db/c_level_db.go index b1ae49a12..95651c0a2 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -7,8 +7,6 @@ import ( "path" "github.com/jmhodges/levigo" - - . "github.com/tendermint/tmlibs/common" ) func init() { @@ -24,6 +22,8 @@ type CLevelDB struct { ro *levigo.ReadOptions wo *levigo.WriteOptions woSync *levigo.WriteOptions + + cwwMutex } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { @@ -45,6 +45,8 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { ro: ro, wo: wo, woSync: woSync, + + cwwMutex: NewCWWMutex(), } return database, nil } @@ -52,7 +54,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { func (db *CLevelDB) Get(key []byte) []byte { res, err := db.db.Get(db.ro, key) if err != nil { - PanicCrisis(err) + panic(err) } return res } @@ -60,28 +62,28 @@ func (db *CLevelDB) Get(key []byte) []byte { func (db *CLevelDB) Set(key []byte, value []byte) { err := db.db.Put(db.wo, key, value) if err != nil { - PanicCrisis(err) + panic(err) } } func (db *CLevelDB) SetSync(key []byte, value []byte) { err := db.db.Put(db.woSync, key, value) if err != nil { - PanicCrisis(err) + panic(err) } } func (db *CLevelDB) Delete(key []byte) { err := db.db.Delete(db.wo, key) if err != nil { - PanicCrisis(err) + panic(err) } } func (db *CLevelDB) DeleteSync(key []byte) { err := db.db.Delete(db.woSync, key) if err != nil { - PanicCrisis(err) + panic(err) } } @@ -97,11 +99,11 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - iter := db.db.NewIterator(db.ro) - defer iter.Close() - for iter.Seek(nil); iter.Valid(); iter.Next() { - key := iter.Key() - value := iter.Value() + itr := db.Iterator() + defer itr.Close() + for itr.Seek(nil); itr.Valid(); itr.Next() { + key := itr.Key() + value := itr.Value() fmt.Printf("[%X]:\t[%X]\n", key, value) } } @@ -112,25 +114,24 @@ func (db *CLevelDB) Stats() map[string]string { stats := make(map[string]string) for _, key := range keys { - str, err := db.db.GetProperty(key) - if err == nil { - stats[key] = str - } + str := db.db.PropertyValue(key) + 
stats[key] = str } return stats } -func (db *CLevelDB) Iterator() Iterator { - return db.db.NewIterator(nil, nil) +func (db *CLevelDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) } +//---------------------------------------- +// Batch + func (db *CLevelDB) NewBatch() Batch { batch := levigo.NewWriteBatch() return &cLevelDBBatch{db, batch} } -//-------------------------------------------------------------------------------- - type cLevelDBBatch struct { db *CLevelDB batch *levigo.WriteBatch @@ -147,6 +148,66 @@ func (mBatch *cLevelDBBatch) Delete(key []byte) { func (mBatch *cLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) if err != nil { - PanicCrisis(err) + panic(err) } } + +//---------------------------------------- +// Iterator + +func (db *CLevelDB) Iterator() Iterator { + itr := db.db.NewIterator(db.ro) + itr.Seek([]byte{0x00}) + return cLevelDBIterator{itr} +} + +type cLevelDBIterator struct { + itr *levigo.Iterator +} + +func (c cLevelDBIterator) Seek(key []byte) { + if key == nil { + key = []byte{0x00} + } + c.itr.Seek(key) +} + +func (c cLevelDBIterator) Valid() bool { + return c.itr.Valid() +} + +func (c cLevelDBIterator) Key() []byte { + if !c.itr.Valid() { + panic("cLevelDBIterator Key() called when invalid") + } + return c.itr.Key() +} + +func (c cLevelDBIterator) Value() []byte { + if !c.itr.Valid() { + panic("cLevelDBIterator Value() called when invalid") + } + return c.itr.Value() +} + +func (c cLevelDBIterator) Next() { + if !c.itr.Valid() { + panic("cLevelDBIterator Next() called when invalid") + } + c.itr.Next() +} + +func (c cLevelDBIterator) Prev() { + if !c.itr.Valid() { + panic("cLevelDBIterator Prev() called when invalid") + } + c.itr.Prev() +} + +func (c cLevelDBIterator) Close() { + c.itr.Close() +} + +func (c cLevelDBIterator) GetError() error { + return c.itr.GetError() +} diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index e7336cc5f..864362332 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -7,7 +7,7 @@ import ( "fmt" "testing" - . "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) func BenchmarkRandomReadsWrites2(b *testing.B) { @@ -18,7 +18,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewCLevelDB(Fmt("test_%x", RandStr(12)), "") + db, err := NewCLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return @@ -30,7 +30,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { for i := 0; i < b.N; i++ { // Write something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) internal[idx] += 1 val := internal[idx] idxBytes := int642Bytes(int64(idx)) @@ -43,7 +43,7 @@ func BenchmarkRandomReadsWrites2(b *testing.B) { } // Read something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) val := internal[idx] idxBytes := int642Bytes(int64(idx)) valBytes := db.Get(idxBytes) diff --git a/db/cache_db.go b/db/cache_db.go new file mode 100644 index 000000000..a41680c1b --- /dev/null +++ b/db/cache_db.go @@ -0,0 +1,230 @@ +package db + +import ( + "fmt" + "sort" + "sync" + "sync/atomic" +) + +// If value is nil but deleted is false, +// it means the parent doesn't have the key. +// (No need to delete upon Write()) +type cDBValue struct { + value []byte + deleted bool + dirty bool +} + +// CacheDB wraps an in-memory cache around an underlying DB. 
+type CacheDB struct { + mtx sync.Mutex + cache map[string]cDBValue + parent DB + lockVersion interface{} + + cwwMutex +} + +// Needed by MultiStore.CacheWrap(). +var _ atomicSetDeleter = (*CacheDB)(nil) + +// Users should typically not be required to call NewCacheDB directly, as the +// DB implementations here provide a .CacheWrap() function already. +// `lockVersion` is typically provided by parent.GetWriteLockVersion(). +func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB { + db := &CacheDB{ + cache: make(map[string]cDBValue), + parent: parent, + lockVersion: lockVersion, + cwwMutex: NewCWWMutex(), + } + return db +} + +func (db *CacheDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + + dbValue, ok := db.cache[string(key)] + if !ok { + data := db.parent.Get(key) + dbValue = cDBValue{value: data, deleted: false, dirty: false} + db.cache[string(key)] = dbValue + } + return dbValue.value +} + +func (db *CacheDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *CacheDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *CacheDB) SetNoLock(key []byte, value []byte) { + db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} +} + +func (db *CacheDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *CacheDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *CacheDB) DeleteNoLock(key []byte) { + db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} +} + +func (db *CacheDB) Close() { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.parent.Close() +} + +func (db *CacheDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + fmt.Println("CacheDB\ncache:") + for key, value := range db.cache { + fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) + } + fmt.Println("\nparent:") + db.parent.Print() +} + +func (db *CacheDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + stats := make(map[string]string) + stats["cache.size"] = fmt.Sprintf("%d", len(db.cache)) + stats["cache.lock_version"] = fmt.Sprintf("%v", db.lockVersion) + mergeStats(db.parent.Stats(), stats, "parent.") + return stats +} + +func (db *CacheDB) Iterator() Iterator { + panic("CacheDB.Iterator() not yet supported") +} + +func (db *CacheDB) NewBatch() Batch { + return &memBatch{db, nil} +} + +// Implements `atomicSetDeleter` for Batch support. +func (db *CacheDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +// Write writes pending updates to the parent database and clears the cache. +func (db *CacheDB) Write() { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Optional sanity check to ensure that CacheDB is valid + if parent, ok := db.parent.(WriteLocker); ok { + if parent.TryWriteLock(db.lockVersion) { + // All good! + } else { + panic("CacheDB.Write() failed. Did this CacheDB expire?") + } + } + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys := make([]string, 0, len(db.cache)) + for key, dbValue := range db.cache { + if dbValue.dirty { + keys = append(keys, key) + } + } + sort.Strings(keys) + + batch := db.parent.NewBatch() + for _, key := range keys { + dbValue := db.cache[key] + if dbValue.deleted { + batch.Delete([]byte(key)) + } else if dbValue.value == nil { + // Skip, it already doesn't exist in parent. 
+ } else { + batch.Set([]byte(key), dbValue.value) + } + } + batch.Write() + + // Clear the cache + db.cache = make(map[string]cDBValue) +} + +//---------------------------------------- +// To CacheWrap this CacheDB further. + +func (db *CacheDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) +} + +// If the parent parent DB implements this, (e.g. such as a CacheDB parent to a +// CacheDB child), CacheDB will call `parent.TryWriteLock()` before attempting +// to write. +type WriteLocker interface { + GetWriteLockVersion() (lockVersion interface{}) + TryWriteLock(lockVersion interface{}) bool +} + +// Implements TryWriteLocker. Embed this in DB structs if desired. +type cwwMutex struct { + mtx sync.Mutex + // CONTRACT: reading/writing to `*written` should use `atomic.*`. + // CONTRACT: replacing `written` with another *int32 should use `.mtx`. + written *int32 +} + +func NewCWWMutex() cwwMutex { + return cwwMutex{ + written: new(int32), + } +} + +func (cww *cwwMutex) GetWriteLockVersion() interface{} { + cww.mtx.Lock() + defer cww.mtx.Unlock() + + // `written` works as a "version" object because it gets replaced upon + // successful TryWriteLock. + return cww.written +} + +func (cww *cwwMutex) TryWriteLock(version interface{}) bool { + cww.mtx.Lock() + defer cww.mtx.Unlock() + + if version != cww.written { + return false // wrong "WriteLockVersion" + } + if !atomic.CompareAndSwapInt32(cww.written, 0, 1) { + return false // already written + } + + // New "WriteLockVersion" + cww.written = new(int32) + return true +} diff --git a/db/cache_db_test.go b/db/cache_db_test.go new file mode 100644 index 000000000..1de08e3f0 --- /dev/null +++ b/db/cache_db_test.go @@ -0,0 +1,83 @@ +package db + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func bz(s string) []byte { return []byte(s) } + +func TestCacheDB(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + + require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") + + mem.Set(bz("key1"), bz("value1")) + cdb.Set(bz("key1"), bz("value1")) + require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) + + cdb.Set(bz("key1"), bz("value2")) + require.Equal(t, bz("value2"), cdb.Get(bz("key1"))) + require.Equal(t, bz("value1"), mem.Get(bz("key1"))) + + cdb.Write() + require.Equal(t, bz("value2"), mem.Get(bz("key1"))) + + require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") + + cdb = mem.CacheWrap().(*CacheDB) + cdb.Delete(bz("key1")) + require.Empty(t, cdb.Get(bz("key1"))) + require.Equal(t, mem.Get(bz("key1")), bz("value2")) + + cdb.Write() + require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") + require.Empty(t, mem.Get(bz("key1")), "Expected `key1` to be empty") +} + +func TestCacheDBWriteLock(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb.Write() }) + require.Panics(t, func() { cdb.Write() }) + cdb = mem.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb.Write() }) + require.Panics(t, func() { cdb.Write() }) +} + +func TestCacheDBWriteLockNested(t *testing.T) { + mem := NewMemDB() + cdb := mem.CacheWrap().(*CacheDB) + cdb2 := cdb.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb2.Write() }) + require.Panics(t, func() { cdb2.Write() }) + cdb2 = cdb.CacheWrap().(*CacheDB) + require.NotPanics(t, func() { cdb2.Write() }) + require.Panics(t, func() { cdb2.Write() }) +} + +func TestCacheDBNested(t *testing.T) { + mem := NewMemDB() + cdb := 
mem.CacheWrap().(*CacheDB) + cdb.Set(bz("key1"), bz("value1")) + + require.Empty(t, mem.Get(bz("key1"))) + require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) + cdb2 := cdb.CacheWrap().(*CacheDB) + require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) + + cdb2.Set(bz("key1"), bz("VALUE2")) + require.Equal(t, []byte(nil), mem.Get(bz("key1"))) + require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) + require.Equal(t, bz("VALUE2"), cdb2.Get(bz("key1"))) + + cdb2.Write() + require.Equal(t, []byte(nil), mem.Get(bz("key1"))) + require.Equal(t, bz("VALUE2"), cdb.Get(bz("key1"))) + + cdb.Write() + require.Equal(t, bz("VALUE2"), mem.Get(bz("key1"))) + +} diff --git a/db/common_test.go b/db/common_test.go new file mode 100644 index 000000000..505864c20 --- /dev/null +++ b/db/common_test.go @@ -0,0 +1,172 @@ +package db + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + cmn "github.com/tendermint/tmlibs/common" +) + +func checkValid(t *testing.T, itr Iterator, expected bool) { + valid := itr.Valid() + assert.Equal(t, expected, valid) +} + +func checkNext(t *testing.T, itr Iterator, expected bool) { + itr.Next() + valid := itr.Valid() + assert.Equal(t, expected, valid) +} + +func checkNextPanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") +} + +func checkPrevPanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Prev() }, "checkPrevPanics expected panic but didn't") +} + +func checkPrev(t *testing.T, itr Iterator, expected bool) { + itr.Prev() + valid := itr.Valid() + assert.Equal(t, expected, valid) +} + +func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { + k, v := itr.Key(), itr.Value() + assert.Exactly(t, key, k) + assert.Exactly(t, value, v) +} + +func checkInvalid(t *testing.T, itr Iterator) { + checkValid(t, itr, false) + checkKeyPanics(t, itr) + checkValuePanics(t, itr) + checkNextPanics(t, itr) + checkPrevPanics(t, itr) +} + +func checkKeyPanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't") +} + +func checkValuePanics(t *testing.T, itr Iterator) { + assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") +} + +func newTempDB(t *testing.T, backend string) (db DB) { + dir, dirname := cmn.Tempdir("test_go_iterator") + db = NewDB("testdb", backend, dirname) + dir.Close() + return db +} + +func TestDBIteratorSingleKey(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator() + + checkValid(t, itr, true) + checkNext(t, itr, false) + checkValid(t, itr, false) + checkNextPanics(t, itr) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorTwoKeys(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + db.SetSync(bz("2"), bz("value_1")) + + { // Fail by calling Next too much + itr := db.Iterator() + checkValid(t, itr, true) + + for i := 0; i < 10; i++ { + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkPrev(t, itr, true) + checkValid(t, itr, true) + } + + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkNext(t, itr, false) + checkValid(t, itr, false) + + checkNextPanics(t, itr) + + // Once invalid... 
+ checkInvalid(t, itr) + } + + { // Fail by calling Prev too much + itr := db.Iterator() + checkValid(t, itr, true) + + for i := 0; i < 10; i++ { + checkNext(t, itr, true) + checkValid(t, itr, true) + + checkPrev(t, itr, true) + checkValid(t, itr, true) + } + + checkPrev(t, itr, false) + checkValid(t, itr, false) + + checkPrevPanics(t, itr) + + // Once invalid... + checkInvalid(t, itr) + } + }) + } +} + +func TestDBIteratorEmpty(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator() + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorEmptySeek(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := db.Iterator() + itr.Seek(bz("1")) + + checkInvalid(t, itr) + }) + } +} + +func TestDBIteratorBadSeek(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("1"), bz("value_1")) + itr := db.Iterator() + itr.Seek(bz("2")) + + checkInvalid(t, itr) + }) + } +} diff --git a/db/db.go b/db/db.go index 8156c1e92..6c8bd4800 100644 --- a/db/db.go +++ b/db/db.go @@ -3,7 +3,7 @@ package db import . "github.com/tendermint/tmlibs/common" type DB interface { - Get([]byte) []byte + Get([]byte) []byte // NOTE: returns nil iff never set or deleted. Set([]byte, []byte) SetSync([]byte, []byte) Delete([]byte) @@ -11,11 +11,15 @@ type DB interface { Close() NewBatch() Batch Iterator() Iterator - IteratorPrefix([]byte) Iterator // For debugging Print() + + // Stats returns a map of property values for all keys and the size of the cache. Stats() map[string]string + + // CacheWrap wraps the DB w/ a CacheDB. + CacheWrap() interface{} } type Batch interface { @@ -24,23 +28,66 @@ type Batch interface { Write() } +/* + Usage: + + for itr.Seek(mykey); itr.Valid(); itr.Next() { + k, v := itr.Key(); itr.Value() + .... + } +*/ type Iterator interface { - Next() bool + // Seek moves the iterator the position of the key given or, if the key + // doesn't exist, the next key that does exist in the database. If the key + // doesn't exist, and there is no next key, the Iterator becomes invalid. + Seek(key []byte) + + // Valid returns false only when an Iterator has iterated past either the + // first or the last key in the database. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Next() + + // Prev moves the iterator to the previous sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Prev() + + // Key returns the key of the cursor. + // + // If Valid returns false, this method will panic. Key() []byte + + // Value returns the key of the cursor. + // + // If Valid returns false, this method will panic. Value() []byte - Release() - Error() error + // GetError returns an IteratorError from LevelDB if it had one during + // iteration. + // + // This method is safe to call when Valid returns false. + GetError() error + + // Close deallocates the given Iterator. 
+ Close() } //----------------------------------------------------------------------------- +// Main entry const ( LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. CLevelDBBackendStr = "cleveldb" GoLevelDBBackendStr = "goleveldb" MemDBBackendStr = "memdb" + FSDBBackendStr = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) diff --git a/db/fsdb.go b/db/fsdb.go new file mode 100644 index 000000000..65ac3c38e --- /dev/null +++ b/db/fsdb.go @@ -0,0 +1,231 @@ +package db + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "sort" + "sync" + + "github.com/pkg/errors" +) + +const ( + keyPerm = os.FileMode(0600) + dirPerm = os.FileMode(0700) +) + +func init() { + registerDBCreator(FSDBBackendStr, func(name string, dir string) (DB, error) { + dbPath := filepath.Join(dir, name+".db") + return NewFSDB(dbPath), nil + }, false) +} + +// It's slow. +type FSDB struct { + mtx sync.Mutex + dir string + + cwwMutex +} + +func NewFSDB(dir string) *FSDB { + err := os.MkdirAll(dir, dirPerm) + if err != nil { + panic(errors.Wrap(err, "Creating FSDB dir "+dir)) + } + database := &FSDB{ + dir: dir, + cwwMutex: NewCWWMutex(), + } + return database +} + +func (db *FSDB) Get(key []byte) []byte { + db.mtx.Lock() + defer db.mtx.Unlock() + + path := db.nameToPath(key) + value, err := read(path) + if os.IsNotExist(err) { + return nil + } else if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + } + return value +} + +func (db *FSDB) Set(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +func (db *FSDB) SetSync(key []byte, value []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) SetNoLock(key []byte, value []byte) { + if value == nil { + value = []byte{} + } + path := db.nameToPath(key) + err := write(path, value) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Setting key %s (0x%X)", string(key), key))) + } +} + +func (db *FSDB) Delete(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +func (db *FSDB) DeleteSync(key []byte) { + db.mtx.Lock() + defer db.mtx.Unlock() + + db.DeleteNoLock(key) +} + +// NOTE: Implements atomicSetDeleter. +func (db *FSDB) DeleteNoLock(key []byte) { + err := remove(string(key)) + if os.IsNotExist(err) { + return + } else if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Removing key %s (0x%X)", string(key), key))) + } +} + +func (db *FSDB) Close() { + // Nothing to do. +} + +func (db *FSDB) Print() { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Print not yet implemented") +} + +func (db *FSDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + + panic("FSDB.Stats not yet implemented") +} + +func (db *FSDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() + + // Not sure we would ever want to try... + // It doesn't seem easy for general filesystems. + panic("FSDB.NewBatch not yet implemented") +} + +func (db *FSDB) Mutex() *sync.Mutex { + return &(db.mtx) +} + +func (db *FSDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) +} + +func (db *FSDB) Iterator() Iterator { + it := newMemDBIterator() + it.db = db + it.cur = 0 + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. 
+ keys, err := list(db.dir) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + } + sort.Strings(keys) + it.keys = keys + return it +} + +func (db *FSDB) nameToPath(name []byte) string { + n := url.PathEscape(string(name)) + return path.Join(db.dir, n) +} + +// Read some bytes to a file. +// CONTRACT: returns os errors directly without wrapping. +func read(path string) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + d, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + return d, nil +} + +// Write some bytes from a file. +// CONTRACT: returns os errors directly without wrapping. +func write(path string, d []byte) error { + f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, keyPerm) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write(d) + if err != nil { + return err + } + err = f.Sync() + return err +} + +// Remove a file. +// CONTRACT: returns os errors directly without wrapping. +func remove(path string) error { + return os.Remove(path) +} + +// List files of a path. +// Paths will NOT include dir as the prefix. +// CONTRACT: returns os errors directly without wrapping. +func list(dirPath string) (paths []string, err error) { + dir, err := os.Open(dirPath) + if err != nil { + return nil, err + } + defer dir.Close() + + names, err := dir.Readdirnames(0) + if err != nil { + return nil, err + } + for i, name := range names { + n, err := url.PathUnescape(name) + if err != nil { + return nil, fmt.Errorf("Failed to unescape %s while listing", name) + } + names[i] = n + } + return names, nil +} diff --git a/db/go_level_db.go b/db/go_level_db.go index 4abd76112..d9cec519c 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -8,7 +8,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" . "github.com/tendermint/tmlibs/common" ) @@ -23,6 +22,8 @@ func init() { type GoLevelDB struct { db *leveldb.DB + + cwwMutex } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { @@ -31,7 +32,10 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { if err != nil { return nil, err } - database := &GoLevelDB{db: db} + database := &GoLevelDB{ + db: db, + cwwMutex: NewCWWMutex(), + } return database, nil } @@ -117,12 +121,59 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } +func (db *GoLevelDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) +} + +//---------------------------------------- +// Batch + +func (db *GoLevelDB) NewBatch() Batch { + batch := new(leveldb.Batch) + return &goLevelDBBatch{db, batch} +} + +type goLevelDBBatch struct { + db *GoLevelDB + batch *leveldb.Batch +} + +func (mBatch *goLevelDBBatch) Set(key, value []byte) { + mBatch.batch.Put(key, value) +} + +func (mBatch *goLevelDBBatch) Delete(key []byte) { + mBatch.batch.Delete(key) +} + +func (mBatch *goLevelDBBatch) Write() { + err := mBatch.db.db.Write(mBatch.batch, nil) + if err != nil { + PanicCrisis(err) + } +} + +//---------------------------------------- +// Iterator + +func (db *GoLevelDB) Iterator() Iterator { + itr := &goLevelDBIterator{ + source: db.db.NewIterator(nil, nil), + } + itr.Seek(nil) + return itr +} + type goLevelDBIterator struct { - source iterator.Iterator + source iterator.Iterator + invalid bool } // Key returns a copy of the current key. 
func (it *goLevelDBIterator) Key() []byte { + if !it.Valid() { + panic("goLevelDBIterator Key() called when invalid") + } key := it.source.Key() k := make([]byte, len(key)) copy(k, key) @@ -132,6 +183,9 @@ func (it *goLevelDBIterator) Key() []byte { // Value returns a copy of the current value. func (it *goLevelDBIterator) Value() []byte { + if !it.Valid() { + panic("goLevelDBIterator Value() called when invalid") + } val := it.source.Value() v := make([]byte, len(val)) copy(v, val) @@ -139,49 +193,36 @@ func (it *goLevelDBIterator) Value() []byte { return v } -func (it *goLevelDBIterator) Error() error { +func (it *goLevelDBIterator) GetError() error { return it.source.Error() } -func (it *goLevelDBIterator) Next() bool { - return it.source.Next() -} - -func (it *goLevelDBIterator) Release() { - it.source.Release() -} - -func (db *GoLevelDB) Iterator() Iterator { - return &goLevelDBIterator{db.db.NewIterator(nil, nil)} +func (it *goLevelDBIterator) Seek(key []byte) { + it.source.Seek(key) } -func (db *GoLevelDB) IteratorPrefix(prefix []byte) Iterator { - return &goLevelDBIterator{db.db.NewIterator(util.BytesPrefix(prefix), nil)} -} - -func (db *GoLevelDB) NewBatch() Batch { - batch := new(leveldb.Batch) - return &goLevelDBBatch{db, batch} -} - -//-------------------------------------------------------------------------------- - -type goLevelDBBatch struct { - db *GoLevelDB - batch *leveldb.Batch +func (it *goLevelDBIterator) Valid() bool { + if it.invalid { + return false + } + it.invalid = !it.source.Valid() + return !it.invalid } -func (mBatch *goLevelDBBatch) Set(key, value []byte) { - mBatch.batch.Put(key, value) +func (it *goLevelDBIterator) Next() { + if !it.Valid() { + panic("goLevelDBIterator Next() called when invalid") + } + it.source.Next() } -func (mBatch *goLevelDBBatch) Delete(key []byte) { - mBatch.batch.Delete(key) +func (it *goLevelDBIterator) Prev() { + if !it.Valid() { + panic("goLevelDBIterator Prev() called when invalid") + } + it.source.Prev() } -func (mBatch *goLevelDBBatch) Write() { - err := mBatch.db.db.Write(mBatch.batch, nil) - if err != nil { - PanicCrisis(err) - } +func (it *goLevelDBIterator) Close() { + it.source.Release() } diff --git a/db/go_level_db_test.go b/db/go_level_db_test.go index 2cd3192c3..88b6730f3 100644 --- a/db/go_level_db_test.go +++ b/db/go_level_db_test.go @@ -6,7 +6,7 @@ import ( "fmt" "testing" - . 
"github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tmlibs/common" ) func BenchmarkRandomReadsWrites(b *testing.B) { @@ -17,7 +17,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < int(numItems); i++ { internal[int64(i)] = int64(0) } - db, err := NewGoLevelDB(Fmt("test_%x", RandStr(12)), "") + db, err := NewGoLevelDB(cmn.Fmt("test_%x", cmn.RandStr(12)), "") if err != nil { b.Fatal(err.Error()) return @@ -29,7 +29,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { for i := 0; i < b.N; i++ { // Write something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) internal[idx] += 1 val := internal[idx] idxBytes := int642Bytes(int64(idx)) @@ -42,7 +42,7 @@ func BenchmarkRandomReadsWrites(b *testing.B) { } // Read something { - idx := (int64(RandInt()) % numItems) + idx := (int64(cmn.RandInt()) % numItems) val := internal[idx] idxBytes := int642Bytes(int64(idx)) valBytes := db.Get(idxBytes) diff --git a/db/mem_batch.go b/db/mem_batch.go new file mode 100644 index 000000000..7072d931a --- /dev/null +++ b/db/mem_batch.go @@ -0,0 +1,50 @@ +package db + +import "sync" + +type atomicSetDeleter interface { + Mutex() *sync.Mutex + SetNoLock(key, value []byte) + DeleteNoLock(key []byte) +} + +type memBatch struct { + db atomicSetDeleter + ops []operation +} + +type opType int + +const ( + opTypeSet opType = 1 + opTypeDelete opType = 2 +) + +type operation struct { + opType + key []byte + value []byte +} + +func (mBatch *memBatch) Set(key, value []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +} + +func (mBatch *memBatch) Delete(key []byte) { + mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +} + +func (mBatch *memBatch) Write() { + mtx := mBatch.db.Mutex() + mtx.Lock() + defer mtx.Unlock() + + for _, op := range mBatch.ops { + switch op.opType { + case opTypeSet: + mBatch.db.SetNoLock(op.key, op.value) + case opTypeDelete: + mBatch.db.DeleteNoLock(op.key) + } + } +} diff --git a/db/mem_db.go b/db/mem_db.go index 077427509..30697adcf 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -1,8 +1,9 @@ package db import ( + "bytes" "fmt" - "strings" + "sort" "sync" ) @@ -15,40 +16,63 @@ func init() { type MemDB struct { mtx sync.Mutex db map[string][]byte + + cwwMutex } func NewMemDB() *MemDB { - database := &MemDB{db: make(map[string][]byte)} + database := &MemDB{ + db: make(map[string][]byte), + cwwMutex: NewCWWMutex(), + } return database } func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() + return db.db[string(key)] } func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - db.db[string(key)] = value + + db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + + db.SetNoLock(key, value) +} + +// NOTE: Implements atomicSetDeleter +func (db *MemDB) SetNoLock(key []byte, value []byte) { + if value == nil { + value = []byte{} + } db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + delete(db.db, string(key)) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + + delete(db.db, string(key)) +} + +// NOTE: Implements atomicSetDeleter +func (db *MemDB) DeleteNoLock(key []byte) { delete(db.db, string(key)) } @@ -63,115 +87,113 @@ func (db *MemDB) Close() { func (db *MemDB) Print() { db.mtx.Lock() defer db.mtx.Unlock() + for key, value := range db.db { fmt.Printf("[%X]:\t[%X]\n", []byte(key), value) } } func 
(db *MemDB) Stats() map[string]string { + db.mtx.Lock() + defer db.mtx.Unlock() + stats := make(map[string]string) stats["database.type"] = "memDB" + stats["database.size"] = fmt.Sprintf("%d", len(db.db)) return stats } -type memDBIterator struct { - last int - keys []string - db *MemDB -} - -func newMemDBIterator() *memDBIterator { - return &memDBIterator{} -} - -func (it *memDBIterator) Next() bool { - if it.last >= len(it.keys)-1 { - return false - } - it.last++ - return true -} +func (db *MemDB) NewBatch() Batch { + db.mtx.Lock() + defer db.mtx.Unlock() -func (it *memDBIterator) Key() []byte { - return []byte(it.keys[it.last]) + return &memBatch{db, nil} } -func (it *memDBIterator) Value() []byte { - return it.db.Get(it.Key()) +func (db *MemDB) Mutex() *sync.Mutex { + return &(db.mtx) } -func (it *memDBIterator) Release() { - it.db = nil - it.keys = nil +func (db *MemDB) CacheWrap() interface{} { + return NewCacheDB(db, db.GetWriteLockVersion()) } -func (it *memDBIterator) Error() error { - return nil -} +//---------------------------------------- func (db *MemDB) Iterator() Iterator { - return db.IteratorPrefix([]byte{}) -} - -func (db *MemDB) IteratorPrefix(prefix []byte) Iterator { it := newMemDBIterator() it.db = db - it.last = -1 + it.cur = 0 db.mtx.Lock() defer db.mtx.Unlock() - // unfortunately we need a copy of all of the keys + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. for key, _ := range db.db { - if strings.HasPrefix(key, string(prefix)) { - it.keys = append(it.keys, key) - } + it.keys = append(it.keys, key) } + sort.Strings(it.keys) return it } -func (db *MemDB) NewBatch() Batch { - return &memDBBatch{db, nil} +type memDBIterator struct { + cur int + keys []string + db DB } -//-------------------------------------------------------------------------------- - -type memDBBatch struct { - db *MemDB - ops []operation +func newMemDBIterator() *memDBIterator { + return &memDBIterator{} } -type opType int - -const ( - opTypeSet = 1 - opTypeDelete = 2 -) +func (it *memDBIterator) Seek(key []byte) { + for i, ik := range it.keys { + it.cur = i + if bytes.Compare(key, []byte(ik)) <= 0 { + return + } + } + it.cur += 1 // If not found, becomes invalid. 
+} -type operation struct { - opType - key []byte - value []byte +func (it *memDBIterator) Valid() bool { + return 0 <= it.cur && it.cur < len(it.keys) } -func (mBatch *memDBBatch) Set(key, value []byte) { - mBatch.ops = append(mBatch.ops, operation{opTypeSet, key, value}) +func (it *memDBIterator) Next() { + if !it.Valid() { + panic("memDBIterator Next() called when invalid") + } + it.cur++ } -func (mBatch *memDBBatch) Delete(key []byte) { - mBatch.ops = append(mBatch.ops, operation{opTypeDelete, key, nil}) +func (it *memDBIterator) Prev() { + if !it.Valid() { + panic("memDBIterator Next() called when invalid") + } + it.cur-- } -func (mBatch *memDBBatch) Write() { - mBatch.db.mtx.Lock() - defer mBatch.db.mtx.Unlock() +func (it *memDBIterator) Key() []byte { + if !it.Valid() { + panic("memDBIterator Key() called when invalid") + } + return []byte(it.keys[it.cur]) +} - for _, op := range mBatch.ops { - if op.opType == opTypeSet { - mBatch.db.db[string(op.key)] = op.value - } else if op.opType == opTypeDelete { - delete(mBatch.db.db, string(op.key)) - } +func (it *memDBIterator) Value() []byte { + if !it.Valid() { + panic("memDBIterator Value() called when invalid") } + return it.db.Get(it.Key()) +} + +func (it *memDBIterator) Close() { + it.db = nil + it.keys = nil +} +func (it *memDBIterator) GetError() error { + return nil } diff --git a/db/mem_db_test.go b/db/mem_db_test.go index 503e361f1..b5c9167c8 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -21,7 +21,7 @@ func TestMemDbIterator(t *testing.T) { iter := db.Iterator() i := 0 - for iter.Next() { + for ; iter.Valid(); iter.Next() { assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") i += 1 } diff --git a/db/stats.go b/db/stats.go new file mode 100644 index 000000000..ef4b0dd0f --- /dev/null +++ b/db/stats.go @@ -0,0 +1,7 @@ +package db + +func mergeStats(src, dest map[string]string, prefix string) { + for key, value := range src { + dest[prefix+key] = value + } +} diff --git a/db/util.go b/db/util.go new file mode 100644 index 000000000..5f381a5be --- /dev/null +++ b/db/util.go @@ -0,0 +1,82 @@ +package db + +import "bytes" + +// A wrapper around itr that tries to keep the iterator +// within the bounds as defined by `prefix` +type prefixIterator struct { + itr Iterator + prefix []byte + invalid bool +} + +func (pi *prefixIterator) Seek(key []byte) { + if !bytes.HasPrefix(key, pi.prefix) { + pi.invalid = true + return + } + pi.itr.Seek(key) + pi.checkInvalid() +} + +func (pi *prefixIterator) checkInvalid() { + if !pi.itr.Valid() { + pi.invalid = true + } +} + +func (pi *prefixIterator) Valid() bool { + if pi.invalid { + return false + } + key := pi.itr.Key() + ok := bytes.HasPrefix(key, pi.prefix) + if !ok { + pi.invalid = true + return false + } + return true +} + +func (pi *prefixIterator) Next() { + if pi.invalid { + panic("prefixIterator Next() called when invalid") + } + pi.itr.Next() + pi.checkInvalid() +} + +func (pi *prefixIterator) Prev() { + if pi.invalid { + panic("prefixIterator Prev() called when invalid") + } + pi.itr.Prev() + pi.checkInvalid() +} + +func (pi *prefixIterator) Key() []byte { + if pi.invalid { + panic("prefixIterator Key() called when invalid") + } + return pi.itr.Key() +} + +func (pi *prefixIterator) Value() []byte { + if pi.invalid { + panic("prefixIterator Value() called when invalid") + } + return pi.itr.Value() +} + +func (pi *prefixIterator) Close() { pi.itr.Close() } +func (pi *prefixIterator) GetError() error { return pi.itr.GetError() } + +func IteratePrefix(db 
DB, prefix []byte) Iterator { + itr := db.Iterator() + pi := &prefixIterator{ + itr: itr, + prefix: prefix, + } + pi.Seek(prefix) + return pi +} diff --git a/db/util_test.go b/db/util_test.go new file mode 100644 index 000000000..55a41bf5b --- /dev/null +++ b/db/util_test.go @@ -0,0 +1,209 @@ +package db + +import ( + "fmt" + "testing" +) + +func TestPrefixIteratorNoMatchNil(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + + checkInvalid(t, itr) + }) + } +} + +func TestPrefixIteratorNoMatch1(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + itr := IteratePrefix(db, []byte("2")) + db.SetSync(bz("1"), bz("value_1")) + + checkInvalid(t, itr) + }) + } +} + +func TestPrefixIteratorMatch2(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("2"), bz("value_2")) + itr := IteratePrefix(db, []byte("2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("2"), bz("value_2")) + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +func TestPrefixIteratorMatch3(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("3"), bz("value_3")) + itr := IteratePrefix(db, []byte("2")) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/1, fail by too much Next() +func TestPrefixIteratorMatches1N(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/1")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/1, fail by too much Prev() +func TestPrefixIteratorMatches1P(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/1")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + + // Bad! + checkPrev(t, itr, false) + + // Once invalid... 
+ checkInvalid(t, itr) + }) + } +} + +// Search for a/2, fail by too much Next() +func TestPrefixIteratorMatches2N(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/2, fail by too much Prev() +func TestPrefixIteratorMatches2P(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/2")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + + // Bad! + checkPrev(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/3, fail by too much Next() +func TestPrefixIteratorMatches3N(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/3")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + checkNext(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + + // Bad! + checkNext(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} + +// Search for a/3, fail by too much Prev() +func TestPrefixIteratorMatches3P(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + db.SetSync(bz("a/1"), bz("value_1")) + db.SetSync(bz("a/3"), bz("value_3")) + itr := IteratePrefix(db, []byte("a/")) + itr.Seek(bz("a/3")) + + checkValid(t, itr, true) + checkItem(t, itr, bz("a/3"), bz("value_3")) + checkPrev(t, itr, true) + checkItem(t, itr, bz("a/1"), bz("value_1")) + + // Bad! + checkPrev(t, itr, false) + + // Once invalid... + checkInvalid(t, itr) + }) + } +} diff --git a/merkle/kvpairs.go b/merkle/kvpairs.go new file mode 100644 index 000000000..3d67049f2 --- /dev/null +++ b/merkle/kvpairs.go @@ -0,0 +1,48 @@ +package merkle + +import ( + "sort" + + wire "github.com/tendermint/go-wire" + "golang.org/x/crypto/ripemd160" +) + +// NOTE: Behavior is undefined with dup keys. +type KVPair struct { + Key string + Value interface{} // Can be Hashable or not. 
+} + +func (kv KVPair) Hash() []byte { + hasher, n, err := ripemd160.New(), new(int), new(error) + wire.WriteString(kv.Key, hasher, n, err) + if kvH, ok := kv.Value.(Hashable); ok { + wire.WriteByteSlice(kvH.Hash(), hasher, n, err) + } else { + wire.WriteBinary(kv.Value, hasher, n, err) + } + if *err != nil { + panic(*err) + } + return hasher.Sum(nil) +} + +type KVPairs []KVPair + +func (kvps KVPairs) Len() int { return len(kvps) } +func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } +func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } +func (kvps KVPairs) Sort() { sort.Sort(kvps) } + +func MakeSortedKVPairs(m map[string]interface{}) []Hashable { + kvPairs := make([]KVPair, 0, len(m)) + for k, v := range m { + kvPairs = append(kvPairs, KVPair{k, v}) + } + KVPairs(kvPairs).Sort() + kvPairsH := make([]Hashable, 0, len(kvPairs)) + for _, kvp := range kvPairs { + kvPairsH = append(kvPairsH, kvp) + } + return kvPairsH +} diff --git a/merkle/simple_map.go b/merkle/simple_map.go new file mode 100644 index 000000000..43dce990f --- /dev/null +++ b/merkle/simple_map.go @@ -0,0 +1,26 @@ +package merkle + +type SimpleMap struct { + kvz KVPairs +} + +func NewSimpleMap() *SimpleMap { + return &SimpleMap{ + kvz: nil, + } +} + +func (sm *SimpleMap) Set(k string, o interface{}) { + sm.kvz = append(sm.kvz, KVPair{Key: k, Value: o}) +} + +// Merkle root hash of items sorted by key. +// NOTE: Behavior is undefined when key is duplicate. +func (sm *SimpleMap) Hash() []byte { + sm.kvz.Sort() + kvPairsH := make([]Hashable, 0, len(sm.kvz)) + for _, kvp := range sm.kvz { + kvPairsH = append(kvPairsH, kvp) + } + return SimpleHashFromHashables(kvPairsH) +} diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go new file mode 100644 index 000000000..5eb218274 --- /dev/null +++ b/merkle/simple_map_test.go @@ -0,0 +1,47 @@ +package merkle + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleMap(t *testing.T) { + { + db := NewSimpleMap() + db.Set("key1", "value1") + assert.Equal(t, "376bf717ebe3659a34f68edb833dfdcf4a2d3c10", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key1", "value2") + assert.Equal(t, "72fd3a7224674377952214cb10ef21753ec803eb", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key1", "value1") + db.Set("key2", "value2") + assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key2", "value2") // NOTE: out of order + db.Set("key1", "value1") + assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key1", "value1") + db.Set("key2", "value2") + db.Set("key3", "value3") + assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } + { + db := NewSimpleMap() + db.Set("key2", "value2") // NOTE: out of order + db.Set("key1", "value1") + db.Set("key3", "value3") + assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + } +} diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go new file mode 100644 index 000000000..f75568fd9 --- /dev/null +++ b/merkle/simple_proof.go @@ -0,0 +1,131 @@ +package merkle + +import ( + "bytes" + "fmt" +) + +type SimpleProof struct { + Aunts [][]byte `json:"aunts"` // Hashes from leaf's 
sibling to a root's child. +} + +// proofs[0] is the proof for items[0]. +func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromHashables(items) + rootHash = rootSPN.Hash + proofs = make([]*SimpleProof, len(items)) + for i, trail := range trails { + proofs[i] = &SimpleProof{ + Aunts: trail.FlattenAunts(), + } + } + return +} + +// Verify that leafHash is a leaf hash of the simple-merkle-tree +// which hashes to rootHash. +func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { + computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) + return computedHash != nil && bytes.Equal(computedHash, rootHash) +} + +func (sp *SimpleProof) String() string { + return sp.StringIndented("") +} + +func (sp *SimpleProof) StringIndented(indent string) string { + return fmt.Sprintf(`SimpleProof{ +%s Aunts: %X +%s}`, + indent, sp.Aunts, + indent) +} + +// Use the leafHash and innerHashes to get the root merkle hash. +// If the length of the innerHashes slice isn't exactly correct, the result is nil. +func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { + // Recursive impl. + if index >= total { + return nil + } + switch total { + case 0: + panic("Cannot call computeHashFromAunts() with 0 total") + case 1: + if len(innerHashes) != 0 { + return nil + } + return leafHash + default: + if len(innerHashes) == 0 { + return nil + } + numLeft := (total + 1) / 2 + if index < numLeft { + leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if leftHash == nil { + return nil + } + return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) + } else { + rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) + if rightHash == nil { + return nil + } + return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) + } + } +} + +// Helper structure to construct merkle proof. +// The node and the tree is thrown away afterwards. +// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. +// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or +// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. +type SimpleProofNode struct { + Hash []byte + Parent *SimpleProofNode + Left *SimpleProofNode // Left sibling (only one of Left,Right is set) + Right *SimpleProofNode // Right sibling (only one of Left,Right is set) +} + +// Starting from a leaf SimpleProofNode, FlattenAunts() will return +// the inner hashes for the item corresponding to the leaf. +func (spn *SimpleProofNode) FlattenAunts() [][]byte { + // Nonrecursive impl. + innerHashes := [][]byte{} + for spn != nil { + if spn.Left != nil { + innerHashes = append(innerHashes, spn.Left.Hash) + } else if spn.Right != nil { + innerHashes = append(innerHashes, spn.Right.Hash) + } else { + break + } + spn = spn.Parent + } + return innerHashes +} + +// trails[0].Hash is the leaf hash for items[0]. +// trails[i].Parent.Parent....Parent == root for all i. +func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { + // Recursive impl. 
+ switch len(items) { + case 0: + return nil, nil + case 1: + trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} + return []*SimpleProofNode{trail}, trail + default: + lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) + rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) + root := &SimpleProofNode{rootHash, nil, nil, nil} + leftRoot.Parent = root + leftRoot.Right = rightRoot + rightRoot.Parent = root + rightRoot.Left = leftRoot + return append(lefts, rights...), root + } +} diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 8106246d6..d64082b43 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -25,10 +25,6 @@ For larger datasets, use IAVLTree. package merkle import ( - "bytes" - "fmt" - "sort" - "golang.org/x/crypto/ripemd160" "github.com/tendermint/go-wire" @@ -95,183 +91,3 @@ func SimpleHashFromMap(m map[string]interface{}) []byte { kpPairsH := MakeSortedKVPairs(m) return SimpleHashFromHashables(kpPairsH) } - -//-------------------------------------------------------------------------------- - -/* Convenience struct for key-value pairs. -A list of KVPairs is hashed via `SimpleHashFromHashables`. -NOTE: Each `Value` is encoded for hashing without extra type information, -so the user is presumed to be aware of the Value types. -*/ -type KVPair struct { - Key string - Value interface{} -} - -func (kv KVPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteString(kv.Key, hasher, n, err) - if kvH, ok := kv.Value.(Hashable); ok { - wire.WriteByteSlice(kvH.Hash(), hasher, n, err) - } else { - wire.WriteBinary(kv.Value, hasher, n, err) - } - if *err != nil { - PanicSanity(*err) - } - return hasher.Sum(nil) -} - -type KVPairs []KVPair - -func (kvps KVPairs) Len() int { return len(kvps) } -func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } -func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } -func (kvps KVPairs) Sort() { sort.Sort(kvps) } - -func MakeSortedKVPairs(m map[string]interface{}) []Hashable { - kvPairs := []KVPair{} - for k, v := range m { - kvPairs = append(kvPairs, KVPair{k, v}) - } - KVPairs(kvPairs).Sort() - kvPairsH := []Hashable{} - for _, kvp := range kvPairs { - kvPairsH = append(kvPairsH, kvp) - } - return kvPairsH -} - -//-------------------------------------------------------------------------------- - -type SimpleProof struct { - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. -} - -// proofs[0] is the proof for items[0]. -func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashables(items) - rootHash = rootSPN.Hash - proofs = make([]*SimpleProof, len(items)) - for i, trail := range trails { - proofs[i] = &SimpleProof{ - Aunts: trail.FlattenAunts(), - } - } - return -} - -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. 
-func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) - if computedHash == nil { - return false - } - if !bytes.Equal(computedHash, rootHash) { - return false - } - return true -} - -func (sp *SimpleProof) String() string { - return sp.StringIndented("") -} - -func (sp *SimpleProof) StringIndented(indent string) string { - return fmt.Sprintf(`SimpleProof{ -%s Aunts: %X -%s}`, - indent, sp.Aunts, - indent) -} - -// Use the leafHash and innerHashes to get the root merkle hash. -// If the length of the innerHashes slice isn't exactly correct, the result is nil. -func computeHashFromAunts(index int, total int, leafHash []byte, innerHashes [][]byte) []byte { - // Recursive impl. - if index >= total { - return nil - } - switch total { - case 0: - PanicSanity("Cannot call computeHashFromAunts() with 0 total") - return nil - case 1: - if len(innerHashes) != 0 { - return nil - } - return leafHash - default: - if len(innerHashes) == 0 { - return nil - } - numLeft := (total + 1) / 2 - if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if leftHash == nil { - return nil - } - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) - } else { - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - if rightHash == nil { - return nil - } - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) - } - } -} - -// Helper structure to construct merkle proof. -// The node and the tree is thrown away afterwards. -// Exactly one of node.Left and node.Right is nil, unless node is the root, in which case both are nil. -// node.Parent.Hash = hash(node.Hash, node.Right.Hash) or -// hash(node.Left.Hash, node.Hash), depending on whether node is a left/right child. -type SimpleProofNode struct { - Hash []byte - Parent *SimpleProofNode - Left *SimpleProofNode // Left sibling (only one of Left,Right is set) - Right *SimpleProofNode // Right sibling (only one of Left,Right is set) -} - -// Starting from a leaf SimpleProofNode, FlattenAunts() will return -// the inner hashes for the item corresponding to the leaf. -func (spn *SimpleProofNode) FlattenAunts() [][]byte { - // Nonrecursive impl. - innerHashes := [][]byte{} - for spn != nil { - if spn.Left != nil { - innerHashes = append(innerHashes, spn.Left.Hash) - } else if spn.Right != nil { - innerHashes = append(innerHashes, spn.Right.Hash) - } else { - break - } - spn = spn.Parent - } - return innerHashes -} - -// trails[0].Hash is the leaf hash for items[0]. -// trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { - // Recursive impl. 
- switch len(items) { - case 0: - return nil, nil - case 1: - trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} - return []*SimpleProofNode{trail}, trail - default: - lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) - rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) - root := &SimpleProofNode{rootHash, nil, nil, nil} - leftRoot.Parent = root - leftRoot.Right = rightRoot - rightRoot.Parent = root - rightRoot.Left = leftRoot - return append(lefts, rights...), root - } -} From 56e51bc1133bd54a331e0314d5f80fb547460362 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 11 Nov 2017 17:33:41 -0500 Subject: [PATCH 02/68] CacheWrap() -> CacheDB() CacheDB --- db/c_level_db.go | 2 +- db/cache_db.go | 57 +++++++++++++++++++++++---------------------- db/cache_db_test.go | 18 +++++++------- db/db.go | 9 +++++-- db/fsdb.go | 2 +- db/go_level_db.go | 2 +- db/mem_db.go | 2 +- 7 files changed, 49 insertions(+), 43 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 95651c0a2..e4450aaa6 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -120,7 +120,7 @@ func (db *CLevelDB) Stats() map[string]string { return stats } -func (db *CLevelDB) CacheWrap() interface{} { +func (db *CLevelDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } diff --git a/db/cache_db.go b/db/cache_db.go index a41680c1b..ed85e3057 100644 --- a/db/cache_db.go +++ b/db/cache_db.go @@ -16,8 +16,8 @@ type cDBValue struct { dirty bool } -// CacheDB wraps an in-memory cache around an underlying DB. -type CacheDB struct { +// cacheDB wraps an in-memory cache around an underlying DB. +type cacheDB struct { mtx sync.Mutex cache map[string]cDBValue parent DB @@ -27,13 +27,14 @@ type CacheDB struct { } // Needed by MultiStore.CacheWrap(). -var _ atomicSetDeleter = (*CacheDB)(nil) +var _ atomicSetDeleter = (*cacheDB)(nil) +var _ CacheDB = (*cacheDB)(nil) // Users should typically not be required to call NewCacheDB directly, as the -// DB implementations here provide a .CacheWrap() function already. +// DB implementations here provide a .CacheDB() function already. // `lockVersion` is typically provided by parent.GetWriteLockVersion(). 
-func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB { - db := &CacheDB{ +func NewCacheDB(parent DB, lockVersion interface{}) CacheDB { + db := &cacheDB{ cache: make(map[string]cDBValue), parent: parent, lockVersion: lockVersion, @@ -42,7 +43,7 @@ func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB { return db } -func (db *CacheDB) Get(key []byte) []byte { +func (db *cacheDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() @@ -55,54 +56,54 @@ func (db *CacheDB) Get(key []byte) []byte { return dbValue.value } -func (db *CacheDB) Set(key []byte, value []byte) { +func (db *cacheDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.SetNoLock(key, value) } -func (db *CacheDB) SetSync(key []byte, value []byte) { +func (db *cacheDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.SetNoLock(key, value) } -func (db *CacheDB) SetNoLock(key []byte, value []byte) { +func (db *cacheDB) SetNoLock(key []byte, value []byte) { db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} } -func (db *CacheDB) Delete(key []byte) { +func (db *cacheDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.DeleteNoLock(key) } -func (db *CacheDB) DeleteSync(key []byte) { +func (db *cacheDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() db.DeleteNoLock(key) } -func (db *CacheDB) DeleteNoLock(key []byte) { +func (db *cacheDB) DeleteNoLock(key []byte) { db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} } -func (db *CacheDB) Close() { +func (db *cacheDB) Close() { db.mtx.Lock() defer db.mtx.Unlock() db.parent.Close() } -func (db *CacheDB) Print() { +func (db *cacheDB) Print() { db.mtx.Lock() defer db.mtx.Unlock() - fmt.Println("CacheDB\ncache:") + fmt.Println("cacheDB\ncache:") for key, value := range db.cache { fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) } @@ -110,7 +111,7 @@ func (db *CacheDB) Print() { db.parent.Print() } -func (db *CacheDB) Stats() map[string]string { +func (db *cacheDB) Stats() map[string]string { db.mtx.Lock() defer db.mtx.Unlock() @@ -121,30 +122,30 @@ func (db *CacheDB) Stats() map[string]string { return stats } -func (db *CacheDB) Iterator() Iterator { - panic("CacheDB.Iterator() not yet supported") +func (db *cacheDB) Iterator() Iterator { + panic("cacheDB.Iterator() not yet supported") } -func (db *CacheDB) NewBatch() Batch { +func (db *cacheDB) NewBatch() Batch { return &memBatch{db, nil} } // Implements `atomicSetDeleter` for Batch support. -func (db *CacheDB) Mutex() *sync.Mutex { +func (db *cacheDB) Mutex() *sync.Mutex { return &(db.mtx) } // Write writes pending updates to the parent database and clears the cache. -func (db *CacheDB) Write() { +func (db *cacheDB) Write() { db.mtx.Lock() defer db.mtx.Unlock() - // Optional sanity check to ensure that CacheDB is valid + // Optional sanity check to ensure that cacheDB is valid if parent, ok := db.parent.(WriteLocker); ok { if parent.TryWriteLock(db.lockVersion) { // All good! } else { - panic("CacheDB.Write() failed. Did this CacheDB expire?") + panic("cacheDB.Write() failed. Did this CacheDB expire?") } } @@ -176,14 +177,14 @@ func (db *CacheDB) Write() { } //---------------------------------------- -// To CacheWrap this CacheDB further. +// To cache-wrap this cacheDB further. -func (db *CacheDB) CacheWrap() interface{} { +func (db *cacheDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } -// If the parent parent DB implements this, (e.g. 
such as a CacheDB parent to a -// CacheDB child), CacheDB will call `parent.TryWriteLock()` before attempting +// If the parent parent DB implements this, (e.g. such as a cacheDB parent to a +// cacheDB child), cacheDB will call `parent.TryWriteLock()` before attempting // to write. type WriteLocker interface { GetWriteLockVersion() (lockVersion interface{}) diff --git a/db/cache_db_test.go b/db/cache_db_test.go index 1de08e3f0..2a2684fe2 100644 --- a/db/cache_db_test.go +++ b/db/cache_db_test.go @@ -10,7 +10,7 @@ func bz(s string) []byte { return []byte(s) } func TestCacheDB(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") @@ -27,7 +27,7 @@ func TestCacheDB(t *testing.T) { require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") - cdb = mem.CacheWrap().(*CacheDB) + cdb = mem.CacheDB() cdb.Delete(bz("key1")) require.Empty(t, cdb.Get(bz("key1"))) require.Equal(t, mem.Get(bz("key1")), bz("value2")) @@ -39,33 +39,33 @@ func TestCacheDB(t *testing.T) { func TestCacheDBWriteLock(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() require.NotPanics(t, func() { cdb.Write() }) require.Panics(t, func() { cdb.Write() }) - cdb = mem.CacheWrap().(*CacheDB) + cdb = mem.CacheDB() require.NotPanics(t, func() { cdb.Write() }) require.Panics(t, func() { cdb.Write() }) } func TestCacheDBWriteLockNested(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) - cdb2 := cdb.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() + cdb2 := cdb.CacheDB() require.NotPanics(t, func() { cdb2.Write() }) require.Panics(t, func() { cdb2.Write() }) - cdb2 = cdb.CacheWrap().(*CacheDB) + cdb2 = cdb.CacheDB() require.NotPanics(t, func() { cdb2.Write() }) require.Panics(t, func() { cdb2.Write() }) } func TestCacheDBNested(t *testing.T) { mem := NewMemDB() - cdb := mem.CacheWrap().(*CacheDB) + cdb := mem.CacheDB() cdb.Set(bz("key1"), bz("value1")) require.Empty(t, mem.Get(bz("key1"))) require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - cdb2 := cdb.CacheWrap().(*CacheDB) + cdb2 := cdb.CacheDB() require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) cdb2.Set(bz("key1"), bz("VALUE2")) diff --git a/db/db.go b/db/db.go index 6c8bd4800..e86394713 100644 --- a/db/db.go +++ b/db/db.go @@ -18,8 +18,13 @@ type DB interface { // Stats returns a map of property values for all keys and the size of the cache. Stats() map[string]string - // CacheWrap wraps the DB w/ a CacheDB. - CacheWrap() interface{} + // CacheDB wraps the DB w/ a cache. 
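+	// Reads fall through to the underlying DB; writes are buffered in the
+	// cache until the returned CacheDB's Write() is called.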
+ CacheDB() CacheDB +} + +type CacheDB interface { + DB + Write() // Write to the underlying DB } type Batch interface { diff --git a/db/fsdb.go b/db/fsdb.go index 65ac3c38e..4b1914453 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -140,7 +140,7 @@ func (db *FSDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *FSDB) CacheWrap() interface{} { +func (db *FSDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } diff --git a/db/go_level_db.go b/db/go_level_db.go index d9cec519c..cffe7329c 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -121,7 +121,7 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } -func (db *GoLevelDB) CacheWrap() interface{} { +func (db *GoLevelDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } diff --git a/db/mem_db.go b/db/mem_db.go index 30697adcf..f5d55f3ae 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -114,7 +114,7 @@ func (db *MemDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *MemDB) CacheWrap() interface{} { +func (db *MemDB) CacheDB() CacheDB { return NewCacheDB(db, db.GetWriteLockVersion()) } From 17dc8a74497d3fee933592ef860275e6b0dd71d6 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 3 Dec 2017 21:44:48 -0800 Subject: [PATCH 03/68] SetDeleter/Batch separation --- db/db.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/db/db.go b/db/db.go index e86394713..08ebaeaac 100644 --- a/db/db.go +++ b/db/db.go @@ -27,9 +27,13 @@ type CacheDB interface { Write() // Write to the underlying DB } -type Batch interface { +type SetDeleter interface { Set(key, value []byte) Delete(key []byte) +} + +type Batch interface { + SetDeleter Write() } From 988e190ef745b93d41d3aabebeb9c4192ee8a2f1 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sat, 9 Dec 2017 09:26:03 -0800 Subject: [PATCH 04/68] Deprecated Panic* --- common/errors.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/common/errors.go b/common/errors.go index 039342a67..4710b9ee0 100644 --- a/common/errors.go +++ b/common/errors.go @@ -22,6 +22,7 @@ func (se StackError) Error() string { // A panic resulting from a sanity check means there is a programmer error // and some guarantee is not satisfied. +// XXX DEPRECATED func PanicSanity(v interface{}) { panic(Fmt("Panicked on a Sanity Check: %v", v)) } @@ -29,17 +30,20 @@ func PanicSanity(v interface{}) { // A panic here means something has gone horribly wrong, in the form of data corruption or // failure of the operating system. In a correct/healthy system, these should never fire. // If they do, it's indicative of a much more serious problem. +// XXX DEPRECATED func PanicCrisis(v interface{}) { panic(Fmt("Panicked on a Crisis: %v", v)) } // Indicates a failure of consensus. Someone was malicious or something has // gone horribly wrong. 
These should really boot us into an "emergency-recover" mode +// XXX DEPRECATED func PanicConsensus(v interface{}) { panic(Fmt("Panicked on a Consensus Failure: %v", v)) } // For those times when we're not sure if we should panic +// XXX DEPRECATED func PanicQ(v interface{}) { panic(Fmt("Panicked questionably: %v", v)) } From 03dfb724c7d931fa76fc218c4dbac8056aacd752 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 10 Dec 2017 10:18:35 -0800 Subject: [PATCH 05/68] Change heap.Push to mean int priority --- common/heap.go | 45 ++++++++++++++++++++++++++++----------------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/common/heap.go b/common/heap.go index 4a96d7aaa..f3f55a1c0 100644 --- a/common/heap.go +++ b/common/heap.go @@ -4,25 +4,21 @@ import ( "container/heap" ) -type Comparable interface { - Less(o interface{}) bool -} - -//----------------------------------------------------------------------------- - /* -Example usage: + Example usage: + + ``` h := NewHeap() - h.Push(String("msg1"), 1) - h.Push(String("msg3"), 3) - h.Push(String("msg2"), 2) + h.Push("msg1", 1) + h.Push("msg3", 3) + h.Push("msg2", 2) - fmt.Println(h.Pop()) - fmt.Println(h.Pop()) - fmt.Println(h.Pop()) + fmt.Println(h.Pop()) // msg1 + fmt.Println(h.Pop()) // msg2 + fmt.Println(h.Pop()) // msg3 + ``` */ - type Heap struct { pq priorityQueue } @@ -35,7 +31,11 @@ func (h *Heap) Len() int64 { return int64(len(h.pq)) } -func (h *Heap) Push(value interface{}, priority Comparable) { +func (h *Heap) Push(value interface{}, priority int) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) +} + +func (h *Heap) PushComparable(value interface{}, priority Comparable) { heap.Push(&h.pq, &pqItem{value: value, priority: priority}) } @@ -56,8 +56,6 @@ func (h *Heap) Pop() interface{} { } //----------------------------------------------------------------------------- - -/////////////////////// // From: http://golang.org/pkg/container/heap/#example__priorityQueue type pqItem struct { @@ -101,3 +99,16 @@ func (pq *priorityQueue) Update(item *pqItem, value interface{}, priority Compar item.priority = priority heap.Fix(pq, item.index) } + +//-------------------------------------------------------------------------------- +// Comparable + +type Comparable interface { + Less(o interface{}) bool +} + +type cmpInt int + +func (i cmpInt) Less(o interface{}) bool { + return int(i) < int(o.(cmpInt)) +} From a0b692c86d248a7203cab9f5361677bcf6fc11db Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 10 Dec 2017 14:23:27 -0800 Subject: [PATCH 06/68] Add PushBytes to Heap --- common/heap.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/common/heap.go b/common/heap.go index f3f55a1c0..b3bcb9db8 100644 --- a/common/heap.go +++ b/common/heap.go @@ -1,6 +1,7 @@ package common import ( + "bytes" "container/heap" ) @@ -35,6 +36,10 @@ func (h *Heap) Push(value interface{}, priority int) { heap.Push(&h.pq, &pqItem{value: value, priority: cmpInt(priority)}) } +func (h *Heap) PushBytes(value interface{}, priority []byte) { + heap.Push(&h.pq, &pqItem{value: value, priority: cmpBytes(priority)}) +} + func (h *Heap) PushComparable(value interface{}, priority Comparable) { heap.Push(&h.pq, &pqItem{value: value, priority: priority}) } @@ -112,3 +117,9 @@ type cmpInt int func (i cmpInt) Less(o interface{}) bool { return int(i) < int(o.(cmpInt)) } + +type cmpBytes []byte + +func (bz cmpBytes) Less(o interface{}) bool { + return bytes.Compare([]byte(bz), []byte(o.(cmpBytes))) < 0 +} From 
50a30aafc18bfbd5890e4bab20633e843e173843 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 12:44:47 -0800 Subject: [PATCH 07/68] New canonical Iterator --- db/cache_db.go | 2 + db/db.go | 88 --------------------------------------- db/types.go | 111 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+), 88 deletions(-) create mode 100644 db/types.go diff --git a/db/cache_db.go b/db/cache_db.go index ed85e3057..586f2f679 100644 --- a/db/cache_db.go +++ b/db/cache_db.go @@ -1,3 +1,4 @@ +/* package db import ( @@ -229,3 +230,4 @@ func (cww *cwwMutex) TryWriteLock(version interface{}) bool { cww.written = new(int32) return true } +*/ diff --git a/db/db.go b/db/db.go index 08ebaeaac..ba137743b 100644 --- a/db/db.go +++ b/db/db.go @@ -1,93 +1,5 @@ package db -import . "github.com/tendermint/tmlibs/common" - -type DB interface { - Get([]byte) []byte // NOTE: returns nil iff never set or deleted. - Set([]byte, []byte) - SetSync([]byte, []byte) - Delete([]byte) - DeleteSync([]byte) - Close() - NewBatch() Batch - Iterator() Iterator - - // For debugging - Print() - - // Stats returns a map of property values for all keys and the size of the cache. - Stats() map[string]string - - // CacheDB wraps the DB w/ a cache. - CacheDB() CacheDB -} - -type CacheDB interface { - DB - Write() // Write to the underlying DB -} - -type SetDeleter interface { - Set(key, value []byte) - Delete(key []byte) -} - -type Batch interface { - SetDeleter - Write() -} - -/* - Usage: - - for itr.Seek(mykey); itr.Valid(); itr.Next() { - k, v := itr.Key(); itr.Value() - .... - } -*/ -type Iterator interface { - - // Seek moves the iterator the position of the key given or, if the key - // doesn't exist, the next key that does exist in the database. If the key - // doesn't exist, and there is no next key, the Iterator becomes invalid. - Seek(key []byte) - - // Valid returns false only when an Iterator has iterated past either the - // first or the last key in the database. - Valid() bool - - // Next moves the iterator to the next sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Next() - - // Prev moves the iterator to the previous sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Prev() - - // Key returns the key of the cursor. - // - // If Valid returns false, this method will panic. - Key() []byte - - // Value returns the key of the cursor. - // - // If Valid returns false, this method will panic. - Value() []byte - - // GetError returns an IteratorError from LevelDB if it had one during - // iteration. - // - // This method is safe to call when Valid returns false. - GetError() error - - // Close deallocates the given Iterator. - Close() -} - //----------------------------------------------------------------------------- // Main entry diff --git a/db/types.go b/db/types.go new file mode 100644 index 000000000..f343e1d72 --- /dev/null +++ b/db/types.go @@ -0,0 +1,111 @@ +package db + +type DB interface { + + // Get returns nil iff key doesn't exist. Panics on nil key. + Get([]byte) []byte + + // Has checks if a key exists. Panics on nil key. + Has(key []byte) bool + + // Set sets the key. Panics on nil key. + Set([]byte, []byte) + SetSync([]byte, []byte) + + // Delete deletes the key. Panics on nil key. 
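+	// DeleteSync (like SetSync) additionally flushes the write to stable
+	// storage before returning, where the backend supports it.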
+ Delete([]byte) + DeleteSync([]byte) + + // Iterator over a domain of keys in ascending order. End is exclusive. + // Start must be less than end, or the Iterator is invalid. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + Iterator(start, end []byte) Iterator + + // Iterator over a domain of keys in descending order. End is exclusive. + // Start must be greater than end, or the Iterator is invalid. + // CONTRACT: No writes may happen within a domain while an iterator exists over it. + ReverseIterator(start, end []byte) Iterator + + // Releases the connection. + Close() + + // Creates a batch for atomic updates. + NewBatch() Batch + + // For debugging + Print() + + // Stats returns a map of property values for all keys and the size of the cache. + Stats() map[string]string +} + +//---------------------------------------- +// Batch + +type Batch interface { + SetDeleter + Write() +} + +type SetDeleter interface { + Set(key, value []byte) + Delete(key []byte) +} + +//---------------------------------------- + +/* + Usage: + + for itr.Seek(mykey); itr.Valid(); itr.Next() { + k, v := itr.Key(); itr.Value() + .... + } +*/ +type Iterator interface { + + // The start & end (exclusive) limits to iterate over. + // If end < start, then the Iterator goes in reverse order. + // + // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate + // over anything with the prefix []byte{12, 13}. + // + // The smallest key is the empty byte array []byte{}. + // The largest key is the nil byte array []byte(nil). + Domain() (start []byte, end []byte) + + // Valid returns whether the current position is valid. + // Once invalid, an Iterator is forever invalid. + Valid() bool + + // Next moves the iterator to the next sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Next() + + // Prev moves the iterator to the previous sequential key in the database, as + // defined by the Comparator in the ReadOptions used to create this Iterator. + // + // If Valid returns false, this method will panic. + Prev() + + // Key returns the key of the cursor. + // + // If Valid returns false, this method will panic. + Key() []byte + + // Value returns the key of the cursor. + // + // If Valid returns false, this method will panic. + Value() []byte + + // GetError returns an IteratorError from LevelDB if it had one during + // iteration. + // + // This method is safe to call when Valid returns false. + GetError() error + + // Close deallocates the given Iterator. 
+ Close() +} From 0d03cd9e31a1cebca1616a42acb65e307535223a Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 14:00:03 -0800 Subject: [PATCH 08/68] Make it compile --- db/c_level_db.go | 37 ++++---- db/cache_db.go | 233 ---------------------------------------------- db/db.go | 4 +- db/fsdb.go | 61 +++++++----- db/go_level_db.go | 42 ++++++--- db/mem_db.go | 53 ++++++----- db/types.go | 19 +++- db/util.go | 97 +++++-------------- 8 files changed, 161 insertions(+), 385 deletions(-) delete mode 100644 db/cache_db.go diff --git a/db/c_level_db.go b/db/c_level_db.go index e4450aaa6..e867b0004 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -22,8 +22,6 @@ type CLevelDB struct { ro *levigo.ReadOptions wo *levigo.WriteOptions woSync *levigo.WriteOptions - - cwwMutex } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { @@ -45,8 +43,6 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { ro: ro, wo: wo, woSync: woSync, - - cwwMutex: NewCWWMutex(), } return database, nil } @@ -59,6 +55,10 @@ func (db *CLevelDB) Get(key []byte) []byte { return res } +func (db *CLevelDB) Has(key []byte) bool { + panic("not implemented yet") +} + func (db *CLevelDB) Set(key []byte, value []byte) { err := db.db.Put(db.wo, key, value) if err != nil { @@ -99,9 +99,9 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - itr := db.Iterator() - defer itr.Close() - for itr.Seek(nil); itr.Valid(); itr.Next() { + itr := db.Iterator(BeginningKey(), EndingKey()) + defer itr.Release() + for ; itr.Valid(); itr.Next() { key := itr.Key() value := itr.Value() fmt.Printf("[%X]:\t[%X]\n", key, value) @@ -120,10 +120,6 @@ func (db *CLevelDB) Stats() map[string]string { return stats } -func (db *CLevelDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- // Batch @@ -155,10 +151,19 @@ func (mBatch *cLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db *CLevelDB) Iterator() Iterator { - itr := db.db.NewIterator(db.ro) - itr.Seek([]byte{0x00}) - return cLevelDBIterator{itr} +func (db *CLevelDB) Iterator(start, end []byte) Iterator { + /* + XXX + itr := db.db.NewIterator(db.ro) + itr.Seek([]byte{0x00}) + return cLevelDBIterator{itr} + */ + return nil +} + +func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } type cLevelDBIterator struct { @@ -204,7 +209,7 @@ func (c cLevelDBIterator) Prev() { c.itr.Prev() } -func (c cLevelDBIterator) Close() { +func (c cLevelDBIterator) Release() { c.itr.Close() } diff --git a/db/cache_db.go b/db/cache_db.go deleted file mode 100644 index 586f2f679..000000000 --- a/db/cache_db.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -package db - -import ( - "fmt" - "sort" - "sync" - "sync/atomic" -) - -// If value is nil but deleted is false, -// it means the parent doesn't have the key. -// (No need to delete upon Write()) -type cDBValue struct { - value []byte - deleted bool - dirty bool -} - -// cacheDB wraps an in-memory cache around an underlying DB. -type cacheDB struct { - mtx sync.Mutex - cache map[string]cDBValue - parent DB - lockVersion interface{} - - cwwMutex -} - -// Needed by MultiStore.CacheWrap(). -var _ atomicSetDeleter = (*cacheDB)(nil) -var _ CacheDB = (*cacheDB)(nil) - -// Users should typically not be required to call NewCacheDB directly, as the -// DB implementations here provide a .CacheDB() function already. -// `lockVersion` is typically provided by parent.GetWriteLockVersion(). 
-func NewCacheDB(parent DB, lockVersion interface{}) CacheDB { - db := &cacheDB{ - cache: make(map[string]cDBValue), - parent: parent, - lockVersion: lockVersion, - cwwMutex: NewCWWMutex(), - } - return db -} - -func (db *cacheDB) Get(key []byte) []byte { - db.mtx.Lock() - defer db.mtx.Unlock() - - dbValue, ok := db.cache[string(key)] - if !ok { - data := db.parent.Get(key) - dbValue = cDBValue{value: data, deleted: false, dirty: false} - db.cache[string(key)] = dbValue - } - return dbValue.value -} - -func (db *cacheDB) Set(key []byte, value []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) -} - -func (db *cacheDB) SetSync(key []byte, value []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.SetNoLock(key, value) -} - -func (db *cacheDB) SetNoLock(key []byte, value []byte) { - db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true} -} - -func (db *cacheDB) Delete(key []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) -} - -func (db *cacheDB) DeleteSync(key []byte) { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.DeleteNoLock(key) -} - -func (db *cacheDB) DeleteNoLock(key []byte) { - db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true} -} - -func (db *cacheDB) Close() { - db.mtx.Lock() - defer db.mtx.Unlock() - - db.parent.Close() -} - -func (db *cacheDB) Print() { - db.mtx.Lock() - defer db.mtx.Unlock() - - fmt.Println("cacheDB\ncache:") - for key, value := range db.cache { - fmt.Printf("[%X]:\t[%v]\n", []byte(key), value) - } - fmt.Println("\nparent:") - db.parent.Print() -} - -func (db *cacheDB) Stats() map[string]string { - db.mtx.Lock() - defer db.mtx.Unlock() - - stats := make(map[string]string) - stats["cache.size"] = fmt.Sprintf("%d", len(db.cache)) - stats["cache.lock_version"] = fmt.Sprintf("%v", db.lockVersion) - mergeStats(db.parent.Stats(), stats, "parent.") - return stats -} - -func (db *cacheDB) Iterator() Iterator { - panic("cacheDB.Iterator() not yet supported") -} - -func (db *cacheDB) NewBatch() Batch { - return &memBatch{db, nil} -} - -// Implements `atomicSetDeleter` for Batch support. -func (db *cacheDB) Mutex() *sync.Mutex { - return &(db.mtx) -} - -// Write writes pending updates to the parent database and clears the cache. -func (db *cacheDB) Write() { - db.mtx.Lock() - defer db.mtx.Unlock() - - // Optional sanity check to ensure that cacheDB is valid - if parent, ok := db.parent.(WriteLocker); ok { - if parent.TryWriteLock(db.lockVersion) { - // All good! - } else { - panic("cacheDB.Write() failed. Did this CacheDB expire?") - } - } - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - keys := make([]string, 0, len(db.cache)) - for key, dbValue := range db.cache { - if dbValue.dirty { - keys = append(keys, key) - } - } - sort.Strings(keys) - - batch := db.parent.NewBatch() - for _, key := range keys { - dbValue := db.cache[key] - if dbValue.deleted { - batch.Delete([]byte(key)) - } else if dbValue.value == nil { - // Skip, it already doesn't exist in parent. - } else { - batch.Set([]byte(key), dbValue.value) - } - } - batch.Write() - - // Clear the cache - db.cache = make(map[string]cDBValue) -} - -//---------------------------------------- -// To cache-wrap this cacheDB further. - -func (db *cacheDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - -// If the parent parent DB implements this, (e.g. 
such as a cacheDB parent to a -// cacheDB child), cacheDB will call `parent.TryWriteLock()` before attempting -// to write. -type WriteLocker interface { - GetWriteLockVersion() (lockVersion interface{}) - TryWriteLock(lockVersion interface{}) bool -} - -// Implements TryWriteLocker. Embed this in DB structs if desired. -type cwwMutex struct { - mtx sync.Mutex - // CONTRACT: reading/writing to `*written` should use `atomic.*`. - // CONTRACT: replacing `written` with another *int32 should use `.mtx`. - written *int32 -} - -func NewCWWMutex() cwwMutex { - return cwwMutex{ - written: new(int32), - } -} - -func (cww *cwwMutex) GetWriteLockVersion() interface{} { - cww.mtx.Lock() - defer cww.mtx.Unlock() - - // `written` works as a "version" object because it gets replaced upon - // successful TryWriteLock. - return cww.written -} - -func (cww *cwwMutex) TryWriteLock(version interface{}) bool { - cww.mtx.Lock() - defer cww.mtx.Unlock() - - if version != cww.written { - return false // wrong "WriteLockVersion" - } - if !atomic.CompareAndSwapInt32(cww.written, 0, 1) { - return false // already written - } - - // New "WriteLockVersion" - cww.written = new(int32) - return true -} -*/ diff --git a/db/db.go b/db/db.go index ba137743b..7eec04d56 100644 --- a/db/db.go +++ b/db/db.go @@ -1,5 +1,7 @@ package db +import "fmt" + //----------------------------------------------------------------------------- // Main entry @@ -26,7 +28,7 @@ func registerDBCreator(backend string, creator dbCreator, force bool) { func NewDB(name string, backend string, dir string) DB { db, err := backends[backend](name, dir) if err != nil { - PanicSanity(Fmt("Error initializing DB: %v", err)) + panic(fmt.Sprintf("Error initializing DB: %v", err)) } return db } diff --git a/db/fsdb.go b/db/fsdb.go index 4b1914453..b6e08daf5 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -7,7 +7,6 @@ import ( "os" "path" "path/filepath" - "sort" "sync" "github.com/pkg/errors" @@ -29,8 +28,6 @@ func init() { type FSDB struct { mtx sync.Mutex dir string - - cwwMutex } func NewFSDB(dir string) *FSDB { @@ -39,8 +36,7 @@ func NewFSDB(dir string) *FSDB { panic(errors.Wrap(err, "Creating FSDB dir "+dir)) } database := &FSDB{ - dir: dir, - cwwMutex: NewCWWMutex(), + dir: dir, } return database } @@ -59,6 +55,20 @@ func (db *FSDB) Get(key []byte) []byte { return value } +func (db *FSDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + + path := db.nameToPath(key) + _, err := read(path) + if os.IsNotExist(err) { + return false + } else if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + } + return true +} + func (db *FSDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -140,27 +150,32 @@ func (db *FSDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *FSDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} +func (db *FSDB) Iterator(start, end []byte) Iterator { + /* + XXX + it := newMemDBIterator() + it.db = db + it.cur = 0 -func (db *FSDB) Iterator() Iterator { - it := newMemDBIterator() - it.db = db - it.cur = 0 + db.mtx.Lock() + defer db.mtx.Unlock() - db.mtx.Lock() - defer db.mtx.Unlock() + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys, err := list(db.dir) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + } + sort.Strings(keys) + it.keys = keys + return it + */ + return nil +} - // We need a copy of all of the keys. 
- // Not the best, but probably not a bottleneck depending. - keys, err := list(db.dir) - if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) - } - sort.Strings(keys) - it.keys = keys - return it +func (db *FSDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } func (db *FSDB) nameToPath(name []byte) string { diff --git a/db/go_level_db.go b/db/go_level_db.go index cffe7329c..e8ed99dee 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -22,8 +22,6 @@ func init() { type GoLevelDB struct { db *leveldb.DB - - cwwMutex } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { @@ -33,8 +31,7 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { return nil, err } database := &GoLevelDB{ - db: db, - cwwMutex: NewCWWMutex(), + db: db, } return database, nil } @@ -51,6 +48,18 @@ func (db *GoLevelDB) Get(key []byte) []byte { return res } +func (db *GoLevelDB) Has(key []byte) bool { + _, err := db.db.Get(key, nil) + if err != nil { + if err == errors.ErrNotFound { + return false + } else { + PanicCrisis(err) + } + } + return true +} + func (db *GoLevelDB) Set(key []byte, value []byte) { err := db.db.Put(key, value, nil) if err != nil { @@ -121,10 +130,6 @@ func (db *GoLevelDB) Stats() map[string]string { return stats } -func (db *GoLevelDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- // Batch @@ -156,12 +161,21 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db *GoLevelDB) Iterator() Iterator { - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), - } - itr.Seek(nil) - return itr +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + /* + XXX + itr := &goLevelDBIterator{ + source: db.db.NewIterator(nil, nil), + } + itr.Seek(nil) + return itr + */ + return nil +} + +func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } type goLevelDBIterator struct { diff --git a/db/mem_db.go b/db/mem_db.go index f5d55f3ae..3127030ae 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -3,7 +3,6 @@ package db import ( "bytes" "fmt" - "sort" "sync" ) @@ -16,14 +15,11 @@ func init() { type MemDB struct { mtx sync.Mutex db map[string][]byte - - cwwMutex } func NewMemDB() *MemDB { database := &MemDB{ - db: make(map[string][]byte), - cwwMutex: NewCWWMutex(), + db: make(map[string][]byte), } return database } @@ -35,6 +31,14 @@ func (db *MemDB) Get(key []byte) []byte { return db.db[string(key)] } +func (db *MemDB) Has(key []byte) bool { + db.mtx.Lock() + defer db.mtx.Unlock() + + _, ok := db.db[string(key)] + return ok +} + func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -114,27 +118,32 @@ func (db *MemDB) Mutex() *sync.Mutex { return &(db.mtx) } -func (db *MemDB) CacheDB() CacheDB { - return NewCacheDB(db, db.GetWriteLockVersion()) -} - //---------------------------------------- -func (db *MemDB) Iterator() Iterator { - it := newMemDBIterator() - it.db = db - it.cur = 0 +func (db *MemDB) Iterator(start, end []byte) Iterator { + /* + XXX + it := newMemDBIterator() + it.db = db + it.cur = 0 - db.mtx.Lock() - defer db.mtx.Unlock() + db.mtx.Lock() + defer db.mtx.Unlock() - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - for key, _ := range db.db { - it.keys = append(it.keys, key) - } - sort.Strings(it.keys) - return it + // We need a copy of all of the keys. 
+ // Not the best, but probably not a bottleneck depending. + for key, _ := range db.db { + it.keys = append(it.keys, key) + } + sort.Strings(it.keys) + return it + */ + return nil +} + +func (db *MemDB) ReverseIterator(start, end []byte) Iterator { + // XXX + return nil } type memDBIterator struct { diff --git a/db/types.go b/db/types.go index f343e1d72..7422a5155 100644 --- a/db/types.go +++ b/db/types.go @@ -54,12 +54,23 @@ type SetDeleter interface { //---------------------------------------- +func BeginningKey() []byte { + return []byte{} +} + +func EndingKey() []byte { + return nil +} + /* Usage: - for itr.Seek(mykey); itr.Valid(); itr.Next() { + var itr Iterator = ... + defer itr.Release() + + for ; itr.Valid(); itr.Next() { k, v := itr.Key(); itr.Value() - .... + // ... } */ type Iterator interface { @@ -106,6 +117,6 @@ type Iterator interface { // This method is safe to call when Valid returns false. GetError() error - // Close deallocates the given Iterator. - Close() + // Release deallocates the given Iterator. + Release() } diff --git a/db/util.go b/db/util.go index 5f381a5be..89c777622 100644 --- a/db/util.go +++ b/db/util.go @@ -1,82 +1,35 @@ package db -import "bytes" - -// A wrapper around itr that tries to keep the iterator -// within the bounds as defined by `prefix` -type prefixIterator struct { - itr Iterator - prefix []byte - invalid bool -} - -func (pi *prefixIterator) Seek(key []byte) { - if !bytes.HasPrefix(key, pi.prefix) { - pi.invalid = true - return - } - pi.itr.Seek(key) - pi.checkInvalid() -} - -func (pi *prefixIterator) checkInvalid() { - if !pi.itr.Valid() { - pi.invalid = true - } -} - -func (pi *prefixIterator) Valid() bool { - if pi.invalid { - return false - } - key := pi.itr.Key() - ok := bytes.HasPrefix(key, pi.prefix) - if !ok { - pi.invalid = true - return false - } - return true -} - -func (pi *prefixIterator) Next() { - if pi.invalid { - panic("prefixIterator Next() called when invalid") +func IteratePrefix(db DB, prefix []byte) Iterator { + var start, end []byte + if len(prefix) == 0 { + start = BeginningKey() + end = EndingKey() + } else { + start = cp(prefix) + end = cpIncr(prefix) } - pi.itr.Next() - pi.checkInvalid() + return db.Iterator(start, end) } -func (pi *prefixIterator) Prev() { - if pi.invalid { - panic("prefixIterator Prev() called when invalid") - } - pi.itr.Prev() - pi.checkInvalid() -} +//---------------------------------------- -func (pi *prefixIterator) Key() []byte { - if pi.invalid { - panic("prefixIterator Key() called when invalid") - } - return pi.itr.Key() +func cp(bz []byte) (ret []byte) { + ret = make([]byte, len(bz)) + copy(ret, bz) + return ret } -func (pi *prefixIterator) Value() []byte { - if pi.invalid { - panic("prefixIterator Value() called when invalid") - } - return pi.itr.Value() -} - -func (pi *prefixIterator) Close() { pi.itr.Close() } -func (pi *prefixIterator) GetError() error { return pi.itr.GetError() } - -func IteratePrefix(db DB, prefix []byte) Iterator { - itr := db.Iterator() - pi := &prefixIterator{ - itr: itr, - prefix: prefix, +// CONTRACT: len(bz) > 0 +func cpIncr(bz []byte) (ret []byte) { + ret = cp(bz) + for i := len(bz) - 1; i >= 0; i-- { + if ret[i] < byte(0xFF) { + ret[i] += 1 + return + } else { + ret[i] = byte(0x00) + } } - pi.Seek(prefix) - return pi + return EndingKey() } From 7f650cea8673ee3169e51a41c1a8038e38c59ef4 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 14:45:31 -0800 Subject: [PATCH 09/68] Remove Prev from Iterator --- db/c_level_db.go | 7 ---- 
db/cache_db_test.go | 83 --------------------------------------------- db/common_test.go | 51 ++++------------------------ db/mem_db_test.go | 2 +- db/types.go | 13 ++++--- db/util_test.go | 82 +------------------------------------------- 6 files changed, 15 insertions(+), 223 deletions(-) delete mode 100644 db/cache_db_test.go diff --git a/db/c_level_db.go b/db/c_level_db.go index e867b0004..11a6e5ff7 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -202,13 +202,6 @@ func (c cLevelDBIterator) Next() { c.itr.Next() } -func (c cLevelDBIterator) Prev() { - if !c.itr.Valid() { - panic("cLevelDBIterator Prev() called when invalid") - } - c.itr.Prev() -} - func (c cLevelDBIterator) Release() { c.itr.Close() } diff --git a/db/cache_db_test.go b/db/cache_db_test.go deleted file mode 100644 index 2a2684fe2..000000000 --- a/db/cache_db_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func bz(s string) []byte { return []byte(s) } - -func TestCacheDB(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - - require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") - - mem.Set(bz("key1"), bz("value1")) - cdb.Set(bz("key1"), bz("value1")) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - - cdb.Set(bz("key1"), bz("value2")) - require.Equal(t, bz("value2"), cdb.Get(bz("key1"))) - require.Equal(t, bz("value1"), mem.Get(bz("key1"))) - - cdb.Write() - require.Equal(t, bz("value2"), mem.Get(bz("key1"))) - - require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail") - - cdb = mem.CacheDB() - cdb.Delete(bz("key1")) - require.Empty(t, cdb.Get(bz("key1"))) - require.Equal(t, mem.Get(bz("key1")), bz("value2")) - - cdb.Write() - require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty") - require.Empty(t, mem.Get(bz("key1")), "Expected `key1` to be empty") -} - -func TestCacheDBWriteLock(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - require.NotPanics(t, func() { cdb.Write() }) - require.Panics(t, func() { cdb.Write() }) - cdb = mem.CacheDB() - require.NotPanics(t, func() { cdb.Write() }) - require.Panics(t, func() { cdb.Write() }) -} - -func TestCacheDBWriteLockNested(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - cdb2 := cdb.CacheDB() - require.NotPanics(t, func() { cdb2.Write() }) - require.Panics(t, func() { cdb2.Write() }) - cdb2 = cdb.CacheDB() - require.NotPanics(t, func() { cdb2.Write() }) - require.Panics(t, func() { cdb2.Write() }) -} - -func TestCacheDBNested(t *testing.T) { - mem := NewMemDB() - cdb := mem.CacheDB() - cdb.Set(bz("key1"), bz("value1")) - - require.Empty(t, mem.Get(bz("key1"))) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - cdb2 := cdb.CacheDB() - require.Equal(t, bz("value1"), cdb2.Get(bz("key1"))) - - cdb2.Set(bz("key1"), bz("VALUE2")) - require.Equal(t, []byte(nil), mem.Get(bz("key1"))) - require.Equal(t, bz("value1"), cdb.Get(bz("key1"))) - require.Equal(t, bz("VALUE2"), cdb2.Get(bz("key1"))) - - cdb2.Write() - require.Equal(t, []byte(nil), mem.Get(bz("key1"))) - require.Equal(t, bz("VALUE2"), cdb.Get(bz("key1"))) - - cdb.Write() - require.Equal(t, bz("VALUE2"), mem.Get(bz("key1"))) - -} diff --git a/db/common_test.go b/db/common_test.go index 505864c20..09fad8424 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -23,16 +23,6 @@ func checkNextPanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Next() }, "checkNextPanics expected panic but didn't") } -func checkPrevPanics(t *testing.T, itr 
Iterator) { - assert.Panics(t, func() { itr.Prev() }, "checkPrevPanics expected panic but didn't") -} - -func checkPrev(t *testing.T, itr Iterator, expected bool) { - itr.Prev() - valid := itr.Valid() - assert.Equal(t, expected, valid) -} - func checkItem(t *testing.T, itr Iterator, key []byte, value []byte) { k, v := itr.Key(), itr.Value() assert.Exactly(t, key, k) @@ -44,7 +34,6 @@ func checkInvalid(t *testing.T, itr Iterator) { checkKeyPanics(t, itr) checkValuePanics(t, itr) checkNextPanics(t, itr) - checkPrevPanics(t, itr) } func checkKeyPanics(t *testing.T, itr Iterator) { @@ -67,7 +56,7 @@ func TestDBIteratorSingleKey(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) checkNext(t, itr, false) @@ -88,15 +77,12 @@ func TestDBIteratorTwoKeys(t *testing.T) { db.SetSync(bz("2"), bz("value_1")) { // Fail by calling Next too much - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) for i := 0; i < 10; i++ { checkNext(t, itr, true) checkValid(t, itr, true) - - checkPrev(t, itr, true) - checkValid(t, itr, true) } checkNext(t, itr, true) @@ -110,27 +96,6 @@ func TestDBIteratorTwoKeys(t *testing.T) { // Once invalid... checkInvalid(t, itr) } - - { // Fail by calling Prev too much - itr := db.Iterator() - checkValid(t, itr, true) - - for i := 0; i < 10; i++ { - checkNext(t, itr, true) - checkValid(t, itr, true) - - checkPrev(t, itr, true) - checkValid(t, itr, true) - } - - checkPrev(t, itr, false) - checkValid(t, itr, false) - - checkPrevPanics(t, itr) - - // Once invalid... - checkInvalid(t, itr) - } }) } } @@ -139,32 +104,30 @@ func TestDBIteratorEmpty(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator() + itr := db.Iterator(BeginningKey(), EndingKey()) checkInvalid(t, itr) }) } } -func TestDBIteratorEmptySeek(t *testing.T) { +func TestDBIteratorEmptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator() - itr.Seek(bz("1")) + itr := db.Iterator(bz("1"), EndingKey()) checkInvalid(t, itr) }) } } -func TestDBIteratorBadSeek(t *testing.T) { +func TestDBIteratorNonemptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator() - itr.Seek(bz("2")) + itr := db.Iterator(bz("2"), EndingKey()) checkInvalid(t, itr) }) diff --git a/db/mem_db_test.go b/db/mem_db_test.go index b5c9167c8..42e242857 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -19,7 +19,7 @@ func TestMemDbIterator(t *testing.T) { db.Set(k, value) } - iter := db.Iterator() + iter := db.Iterator(BeginningKey(), EndingKey()) i := 0 for ; iter.Valid(); iter.Next() { assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") diff --git a/db/types.go b/db/types.go index 7422a5155..8306813c7 100644 --- a/db/types.go +++ b/db/types.go @@ -90,17 +90,11 @@ type Iterator interface { Valid() bool // Next moves the iterator to the next sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. + // defined by order of iteration. 
// // If Valid returns false, this method will panic. Next() - // Prev moves the iterator to the previous sequential key in the database, as - // defined by the Comparator in the ReadOptions used to create this Iterator. - // - // If Valid returns false, this method will panic. - Prev() - // Key returns the key of the cursor. // // If Valid returns false, this method will panic. @@ -120,3 +114,8 @@ type Iterator interface { // Release deallocates the given Iterator. Release() } + +// For testing convenience. +func bz(s string) []byte { + return []byte(s) +} diff --git a/db/util_test.go b/db/util_test.go index 55a41bf5b..4f8b9c456 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -66,7 +66,6 @@ func TestPrefixIteratorMatches1N(t *testing.T) { db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/1")) checkValid(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) @@ -82,32 +81,6 @@ func TestPrefixIteratorMatches1N(t *testing.T) { } } -// Search for a/1, fail by too much Prev() -func TestPrefixIteratorMatches1P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/1")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! - checkPrev(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - // Search for a/2, fail by too much Next() func TestPrefixIteratorMatches2N(t *testing.T) { for backend, _ := range backends { @@ -116,41 +89,15 @@ func TestPrefixIteratorMatches2N(t *testing.T) { db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/2")) checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! - checkNext(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} - -// Search for a/2, fail by too much Prev() -func TestPrefixIteratorMatches2P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/2")) - checkValid(t, itr, true) checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) // Bad! - checkPrev(t, itr, false) + checkNext(t, itr, false) // Once invalid... 
checkInvalid(t, itr) @@ -166,11 +113,8 @@ func TestPrefixIteratorMatches3N(t *testing.T) { db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/3")) checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) checkNext(t, itr, true) checkItem(t, itr, bz("a/3"), bz("value_3")) @@ -183,27 +127,3 @@ func TestPrefixIteratorMatches3N(t *testing.T) { }) } } - -// Search for a/3, fail by too much Prev() -func TestPrefixIteratorMatches3P(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - itr.Seek(bz("a/3")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - checkPrev(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - - // Bad! - checkPrev(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} From 781f6c5d228c34227dcd059673175ea86398218f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 18:04:05 -0500 Subject: [PATCH 10/68] db: some comments in types.go --- db/types.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/db/types.go b/db/types.go index 8306813c7..a6edbdd85 100644 --- a/db/types.go +++ b/db/types.go @@ -54,10 +54,12 @@ type SetDeleter interface { //---------------------------------------- +// BeginningKey is the smallest key. func BeginningKey() []byte { return []byte{} } +// EndingKey is the largest key. func EndingKey() []byte { return nil } @@ -81,8 +83,8 @@ type Iterator interface { // A domain of ([]byte{12, 13}, []byte{12, 14}) will iterate // over anything with the prefix []byte{12, 13}. // - // The smallest key is the empty byte array []byte{}. - // The largest key is the nil byte array []byte(nil). + // The smallest key is the empty byte array []byte{} - see BeginningKey(). + // The largest key is the nil byte array []byte(nil) - see EndingKey(). Domain() (start []byte, end []byte) // Valid returns whether the current position is valid. @@ -100,7 +102,7 @@ type Iterator interface { // If Valid returns false, this method will panic. Key() []byte - // Value returns the key of the cursor. + // Value returns the value of the cursor. // // If Valid returns false, this method will panic. 
Value() []byte From 5b7f90dfb258c4b26f8209b42181a814b6978eb1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 18:42:47 -0500 Subject: [PATCH 11/68] db: test panic on nil key --- db/backend_test.go | 59 +++++++++++++++++++++++++++++++++++++--------- db/fsdb.go | 8 +++++++ db/go_level_db.go | 6 +++++ db/mem_db.go | 14 ++++++----- db/types.go | 7 ++++++ 5 files changed, 77 insertions(+), 17 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index b4ffecdc6..b21ce0037 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -2,42 +2,79 @@ package db import ( "fmt" + "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" cmn "github.com/tendermint/tmlibs/common" ) -func testBackend(t *testing.T, backend string) { +func testBackendGetSetDelete(t *testing.T, backend string) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) require.Nil(t, db.Get([]byte(""))) - require.Nil(t, db.Get(nil)) // Set empty ("") db.Set([]byte(""), []byte("")) require.NotNil(t, db.Get([]byte(""))) - require.NotNil(t, db.Get(nil)) require.Empty(t, db.Get([]byte(""))) - require.Empty(t, db.Get(nil)) // Set empty (nil) db.Set([]byte(""), nil) require.NotNil(t, db.Get([]byte(""))) - require.NotNil(t, db.Get(nil)) require.Empty(t, db.Get([]byte(""))) - require.Empty(t, db.Get(nil)) // Delete db.Delete([]byte("")) require.Nil(t, db.Get([]byte(""))) - require.Nil(t, db.Get(nil)) } -func TestBackends(t *testing.T) { - testBackend(t, CLevelDBBackendStr) - testBackend(t, GoLevelDBBackendStr) - testBackend(t, MemDBBackendStr) +func TestBackendsGetSetDelete(t *testing.T) { + for dbType, _ := range backends { + if dbType == "fsdb" { + // TODO: handle + // fsdb cant deal with length 0 keys + continue + } + testBackendGetSetDelete(t, dbType) + } +} + +func assertPanics(t *testing.T, dbType, name string, fn func()) { + defer func() { + r := recover() + assert.NotNil(t, r, cmn.Fmt("expecting %s.%s to panic", dbType, name)) + }() + + fn() +} + +func TestBackendsNilKeys(t *testing.T) { + // test all backends + for dbType, creator := range backends { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + assert.Nil(t, err) + defer os.RemoveAll(name) + + assertPanics(t, dbType, "get", func() { db.Get(nil) }) + assertPanics(t, dbType, "has", func() { db.Has(nil) }) + assertPanics(t, dbType, "set", func() { db.Set(nil, []byte("abc")) }) + assertPanics(t, dbType, "setsync", func() { db.SetSync(nil, []byte("abc")) }) + assertPanics(t, dbType, "delete", func() { db.Delete(nil) }) + assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) + + db.Close() + } +} + +func TestLevelDBBackendStr(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer os.RemoveAll(name) + _, ok := db.(*GoLevelDB) + assert.True(t, ok) } diff --git a/db/fsdb.go b/db/fsdb.go index b6e08daf5..19ea9fa3c 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -44,6 +44,7 @@ func NewFSDB(dir string) *FSDB { func (db *FSDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) path := db.nameToPath(key) value, err := read(path) @@ -58,6 +59,7 @@ func (db *FSDB) Get(key []byte) []byte { func (db *FSDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) path := db.nameToPath(key) _, err := read(path) @@ -72,6 +74,7 @@ func (db *FSDB) Has(key []byte) bool { func (db *FSDB) Set(key []byte, 
value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.SetNoLock(key, value) } @@ -79,12 +82,14 @@ func (db *FSDB) Set(key []byte, value []byte) { func (db *FSDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) SetNoLock(key []byte, value []byte) { + panicNilKey(key) if value == nil { value = []byte{} } @@ -98,6 +103,7 @@ func (db *FSDB) SetNoLock(key []byte, value []byte) { func (db *FSDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.DeleteNoLock(key) } @@ -105,12 +111,14 @@ func (db *FSDB) Delete(key []byte) { func (db *FSDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() + panicNilKey(key) db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) DeleteNoLock(key []byte) { + panicNilKey(key) err := remove(string(key)) if os.IsNotExist(err) { return diff --git a/db/go_level_db.go b/db/go_level_db.go index e8ed99dee..201a31949 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -37,6 +37,7 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { } func (db *GoLevelDB) Get(key []byte) []byte { + panicNilKey(key) res, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { @@ -49,6 +50,7 @@ func (db *GoLevelDB) Get(key []byte) []byte { } func (db *GoLevelDB) Has(key []byte) bool { + panicNilKey(key) _, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { @@ -61,6 +63,7 @@ func (db *GoLevelDB) Has(key []byte) bool { } func (db *GoLevelDB) Set(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(key, value, nil) if err != nil { PanicCrisis(err) @@ -68,6 +71,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } func (db *GoLevelDB) SetSync(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -75,6 +79,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } func (db *GoLevelDB) Delete(key []byte) { + panicNilKey(key) err := db.db.Delete(key, nil) if err != nil { PanicCrisis(err) @@ -82,6 +87,7 @@ func (db *GoLevelDB) Delete(key []byte) { } func (db *GoLevelDB) DeleteSync(key []byte) { + panicNilKey(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) diff --git a/db/mem_db.go b/db/mem_db.go index 3127030ae..ebeb2dded 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -27,14 +27,14 @@ func NewMemDB() *MemDB { func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) return db.db[string(key)] } func (db *MemDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) _, ok := db.db[string(key)] return ok } @@ -42,14 +42,14 @@ func (db *MemDB) Has(key []byte) bool { func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) db.SetNoLock(key, value) } @@ -58,25 +58,27 @@ func (db *MemDB) SetNoLock(key []byte, value []byte) { if value == nil { value = []byte{} } + panicNilKey(key) db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) delete(db.db, string(key)) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - + panicNilKey(key) delete(db.db, 
string(key)) } // NOTE: Implements atomicSetDeleter func (db *MemDB) DeleteNoLock(key []byte) { + panicNilKey(key) delete(db.db, string(key)) } diff --git a/db/types.go b/db/types.go index a6edbdd85..54c1025a0 100644 --- a/db/types.go +++ b/db/types.go @@ -121,3 +121,10 @@ type Iterator interface { func bz(s string) []byte { return []byte(s) } + +// All DB funcs should panic on nil key. +func panicNilKey(key []byte) { + if key == nil { + panic("nil key") + } +} From c547caf04f17dee0390733fc9167e68975aecdb9 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 19:08:22 -0500 Subject: [PATCH 12/68] db: some test cleanup --- db/backend_test.go | 14 +++++++++----- db/c_level_db_test.go | 11 +++++++++++ db/db.go | 2 +- 3 files changed, 21 insertions(+), 6 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index b21ce0037..3d10c66cb 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -58,7 +58,6 @@ func TestBackendsNilKeys(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db, err := creator(name, "") assert.Nil(t, err) - defer os.RemoveAll(name) assertPanics(t, dbType, "get", func() { db.Get(nil) }) assertPanics(t, dbType, "has", func() { db.Has(nil) }) @@ -68,13 +67,18 @@ func TestBackendsNilKeys(t *testing.T) { assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) db.Close() + err = os.RemoveAll(name + ".db") + assert.Nil(t, err) } } -func TestLevelDBBackendStr(t *testing.T) { +func TestGoLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackendStr, "") - defer os.RemoveAll(name) - _, ok := db.(*GoLevelDB) - assert.True(t, ok) + defer os.RemoveAll(name + ".db") + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 864362332..606c34519 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -5,8 +5,10 @@ package db import ( "bytes" "fmt" + "os" "testing" + "github.com/stretchr/testify/assert" cmn "github.com/tendermint/tmlibs/common" ) @@ -84,3 +86,12 @@ func bytes2Int64(buf []byte) int64 { return int64(binary.BigEndian.Uint64(buf)) } */ + +func TestCLevelDBBackendStr(t *testing.T) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer os.RemoveAll(name) + + _, ok := db.(*CLevelDB) + assert.True(t, ok) +} diff --git a/db/db.go b/db/db.go index 7eec04d56..b43b06554 100644 --- a/db/db.go +++ b/db/db.go @@ -6,7 +6,7 @@ import "fmt" // Main entry const ( - LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb. 
+ LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb unless +gcc CLevelDBBackendStr = "cleveldb" GoLevelDBBackendStr = "goleveldb" MemDBBackendStr = "memdb" From ba8c5045b5c67df9fbba08974d0194b6e735cbd2 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 19:22:08 -0500 Subject: [PATCH 13/68] db: fixes to fsdb and clevledb --- db/backend_test.go | 24 ++++++++++-------------- db/c_level_db.go | 6 ++++++ db/fsdb.go | 5 +++-- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 3d10c66cb..9dc17201a 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,30 +15,26 @@ func testBackendGetSetDelete(t *testing.T, backend string) { dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) - require.Nil(t, db.Get([]byte(""))) + key := []byte("abc") + require.Nil(t, db.Get(key)) // Set empty ("") - db.Set([]byte(""), []byte("")) - require.NotNil(t, db.Get([]byte(""))) - require.Empty(t, db.Get([]byte(""))) + db.Set(key, []byte("")) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) // Set empty (nil) - db.Set([]byte(""), nil) - require.NotNil(t, db.Get([]byte(""))) - require.Empty(t, db.Get([]byte(""))) + db.Set(key, nil) + require.NotNil(t, db.Get(key)) + require.Empty(t, db.Get(key)) // Delete - db.Delete([]byte("")) - require.Nil(t, db.Get([]byte(""))) + db.Delete(key) + require.Nil(t, db.Get(key)) } func TestBackendsGetSetDelete(t *testing.T) { for dbType, _ := range backends { - if dbType == "fsdb" { - // TODO: handle - // fsdb cant deal with length 0 keys - continue - } testBackendGetSetDelete(t, dbType) } } diff --git a/db/c_level_db.go b/db/c_level_db.go index 11a6e5ff7..47e79dfa6 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -48,6 +48,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { } func (db *CLevelDB) Get(key []byte) []byte { + panicNilKey(key) res, err := db.db.Get(db.ro, key) if err != nil { panic(err) @@ -56,10 +57,12 @@ func (db *CLevelDB) Get(key []byte) []byte { } func (db *CLevelDB) Has(key []byte) bool { + panicNilKey(key) panic("not implemented yet") } func (db *CLevelDB) Set(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(db.wo, key, value) if err != nil { panic(err) @@ -67,6 +70,7 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } func (db *CLevelDB) SetSync(key []byte, value []byte) { + panicNilKey(key) err := db.db.Put(db.woSync, key, value) if err != nil { panic(err) @@ -74,6 +78,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } func (db *CLevelDB) Delete(key []byte) { + panicNilKey(key) err := db.db.Delete(db.wo, key) if err != nil { panic(err) @@ -81,6 +86,7 @@ func (db *CLevelDB) Delete(key []byte) { } func (db *CLevelDB) DeleteSync(key []byte) { + panicNilKey(key) err := db.db.Delete(db.woSync, key) if err != nil { panic(err) diff --git a/db/fsdb.go b/db/fsdb.go index 19ea9fa3c..116dc3eef 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -119,7 +119,8 @@ func (db *FSDB) DeleteSync(key []byte) { // NOTE: Implements atomicSetDeleter. func (db *FSDB) DeleteNoLock(key []byte) { panicNilKey(key) - err := remove(string(key)) + path := db.nameToPath(key) + err := remove(path) if os.IsNotExist(err) { return } else if err != nil { @@ -210,7 +211,7 @@ func read(path string) ([]byte, error) { // Write some bytes from a file. // CONTRACT: returns os errors directly without wrapping. 
func write(path string, d []byte) error { - f, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, keyPerm) + f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, keyPerm) if err != nil { return err } From bb115d4d615bdbe04e664b76346900151f83729e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 19:28:52 -0500 Subject: [PATCH 14/68] cleanupDBDir --- db/backend_test.go | 11 ++++++++--- db/c_level_db_test.go | 3 +-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 9dc17201a..16649cd20 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -3,6 +3,7 @@ package db import ( "fmt" "os" + "path" "testing" "github.com/stretchr/testify/assert" @@ -10,11 +11,16 @@ import ( cmn "github.com/tendermint/tmlibs/common" ) +func cleanupDBDir(dir, name string) { + os.RemoveAll(path.Join(dir, name) + ".db") +} + func testBackendGetSetDelete(t *testing.T, backend string) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() db := NewDB("testdb", backend, dirname) + key := []byte("abc") require.Nil(t, db.Get(key)) @@ -53,6 +59,7 @@ func TestBackendsNilKeys(t *testing.T) { for dbType, creator := range backends { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db, err := creator(name, "") + defer cleanupDBDir("", name) assert.Nil(t, err) assertPanics(t, dbType, "get", func() { db.Get(nil) }) @@ -63,15 +70,13 @@ func TestBackendsNilKeys(t *testing.T) { assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) db.Close() - err = os.RemoveAll(name + ".db") - assert.Nil(t, err) } } func TestGoLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackendStr, "") - defer os.RemoveAll(name + ".db") + defer cleanupDBDir("", name) if _, ok := backends[CLevelDBBackendStr]; !ok { _, ok := db.(*GoLevelDB) diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 606c34519..89993fbac 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -5,7 +5,6 @@ package db import ( "bytes" "fmt" - "os" "testing" "github.com/stretchr/testify/assert" @@ -90,7 +89,7 @@ func bytes2Int64(buf []byte) int64 { func TestCLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) db := NewDB(name, LevelDBBackendStr, "") - defer os.RemoveAll(name) + defer cleanupDBDir("", name) _, ok := db.(*CLevelDB) assert.True(t, ok) From 39e40ff5ce8dd496475db872426cd7d5860b2a05 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 20:06:50 -0500 Subject: [PATCH 15/68] db: memdb iterator --- db/mem_db.go | 88 ++++++++++++++++++++++++++++++----------------- db/mem_db_test.go | 2 +- 2 files changed, 57 insertions(+), 33 deletions(-) diff --git a/db/mem_db.go b/db/mem_db.go index ebeb2dded..84d14de98 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -3,6 +3,8 @@ package db import ( "bytes" "fmt" + "sort" + "strings" "sync" ) @@ -12,6 +14,8 @@ func init() { }, false) } +var _ DB = (*MemDB)(nil) + type MemDB struct { mtx sync.Mutex db map[string][]byte @@ -123,49 +127,67 @@ func (db *MemDB) Mutex() *sync.Mutex { //---------------------------------------- func (db *MemDB) Iterator(start, end []byte) Iterator { - /* - XXX - it := newMemDBIterator() - it.db = db - it.cur = 0 - - db.mtx.Lock() - defer db.mtx.Unlock() - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. 
- for key, _ := range db.db { - it.keys = append(it.keys, key) - } - sort.Strings(it.keys) - return it - */ - return nil + it := newMemDBIterator(db, start, end) + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + it.keys = db.getSortedKeys(start, end) + return it } func (db *MemDB) ReverseIterator(start, end []byte) Iterator { - // XXX + it := newMemDBIterator(db, start, end) + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + it.keys = db.getSortedKeys(end, start) + // reverse the order + l := len(it.keys) - 1 + for i, v := range it.keys { + it.keys[i] = it.keys[l-i] + it.keys[l-i] = v + } return nil } -type memDBIterator struct { - cur int - keys []string - db DB +func (db *MemDB) getSortedKeys(start, end []byte) []string { + keys := []string{} + for key, _ := range db.db { + leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 + if leftCondition && rightCondition { + keys = append(keys, key) + } + } + sort.Strings(keys) + return keys } -func newMemDBIterator() *memDBIterator { - return &memDBIterator{} +var _ Iterator = (*memDBIterator)(nil) + +type memDBIterator struct { + cur int + keys []string + db DB + start, end []byte } -func (it *memDBIterator) Seek(key []byte) { - for i, ik := range it.keys { - it.cur = i - if bytes.Compare(key, []byte(ik)) <= 0 { - return - } +func newMemDBIterator(db DB, start, end []byte) *memDBIterator { + return &memDBIterator{ + db: db, + start: start, + end: end, } - it.cur += 1 // If not found, becomes invalid. 
+} + +func (it *memDBIterator) Domain() ([]byte, []byte) { + return it.start, it.end } func (it *memDBIterator) Valid() bool { @@ -208,3 +230,5 @@ func (it *memDBIterator) Close() { func (it *memDBIterator) GetError() error { return nil } + +func (it *memDBIterator) Release() {} diff --git a/db/mem_db_test.go b/db/mem_db_test.go index 42e242857..a08a3679b 100644 --- a/db/mem_db_test.go +++ b/db/mem_db_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" ) -func TestMemDbIterator(t *testing.T) { +func TestMemDBIterator(t *testing.T) { db := NewMemDB() keys := make([][]byte, 100) for i := 0; i < 100; i++ { From 3e5dbef6a216a8fcd24ac72c6f65f793753d1a4b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 20:38:03 -0500 Subject: [PATCH 16/68] db: goleveldb iterator --- db/common_test.go | 5 ----- db/go_level_db.go | 37 ++++++++++++++++++++++--------------- db/util_test.go | 2 +- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/db/common_test.go b/db/common_test.go index 09fad8424..59c86a2c7 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -80,11 +80,6 @@ func TestDBIteratorTwoKeys(t *testing.T) { itr := db.Iterator(BeginningKey(), EndingKey()) checkValid(t, itr, true) - for i := 0; i < 10; i++ { - checkNext(t, itr, true) - checkValid(t, itr, true) - } - checkNext(t, itr, true) checkValid(t, itr, true) diff --git a/db/go_level_db.go b/db/go_level_db.go index 201a31949..45b437d2b 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -20,6 +20,8 @@ func init() { registerDBCreator(GoLevelDBBackendStr, dbCreator, false) } +var _ DB = (*GoLevelDB)(nil) + type GoLevelDB struct { db *leveldb.DB } @@ -168,15 +170,13 @@ func (mBatch *goLevelDBBatch) Write() { // Iterator func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - /* - XXX - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), - } - itr.Seek(nil) - return itr - */ - return nil + itr := &goLevelDBIterator{ + source: db.db.NewIterator(nil, nil), + start: start, + end: end, + } + itr.source.Seek(start) + return itr } func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { @@ -184,9 +184,16 @@ func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { return nil } +var _ Iterator = (*goLevelDBIterator)(nil) + type goLevelDBIterator struct { - source iterator.Iterator - invalid bool + source iterator.Iterator + invalid bool + start, end []byte +} + +func (it *goLevelDBIterator) Domain() ([]byte, []byte) { + return it.start, it.end } // Key returns a copy of the current key. @@ -217,10 +224,6 @@ func (it *goLevelDBIterator) GetError() error { return it.source.Error() } -func (it *goLevelDBIterator) Seek(key []byte) { - it.source.Seek(key) -} - func (it *goLevelDBIterator) Valid() bool { if it.invalid { return false @@ -246,3 +249,7 @@ func (it *goLevelDBIterator) Prev() { func (it *goLevelDBIterator) Close() { it.source.Release() } + +func (it *goLevelDBIterator) Release() { + it.source.Release() +} diff --git a/db/util_test.go b/db/util_test.go index 4f8b9c456..a0ce9cd51 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -50,7 +50,7 @@ func TestPrefixIteratorMatch3(t *testing.T) { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("3"), bz("value_3")) - itr := IteratePrefix(db, []byte("2")) + itr := IteratePrefix(db, []byte("4")) // Once invalid... 
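			// (Here the DB holds only the key "3", so the prefix "4" starts past every
			// stored key and the iterator is expected to be invalid immediately.)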
checkInvalid(t, itr) From bcacaf164b8b79cc09ff2abec2ff4ec212315aba Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 20:58:35 -0500 Subject: [PATCH 17/68] db: cleveldb iterator --- db/c_level_db.go | 32 +++++++++++++++++++------------- db/common_test.go | 5 +++-- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 47e79dfa6..527fd7da3 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -17,6 +17,8 @@ func init() { registerDBCreator(CLevelDBBackendStr, dbCreator, false) } +var _ DB = (*CLevelDB)(nil) + type CLevelDB struct { db *levigo.DB ro *levigo.ReadOptions @@ -158,13 +160,17 @@ func (mBatch *cLevelDBBatch) Write() { // Iterator func (db *CLevelDB) Iterator(start, end []byte) Iterator { - /* - XXX - itr := db.db.NewIterator(db.ro) - itr.Seek([]byte{0x00}) - return cLevelDBIterator{itr} - */ - return nil + itr := db.db.NewIterator(db.ro) + if len(start) > 0 { + itr.Seek(start) + } else { + itr.SeekToFirst() + } + return cLevelDBIterator{ + itr: itr, + start: start, + end: end, + } } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { @@ -172,15 +178,15 @@ func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { return nil } +var _ Iterator = (*cLevelDBIterator)(nil) + type cLevelDBIterator struct { - itr *levigo.Iterator + itr *levigo.Iterator + start, end []byte } -func (c cLevelDBIterator) Seek(key []byte) { - if key == nil { - key = []byte{0x00} - } - c.itr.Seek(key) +func (c cLevelDBIterator) Domain() ([]byte, []byte) { + return c.start, c.end } func (c cLevelDBIterator) Valid() bool { diff --git a/db/common_test.go b/db/common_test.go index 59c86a2c7..6b3009795 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -5,18 +5,19 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tmlibs/common" ) func checkValid(t *testing.T, itr Iterator, expected bool) { valid := itr.Valid() - assert.Equal(t, expected, valid) + require.Equal(t, expected, valid) } func checkNext(t *testing.T, itr Iterator, expected bool) { itr.Next() valid := itr.Valid() - assert.Equal(t, expected, valid) + require.Equal(t, expected, valid) } func checkNextPanics(t *testing.T, itr Iterator) { From edf07760d6f45663a992cf8a0978521084a6c597 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 12 Dec 2017 21:08:38 -0500 Subject: [PATCH 18/68] db: fsdb iterator --- db/fsdb.go | 48 ++++++++++++++++++++++++------------------------ db/mem_db.go | 6 +----- db/util.go | 11 +++++++++++ 3 files changed, 36 insertions(+), 29 deletions(-) diff --git a/db/fsdb.go b/db/fsdb.go index 116dc3eef..ac9cdd741 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -7,6 +7,7 @@ import ( "os" "path" "path/filepath" + "sort" "sync" "github.com/pkg/errors" @@ -24,6 +25,8 @@ func init() { }, false) } +var _ DB = (*FSDB)(nil) + // It's slow. type FSDB struct { mtx sync.Mutex @@ -160,26 +163,20 @@ func (db *FSDB) Mutex() *sync.Mutex { } func (db *FSDB) Iterator(start, end []byte) Iterator { - /* - XXX - it := newMemDBIterator() - it.db = db - it.cur = 0 - - db.mtx.Lock() - defer db.mtx.Unlock() - - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. 
- keys, err := list(db.dir) - if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) - } - sort.Strings(keys) - it.keys = keys - return it - */ - return nil + it := newMemDBIterator(db, start, end) + + db.mtx.Lock() + defer db.mtx.Unlock() + + // We need a copy of all of the keys. + // Not the best, but probably not a bottleneck depending. + keys, err := list(db.dir, start, end) + if err != nil { + panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + } + sort.Strings(keys) + it.keys = keys + return it } func (db *FSDB) ReverseIterator(start, end []byte) Iterator { @@ -233,7 +230,7 @@ func remove(path string) error { // List files of a path. // Paths will NOT include dir as the prefix. // CONTRACT: returns os errors directly without wrapping. -func list(dirPath string) (paths []string, err error) { +func list(dirPath string, start, end []byte) ([]string, error) { dir, err := os.Open(dirPath) if err != nil { return nil, err @@ -244,12 +241,15 @@ func list(dirPath string) (paths []string, err error) { if err != nil { return nil, err } - for i, name := range names { + var paths []string + for _, name := range names { n, err := url.PathUnescape(name) if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - names[i] = n + if checkKeyCondition(n, start, end) { + paths = append(paths, n) + } } - return names, nil + return paths, nil } diff --git a/db/mem_db.go b/db/mem_db.go index 84d14de98..a9f21d526 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -1,10 +1,8 @@ package db import ( - "bytes" "fmt" "sort" - "strings" "sync" ) @@ -159,9 +157,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { func (db *MemDB) getSortedKeys(start, end []byte) []string { keys := []string{} for key, _ := range db.db { - leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 - rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 - if leftCondition && rightCondition { + if checkKeyCondition(key, start, end) { keys = append(keys, key) } } diff --git a/db/util.go b/db/util.go index 89c777622..02f4a52f0 100644 --- a/db/util.go +++ b/db/util.go @@ -1,5 +1,10 @@ package db +import ( + "bytes" + "strings" +) + func IteratePrefix(db DB, prefix []byte) Iterator { var start, end []byte if len(prefix) == 0 { @@ -33,3 +38,9 @@ func cpIncr(bz []byte) (ret []byte) { } return EndingKey() } + +func checkKeyCondition(key string, start, end []byte) bool { + leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 + return leftCondition && rightCondition +} From 5636a02d035258701974da39c62d13c1d76f8ae8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 12 Dec 2017 19:19:26 -0800 Subject: [PATCH 19/68] Remove GetError() from Iterator --- db/c_level_db.go | 7 +++++-- db/go_level_db.go | 11 +++++++---- db/mem_db.go | 4 ---- db/types.go | 6 ------ 4 files changed, 12 insertions(+), 16 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 527fd7da3..93bc1dfb7 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -190,6 +190,7 @@ func (c cLevelDBIterator) Domain() ([]byte, []byte) { } func (c cLevelDBIterator) Valid() bool { + c.assertNoError() return c.itr.Valid() } @@ -218,6 +219,8 @@ func (c cLevelDBIterator) Release() { c.itr.Close() } -func (c cLevelDBIterator) GetError() error { - return c.itr.GetError() +func (c 
cLevelDBIterator) assertNoError() { + if err := c.itr.GetError(); err != nil { + panic(err) + } } diff --git a/db/go_level_db.go b/db/go_level_db.go index 45b437d2b..89015547a 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -220,11 +220,8 @@ func (it *goLevelDBIterator) Value() []byte { return v } -func (it *goLevelDBIterator) GetError() error { - return it.source.Error() -} - func (it *goLevelDBIterator) Valid() bool { + it.assertNoError() if it.invalid { return false } @@ -253,3 +250,9 @@ func (it *goLevelDBIterator) Close() { func (it *goLevelDBIterator) Release() { it.source.Release() } + +func (it *goLevelDBIterator) assertNoError() { + if err := it.source.Error(); err != nil { + panic(err) + } +} diff --git a/db/mem_db.go b/db/mem_db.go index a9f21d526..81e209648 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -223,8 +223,4 @@ func (it *memDBIterator) Close() { it.keys = nil } -func (it *memDBIterator) GetError() error { - return nil -} - func (it *memDBIterator) Release() {} diff --git a/db/types.go b/db/types.go index 54c1025a0..8370ff2da 100644 --- a/db/types.go +++ b/db/types.go @@ -107,12 +107,6 @@ type Iterator interface { // If Valid returns false, this method will panic. Value() []byte - // GetError returns an IteratorError from LevelDB if it had one during - // iteration. - // - // This method is safe to call when Valid returns false. - GetError() error - // Release deallocates the given Iterator. Release() } From 318982c0babe627c7dda57e23a1eae2bf0d2c1bf Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 13 Dec 2017 01:33:38 -0500 Subject: [PATCH 20/68] checkKeyCondition -> IsKeyInDomain --- db/fsdb.go | 2 +- db/mem_db.go | 2 +- db/util.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/db/fsdb.go b/db/fsdb.go index ac9cdd741..8a40d4f19 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -247,7 +247,7 @@ func list(dirPath string, start, end []byte) ([]string, error) { if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - if checkKeyCondition(n, start, end) { + if IsKeyInDomain(n, start, end) { paths = append(paths, n) } } diff --git a/db/mem_db.go b/db/mem_db.go index 81e209648..d20d0e7ea 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -157,7 +157,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { func (db *MemDB) getSortedKeys(start, end []byte) []string { keys := []string{} for key, _ := range db.db { - if checkKeyCondition(key, start, end) { + if IsKeyInDomain(key, start, end) { keys = append(keys, key) } } diff --git a/db/util.go b/db/util.go index 02f4a52f0..203ddcfaf 100644 --- a/db/util.go +++ b/db/util.go @@ -39,7 +39,7 @@ func cpIncr(bz []byte) (ret []byte) { return EndingKey() } -func checkKeyCondition(key string, start, end []byte) bool { +func IsKeyInDomain(key string, start, end []byte) bool { leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 return leftCondition && rightCondition From d4aeca8ce30c1cdf84f45e5160240d292fb848bb Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 13 Dec 2017 20:11:11 -0500 Subject: [PATCH 21/68] fixes from @melekes --- db/backend_test.go | 26 +++++++++----------------- db/c_level_db.go | 4 ++-- db/fsdb.go | 13 ++++++------- db/go_level_db.go | 16 ++++++++++------ 4 files changed, 27 insertions(+), 32 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 16649cd20..7ead549b0 100644 --- 
a/db/backend_test.go +++ b/db/backend_test.go @@ -3,7 +3,7 @@ package db import ( "fmt" "os" - "path" + "path/filepath" "testing" "github.com/stretchr/testify/assert" @@ -12,7 +12,7 @@ import ( ) func cleanupDBDir(dir, name string) { - os.RemoveAll(path.Join(dir, name) + ".db") + os.RemoveAll(filepath.Join(dir, name) + ".db") } func testBackendGetSetDelete(t *testing.T, backend string) { @@ -45,15 +45,6 @@ func TestBackendsGetSetDelete(t *testing.T) { } } -func assertPanics(t *testing.T, dbType, name string, fn func()) { - defer func() { - r := recover() - assert.NotNil(t, r, cmn.Fmt("expecting %s.%s to panic", dbType, name)) - }() - - fn() -} - func TestBackendsNilKeys(t *testing.T) { // test all backends for dbType, creator := range backends { @@ -62,12 +53,13 @@ func TestBackendsNilKeys(t *testing.T) { defer cleanupDBDir("", name) assert.Nil(t, err) - assertPanics(t, dbType, "get", func() { db.Get(nil) }) - assertPanics(t, dbType, "has", func() { db.Has(nil) }) - assertPanics(t, dbType, "set", func() { db.Set(nil, []byte("abc")) }) - assertPanics(t, dbType, "setsync", func() { db.SetSync(nil, []byte("abc")) }) - assertPanics(t, dbType, "delete", func() { db.Delete(nil) }) - assertPanics(t, dbType, "deletesync", func() { db.DeleteSync(nil) }) + panicMsg := "expecting %s.%s to panic" + assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") + assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") + assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") + assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") + assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") + assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") db.Close() } diff --git a/db/c_level_db.go b/db/c_level_db.go index 93bc1dfb7..ff8347cc7 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -4,7 +4,7 @@ package db import ( "fmt" - "path" + "path/filepath" "github.com/jmhodges/levigo" ) @@ -27,7 +27,7 @@ type CLevelDB struct { } func NewCLevelDB(name string, dir string) (*CLevelDB, error) { - dbPath := path.Join(dir, name+".db") + dbPath := filepath.Join(dir, name+".db") opts := levigo.NewOptions() opts.SetCache(levigo.NewLRUCache(1 << 30)) diff --git a/db/fsdb.go b/db/fsdb.go index 8a40d4f19..85adae630 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "net/url" "os" - "path" "path/filepath" "sort" "sync" @@ -54,7 +53,7 @@ func (db *FSDB) Get(key []byte) []byte { if os.IsNotExist(err) { return nil } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) } return value } @@ -69,7 +68,7 @@ func (db *FSDB) Has(key []byte) bool { if os.IsNotExist(err) { return false } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Getting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) } return true } @@ -99,7 +98,7 @@ func (db *FSDB) SetNoLock(key []byte, value []byte) { path := db.nameToPath(key) err := write(path, value) if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Setting key %s (0x%X)", string(key), key))) + panic(errors.Wrapf(err, "Setting key %s (0x%X)", string(key), key)) } } @@ -127,7 +126,7 @@ func (db *FSDB) DeleteNoLock(key []byte) { if os.IsNotExist(err) { return } else if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Removing key %s (0x%X)", string(key), key))) + 
panic(errors.Wrapf(err, "Removing key %s (0x%X)", string(key), key)) } } @@ -172,7 +171,7 @@ func (db *FSDB) Iterator(start, end []byte) Iterator { // Not the best, but probably not a bottleneck depending. keys, err := list(db.dir, start, end) if err != nil { - panic(errors.Wrap(err, fmt.Sprintf("Listing keys in %s", db.dir))) + panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } sort.Strings(keys) it.keys = keys @@ -186,7 +185,7 @@ func (db *FSDB) ReverseIterator(start, end []byte) Iterator { func (db *FSDB) nameToPath(name []byte) string { n := url.PathEscape(string(name)) - return path.Join(db.dir, n) + return filepath.Join(db.dir, n) } // Read some bytes to a file. diff --git a/db/go_level_db.go b/db/go_level_db.go index 89015547a..d741c6904 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -2,7 +2,7 @@ package db import ( "fmt" - "path" + "path/filepath" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" @@ -27,7 +27,7 @@ type GoLevelDB struct { } func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { - dbPath := path.Join(dir, name+".db") + dbPath := filepath.Join(dir, name+".db") db, err := leveldb.OpenFile(dbPath, nil) if err != nil { return nil, err @@ -170,13 +170,17 @@ func (mBatch *goLevelDBBatch) Write() { // Iterator func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itr := &goLevelDBIterator{ - source: db.db.NewIterator(nil, nil), + itr := db.db.NewIterator(nil, nil) + if len(start) > 0 { + itr.Seek(start) + } else { + itr.First() + } + return &goLevelDBIterator{ + source: itr, start: start, end: end, } - itr.source.Seek(start) - return itr } func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { From a2f7898b6d89b2d2ef9563f622a7ddee8b193a13 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 13 Dec 2017 22:28:37 -0500 Subject: [PATCH 22/68] db: fix c and go iterators --- db/c_level_db.go | 40 +++++++++++++++++-------- db/go_level_db.go | 24 +++++++++++---- db/util_test.go | 75 +++++++++++++---------------------------------- 3 files changed, 66 insertions(+), 73 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index ff8347cc7..8e2a9372d 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -3,6 +3,7 @@ package db import ( + "bytes" "fmt" "path/filepath" @@ -166,7 +167,7 @@ func (db *CLevelDB) Iterator(start, end []byte) Iterator { } else { itr.SeekToFirst() } - return cLevelDBIterator{ + return &cLevelDBIterator{ itr: itr, start: start, end: end, @@ -183,43 +184,58 @@ var _ Iterator = (*cLevelDBIterator)(nil) type cLevelDBIterator struct { itr *levigo.Iterator start, end []byte + invalid bool } -func (c cLevelDBIterator) Domain() ([]byte, []byte) { +func (c *cLevelDBIterator) Domain() ([]byte, []byte) { return c.start, c.end } -func (c cLevelDBIterator) Valid() bool { +func (c *cLevelDBIterator) Valid() bool { c.assertNoError() - return c.itr.Valid() + if c.invalid { + return false + } + c.invalid = !c.itr.Valid() + return !c.invalid } -func (c cLevelDBIterator) Key() []byte { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Key() []byte { + if !c.Valid() { panic("cLevelDBIterator Key() called when invalid") } return c.itr.Key() } -func (c cLevelDBIterator) Value() []byte { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Value() []byte { + if !c.Valid() { panic("cLevelDBIterator Value() called when invalid") } return c.itr.Value() } -func (c cLevelDBIterator) Next() { - if !c.itr.Valid() { +func (c *cLevelDBIterator) Next() { + if !c.Valid() { panic("cLevelDBIterator Next() 
called when invalid") } c.itr.Next() + c.checkEndKey() // if we've exceeded the range, we're now invalid +} + +// levigo has no upper bound when iterating, so need to check ourselves +func (c *cLevelDBIterator) checkEndKey() []byte { + key := c.itr.Key() + if c.end != nil && bytes.Compare(key, c.end) > 0 { + c.invalid = true + } + return key } -func (c cLevelDBIterator) Release() { +func (c *cLevelDBIterator) Release() { c.itr.Close() } -func (c cLevelDBIterator) assertNoError() { +func (c *cLevelDBIterator) assertNoError() { if err := c.itr.GetError(); err != nil { panic(err) } diff --git a/db/go_level_db.go b/db/go_level_db.go index d741c6904..0d24020e0 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -8,6 +8,7 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" . "github.com/tendermint/tmlibs/common" ) @@ -169,13 +170,24 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator -func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itr := db.db.NewIterator(nil, nil) - if len(start) > 0 { - itr.Seek(start) - } else { - itr.First() +// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator +// A nil Range.Start is treated as a key before all keys in the DB. +// And a nil Range.Limit is treated as a key after all keys in the DB. +func goLevelDBIterRange(start, end []byte) *util.Range { + // XXX: what if start == nil ? + if len(start) == 0 { + start = nil + } + return &util.Range{ + Start: start, + Limit: end, } +} + +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itrRange := goLevelDBIterRange(start, end) + itr := db.db.NewIterator(itrRange, nil) + itr.Seek(start) // if we don't call this the itr is never valid (?!) return &goLevelDBIterator{ source: itr, start: start, diff --git a/db/util_test.go b/db/util_test.go index a0ce9cd51..b273f8d46 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -5,6 +5,7 @@ import ( "testing" ) +// empty iterator for empty db func TestPrefixIteratorNoMatchNil(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -16,6 +17,7 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { } } +// empty iterator for db populated after iterator created func TestPrefixIteratorNoMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -28,24 +30,8 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { } } -func TestPrefixIteratorMatch2(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) - db.SetSync(bz("2"), bz("value_2")) - itr := IteratePrefix(db, []byte("2")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("2"), bz("value_2")) - checkNext(t, itr, false) - - // Once invalid... 
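			// (The single matching key "2" has already been consumed; Next() stepped past
			// the end of the prefix range, so the iterator must remain invalid from here on.)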
- checkInvalid(t, itr) - }) - } -} - -func TestPrefixIteratorMatch3(t *testing.T) { +// empty iterator for prefix starting above db entry +func TestPrefixIteratorNoMatch2(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) @@ -58,21 +44,16 @@ func TestPrefixIteratorMatch3(t *testing.T) { } } -// Search for a/1, fail by too much Next() -func TestPrefixIteratorMatches1N(t *testing.T) { +// iterator with single val for db with single val, starting from that val +func TestPrefixIteratorMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) + db.SetSync(bz("2"), bz("value_2")) + itr := IteratePrefix(db, bz("2")) checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! + checkItem(t, itr, bz("2"), bz("value_2")) checkNext(t, itr, false) // Once invalid... @@ -81,38 +62,22 @@ func TestPrefixIteratorMatches1N(t *testing.T) { } } -// Search for a/2, fail by too much Next() -func TestPrefixIteratorMatches2N(t *testing.T) { +// iterator with prefix iterates over everything with same prefix +func TestPrefixIteratorMatches1N(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - db.SetSync(bz("a/1"), bz("value_1")) - db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) - - checkValid(t, itr, true) - checkItem(t, itr, bz("a/1"), bz("value_1")) - checkNext(t, itr, true) - checkValid(t, itr, true) - checkItem(t, itr, bz("a/3"), bz("value_3")) - - // Bad! - checkNext(t, itr, false) - - // Once invalid... - checkInvalid(t, itr) - }) - } -} -// Search for a/3, fail by too much Next() -func TestPrefixIteratorMatches3N(t *testing.T) { - for backend, _ := range backends { - t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { - db := newTempDB(t, backend) + // prefixed db.SetSync(bz("a/1"), bz("value_1")) db.SetSync(bz("a/3"), bz("value_3")) - itr := IteratePrefix(db, []byte("a/")) + + // not + db.SetSync(bz("b/3"), bz("value_3")) + db.SetSync(bz("a-3"), bz("value_3")) + db.SetSync(bz("a.3"), bz("value_3")) + db.SetSync(bz("abcdefg"), bz("value_3")) + itr := IteratePrefix(db, bz("a/")) checkValid(t, itr, true) checkItem(t, itr, bz("a/1"), bz("value_1")) @@ -122,7 +87,7 @@ func TestPrefixIteratorMatches3N(t *testing.T) { // Bad! checkNext(t, itr, false) - // Once invalid... + //Once invalid... 
checkInvalid(t, itr) }) } From a7b20d4e46db417d2256dfe81d910834348e2dc5 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 15 Dec 2017 02:48:40 -0700 Subject: [PATCH 23/68] db: Simplify exists check, fix IsKeyInDomain signature, Iterator Close + *FSDB.HasKey now uses common.FileExists to test for file existence + IsKeyInDomain takes key as a []byte slice instead of as a string to avoid extraneous []byte<-->string conversions for start and end + Iterator.Close() instead of Iterator.Release() + withDB helper to encapsulate DB creation, deferred cleanups so that for loops can use opened DBs and discard them ASAP Addressing accepted changes from review with @jaekwon --- db/backend_test.go | 48 +++++++++++++++++++++++++--------------------- db/c_level_db.go | 4 ++-- db/fsdb.go | 11 +++-------- db/go_level_db.go | 4 ---- db/mem_db.go | 4 +--- db/types.go | 4 ++-- db/util.go | 7 +++---- 7 files changed, 37 insertions(+), 45 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 7ead549b0..00fece515 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -45,33 +45,37 @@ func TestBackendsGetSetDelete(t *testing.T) { } } +func withDB(t *testing.T, creator dbCreator, fn func(DB)) { + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db, err := creator(name, "") + defer cleanupDBDir("", name) + assert.Nil(t, err) + fn(db) + db.Close() +} + func TestBackendsNilKeys(t *testing.T) { // test all backends for dbType, creator := range backends { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db, err := creator(name, "") - defer cleanupDBDir("", name) - assert.Nil(t, err) - - panicMsg := "expecting %s.%s to panic" - assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") - assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") - assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") - assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") - assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") - assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") - - db.Close() + withDB(t, creator, func(db DB) { + panicMsg := "expecting %s.%s to panic" + assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") + assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") + assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") + assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") + assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") + assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") + }) } } func TestGoLevelDBBackendStr(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") - defer cleanupDBDir("", name) - - if _, ok := backends[CLevelDBBackendStr]; !ok { - _, ok := db.(*GoLevelDB) - assert.True(t, ok) - } + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer cleanupDBDir("", name) + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db.go b/db/c_level_db.go index 8e2a9372d..961e4d090 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -109,7 +109,7 @@ func (db *CLevelDB) Close() { func (db *CLevelDB) Print() { itr := db.Iterator(BeginningKey(), EndingKey()) - defer itr.Release() + defer itr.Close() for ; itr.Valid(); itr.Next() { key := itr.Key() value := itr.Value() @@ -231,7 
+231,7 @@ func (c *cLevelDBIterator) checkEndKey() []byte { return key } -func (c *cLevelDBIterator) Release() { +func (c *cLevelDBIterator) Close() { c.itr.Close() } diff --git a/db/fsdb.go b/db/fsdb.go index 85adae630..056cc3982 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -10,6 +10,7 @@ import ( "sync" "github.com/pkg/errors" + cmn "github.com/tendermint/tmlibs/common" ) const ( @@ -64,13 +65,7 @@ func (db *FSDB) Has(key []byte) bool { panicNilKey(key) path := db.nameToPath(key) - _, err := read(path) - if os.IsNotExist(err) { - return false - } else if err != nil { - panic(errors.Wrapf(err, "Getting key %s (0x%X)", string(key), key)) - } - return true + return cmn.FileExists(path) } func (db *FSDB) Set(key []byte, value []byte) { @@ -246,7 +241,7 @@ func list(dirPath string, start, end []byte) ([]string, error) { if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - if IsKeyInDomain(n, start, end) { + if IsKeyInDomain([]byte(n), start, end) { paths = append(paths, n) } } diff --git a/db/go_level_db.go b/db/go_level_db.go index 0d24020e0..45cb04984 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -263,10 +263,6 @@ func (it *goLevelDBIterator) Close() { it.source.Release() } -func (it *goLevelDBIterator) Release() { - it.source.Release() -} - func (it *goLevelDBIterator) assertNoError() { if err := it.source.Error(); err != nil { panic(err) diff --git a/db/mem_db.go b/db/mem_db.go index d20d0e7ea..44254870a 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -157,7 +157,7 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { func (db *MemDB) getSortedKeys(start, end []byte) []string { keys := []string{} for key, _ := range db.db { - if IsKeyInDomain(key, start, end) { + if IsKeyInDomain([]byte(key), start, end) { keys = append(keys, key) } } @@ -222,5 +222,3 @@ func (it *memDBIterator) Close() { it.db = nil it.keys = nil } - -func (it *memDBIterator) Release() {} diff --git a/db/types.go b/db/types.go index 8370ff2da..ee8d69cc1 100644 --- a/db/types.go +++ b/db/types.go @@ -68,7 +68,7 @@ func EndingKey() []byte { Usage: var itr Iterator = ... - defer itr.Release() + defer itr.Close() for ; itr.Valid(); itr.Next() { k, v := itr.Key(); itr.Value() @@ -108,7 +108,7 @@ type Iterator interface { Value() []byte // Release deallocates the given Iterator. - Release() + Close() } // For testing convenience. diff --git a/db/util.go b/db/util.go index 203ddcfaf..661d0a16f 100644 --- a/db/util.go +++ b/db/util.go @@ -2,7 +2,6 @@ package db import ( "bytes" - "strings" ) func IteratePrefix(db DB, prefix []byte) Iterator { @@ -39,8 +38,8 @@ func cpIncr(bz []byte) (ret []byte) { return EndingKey() } -func IsKeyInDomain(key string, start, end []byte) bool { - leftCondition := bytes.Equal(start, BeginningKey()) || strings.Compare(key, string(start)) >= 0 - rightCondition := bytes.Equal(end, EndingKey()) || strings.Compare(key, string(end)) < 0 +func IsKeyInDomain(key, start, end []byte) bool { + leftCondition := bytes.Equal(start, BeginningKey()) || bytes.Compare(key, start) >= 0 + rightCondition := bytes.Equal(end, EndingKey()) || bytes.Compare(key, end) < 0 return leftCondition && rightCondition } From 225eace3169c1d1b3c5f0a6786b9f060f2784d38 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 15 Dec 2017 15:14:48 -0500 Subject: [PATCH 24/68] dont run metalinter on circle ... 
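
This commit only drops the linter step from the CircleCI override; the make
targets themselves are unchanged. A rough local equivalent, assuming the
targets referenced in the circle.yml change below:

    make get_vendor_deps && bash ./test.sh   # what CI runs after this change
    make metalinter_test                     # lint pass, now run manually when needed
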
--- circle.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index 3dba976be..104cfa6f3 100644 --- a/circle.yml +++ b/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_vendor_deps && make metalinter_test && bash ./test.sh + - cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" From 66b0e8fa2d3b8632bad1eb84076dfe70c84657bf Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 15 Dec 2017 15:58:23 -0500 Subject: [PATCH 25/68] fix c level db iterator --- db/backend_test.go | 16 ++++++++-------- db/c_level_db.go | 34 ++++++++++++++++++++++------------ 2 files changed, 30 insertions(+), 20 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 00fece515..3362fecf6 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -70,12 +70,12 @@ func TestBackendsNilKeys(t *testing.T) { } func TestGoLevelDBBackendStr(t *testing.T) { - name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") - defer cleanupDBDir("", name) - - if _, ok := backends[CLevelDBBackendStr]; !ok { - _, ok := db.(*GoLevelDB) - assert.True(t, ok) - } + name := cmn.Fmt("test_%x", cmn.RandStr(12)) + db := NewDB(name, LevelDBBackendStr, "") + defer cleanupDBDir("", name) + + if _, ok := backends[CLevelDBBackendStr]; !ok { + _, ok := db.(*GoLevelDB) + assert.True(t, ok) + } } diff --git a/db/c_level_db.go b/db/c_level_db.go index 961e4d090..60198d84c 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -162,16 +162,7 @@ func (mBatch *cLevelDBBatch) Write() { func (db *CLevelDB) Iterator(start, end []byte) Iterator { itr := db.db.NewIterator(db.ro) - if len(start) > 0 { - itr.Seek(start) - } else { - itr.SeekToFirst() - } - return &cLevelDBIterator{ - itr: itr, - start: start, - end: end, - } + return newCLevelDBIterator(itr, start, end) } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { @@ -187,6 +178,21 @@ type cLevelDBIterator struct { invalid bool } +func newCLevelDBIterator(itr *levigo.Iterator, start, end []byte) *cLevelDBIterator { + + if len(start) > 0 { + itr.Seek(start) + } else { + itr.SeekToFirst() + } + + return &cLevelDBIterator{ + itr: itr, + start: start, + end: end, + } +} + func (c *cLevelDBIterator) Domain() ([]byte, []byte) { return c.start, c.end } @@ -223,12 +229,16 @@ func (c *cLevelDBIterator) Next() { } // levigo has no upper bound when iterating, so need to check ourselves -func (c *cLevelDBIterator) checkEndKey() []byte { +func (c *cLevelDBIterator) checkEndKey() { + if !c.itr.Valid() { + c.invalid = true + return + } + key := c.itr.Key() if c.end != nil && bytes.Compare(key, c.end) > 0 { c.invalid = true } - return key } func (c *cLevelDBIterator) Close() { From aab2d70dd34ec8a1aa780f7562193110fe8cb809 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 17 Dec 2017 13:04:15 -0800 Subject: [PATCH 26/68] Sdk2 kvpair (#102) * Canonical KVPair in common * Simplify common/Bytes to just hex encode --- common/bytes.go | 53 ++++++++++++++++++++++++++ common/bytes_test.go | 68 ++++++++++++++++++++++++++++++++++ common/kvpair.go | 30 +++++++++++++++ glide.lock | 48 ++++++++++++------------ merkle/kvpairs.go | 48 ------------------------ merkle/simple_map.go | 78 ++++++++++++++++++++++++++++++++++----- merkle/simple_map_test.go | 12 +++--- merkle/simple_tree.go | 7 +++- 8 files changed, 254 insertions(+), 90 
deletions(-) create mode 100644 common/bytes.go create mode 100644 common/bytes_test.go create mode 100644 common/kvpair.go delete mode 100644 merkle/kvpairs.go diff --git a/common/bytes.go b/common/bytes.go new file mode 100644 index 000000000..d9ede98df --- /dev/null +++ b/common/bytes.go @@ -0,0 +1,53 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of Bytes is to enable HEX-encoding for json/encoding. +type Bytes []byte + +// Marshal needed for protobuf compatibility +func (b Bytes) Marshal() ([]byte, error) { + return b, nil +} + +// Unmarshal needed for protobuf compatibility +func (b *Bytes) Unmarshal(data []byte) error { + *b = data + return nil +} + +// This is the point of Bytes. +func (b Bytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(b)) + jb := make([]byte, len(s)+2) + jb[0] = '"' + copy(jb[1:], []byte(s)) + jb[1] = '"' + return jb, nil +} + +// This is the point of Bytes. +func (b *Bytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bytes, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *b = bytes + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (b Bytes) Bytes() []byte { + return b +} + +func (b Bytes) String() string { + return strings.ToUpper(hex.EncodeToString(b)) +} diff --git a/common/bytes_test.go b/common/bytes_test.go new file mode 100644 index 000000000..0c0eacc33 --- /dev/null +++ b/common/bytes_test.go @@ -0,0 +1,68 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + assert := assert.New(t) + + b := []byte("hello world") + dataB := Bytes(b) + b2, err := dataB.Marshal() + assert.Nil(err) + assert.Equal(b, b2) + + var dataB2 Bytes + err = (&dataB2).Unmarshal(b) + assert.Nil(err) + assert.Equal(dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + assert := assert.New(t) + + type TestStruct struct { + B1 []byte + B2 Bytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. 
+ ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(ts2.B1, tc.input) + assert.Equal(ts2.B2, Bytes(tc.input)) + }) + } +} diff --git a/common/kvpair.go b/common/kvpair.go new file mode 100644 index 000000000..b9e45733f --- /dev/null +++ b/common/kvpair.go @@ -0,0 +1,30 @@ +package common + +import ( + "bytes" + "sort" +) + +type KVPair struct { + Key Bytes + Value Bytes +} + +type KVPairs []KVPair + +// Sorting +func (kvs KVPairs) Len() int { return len(kvs) } +func (kvs KVPairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return bytes.Compare(kvs[i].Value, kvs[j].Value) < 0 + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KVPairs) Sort() { sort.Sort(kvs) } diff --git a/glide.lock b/glide.lock index b0b3ff3c7..e87782d21 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-08-11T20:28:34.550901198Z +updated: 2017-12-17T12:50:35.983353926-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: 0873e56b0faeae3a1d661b10d629135508ea5504 + version: e3b2152e0063c5f05efea89ecbe297852af2a92d subpackages: - log - log/level @@ -12,17 +12,17 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-playground/locales - version: 1e5f1161c6416a5ff48840eb8724a394e48cc534 + version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6 subpackages: - currency - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 7a2f19628aabfe68f0766b59e74d6315f8347d22 + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl - version: a4b07c25de5ff55ad3b8936cea69a79a3d95a855 + version: 23c074d0eceb2b8a5bfdbb271ab780cde70f05a8 subpackages: - hcl/ast - hcl/parser @@ -39,35 +39,33 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 51463bfca2576e06c62a8504b5c0f06d61312647 + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mattn/go-colorable - version: ded68f7a9561c023e790de24279db7ebf473ea80 + version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 - name: github.com/mattn/go-isatty - version: fc9e8d8ef48496124e79ae0df75490096eccf6fe + version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure - version: cc8532a8e9a55ea36402aa21efdf403a60d34096 -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: 06020f85339e21b2478f756a78e295255ffa4d6a - name: github.com/pelletier/go-toml - version: 97253b98df84f9eef872866d079e74b8265150f1 + version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd - name: github.com/pkg/errors - version: c605e284fe17294bda444b34710735b29d1a9d90 + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 9be650865eab0c12963d8753212f4f9c66cdcf12 + version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536 subpackages: - mem - name: github.com/spf13/cast version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 - name: github.com/spf13/cobra - 
version: db6b9a8b3f3f400c8ecb4a4d7d02245b8facad66 + version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: fa7ca7e836cf3a8bb4ebf799f472c12d7e903d66 + version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 80fe0fb4eba54167e2ccae1c6c950e72abf61b73 + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb - version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4 + version: adf24ef3f94bd13ec4163060b21a5678f22b429b subpackages: - leveldb - leveldb/cache @@ -82,7 +80,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b53add0b622662731985485f3a19be7f684660b8 + version: b6fc872b42d41158a60307db4da051dd6f179415 subpackages: - data - data/base58 @@ -91,22 +89,22 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 5a033cc77e57eca05bdb50522851d29e03569cbe + version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 subpackages: - ripemd160 - name: golang.org/x/sys - version: 9ccfe848b9db8435a24c424abbc07a921adf1df5 + version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840 subpackages: - unix - name: golang.org/x/text - version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4 + version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 subpackages: - transform - unicode/norm - name: gopkg.in/go-playground/validator.v9 - version: d529ee1b0f30352444f507cc6cdac96bfd12decc + version: 61caf9d3038e1af346dbf5c2e16f6678e1548364 - name: gopkg.in/yaml.v2 - version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 testImports: - name: github.com/davecgh/go-spew version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 diff --git a/merkle/kvpairs.go b/merkle/kvpairs.go deleted file mode 100644 index 3d67049f2..000000000 --- a/merkle/kvpairs.go +++ /dev/null @@ -1,48 +0,0 @@ -package merkle - -import ( - "sort" - - wire "github.com/tendermint/go-wire" - "golang.org/x/crypto/ripemd160" -) - -// NOTE: Behavior is undefined with dup keys. -type KVPair struct { - Key string - Value interface{} // Can be Hashable or not. 
-} - -func (kv KVPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteString(kv.Key, hasher, n, err) - if kvH, ok := kv.Value.(Hashable); ok { - wire.WriteByteSlice(kvH.Hash(), hasher, n, err) - } else { - wire.WriteBinary(kv.Value, hasher, n, err) - } - if *err != nil { - panic(*err) - } - return hasher.Sum(nil) -} - -type KVPairs []KVPair - -func (kvps KVPairs) Len() int { return len(kvps) } -func (kvps KVPairs) Less(i, j int) bool { return kvps[i].Key < kvps[j].Key } -func (kvps KVPairs) Swap(i, j int) { kvps[i], kvps[j] = kvps[j], kvps[i] } -func (kvps KVPairs) Sort() { sort.Sort(kvps) } - -func MakeSortedKVPairs(m map[string]interface{}) []Hashable { - kvPairs := make([]KVPair, 0, len(m)) - for k, v := range m { - kvPairs = append(kvPairs, KVPair{k, v}) - } - KVPairs(kvPairs).Sort() - kvPairsH := make([]Hashable, 0, len(kvPairs)) - for _, kvp := range kvPairs { - kvPairsH = append(kvPairsH, kvp) - } - return kvPairsH -} diff --git a/merkle/simple_map.go b/merkle/simple_map.go index 43dce990f..003c7cd42 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -1,26 +1,86 @@ package merkle +import ( + "github.com/tendermint/go-wire" + cmn "github.com/tendermint/tmlibs/common" + "golang.org/x/crypto/ripemd160" +) + type SimpleMap struct { - kvz KVPairs + kvs cmn.KVPairs + sorted bool } func NewSimpleMap() *SimpleMap { return &SimpleMap{ - kvz: nil, + kvs: nil, + sorted: false, } } -func (sm *SimpleMap) Set(k string, o interface{}) { - sm.kvz = append(sm.kvz, KVPair{Key: k, Value: o}) +func (sm *SimpleMap) Set(key string, value interface{}) { + sm.sorted = false + + // Is value Hashable? + var vBytes []byte + if hashable, ok := value.(Hashable); ok { + vBytes = hashable.Hash() + } else { + vBytes = wire.BinaryBytes(value) + } + + sm.kvs = append(sm.kvs, cmn.KVPair{ + Key: []byte(key), + Value: vBytes, + }) } // Merkle root hash of items sorted by key. // NOTE: Behavior is undefined when key is duplicate. func (sm *SimpleMap) Hash() []byte { - sm.kvz.Sort() - kvPairsH := make([]Hashable, 0, len(sm.kvz)) - for _, kvp := range sm.kvz { - kvPairsH = append(kvPairsH, kvp) + sm.Sort() + return hashKVPairs(sm.kvs) +} + +func (sm *SimpleMap) Sort() { + if sm.sorted { + return + } + sm.kvs.Sort() + sm.sorted = true +} + +// Returns a copy of sorted KVPairs. +// CONTRACT: The returned slice must not be mutated. +func (sm *SimpleMap) KVPairs() cmn.KVPairs { + sm.Sort() + kvs := make(cmn.KVPairs, len(sm.kvs)) + copy(kvs, sm.kvs) + return kvs +} + +//---------------------------------------- + +// A local extension to KVPair that can be hashed. 
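// Example (sketch): typical use of the SimpleMap above; values may be Hashable
// or any go-wire encodable value, and Hash() sorts by key first, so insertion
// order does not matter:
//
//	sm := NewSimpleMap()
//	sm.Set("key2", "value2")
//	sm.Set("key1", "value1")
//	root := sm.Hash()    // Merkle root over the key-sorted pairs
//	kvs := sm.KVPairs()  // sorted copy; must not be mutated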
+type kvPair cmn.KVPair + +func (kv kvPair) Hash() []byte { + hasher, n, err := ripemd160.New(), new(int), new(error) + wire.WriteByteSlice(kv.Key, hasher, n, err) + if *err != nil { + panic(*err) + } + wire.WriteByteSlice(kv.Value, hasher, n, err) + if *err != nil { + panic(*err) + } + return hasher.Sum(nil) +} + +func hashKVPairs(kvs cmn.KVPairs) []byte { + kvsH := make([]Hashable, 0, len(kvs)) + for _, kvp := range kvs { + kvsH = append(kvsH, kvPair(kvp)) } - return SimpleHashFromHashables(kvPairsH) + return SimpleHashFromHashables(kvsH) } diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 5eb218274..8ba7ce66b 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -11,37 +11,37 @@ func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() db.Set("key1", "value1") - assert.Equal(t, "376bf717ebe3659a34f68edb833dfdcf4a2d3c10", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "3bb53f017d2f5b4f144692aa829a5c245ac2b123", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value2") - assert.Equal(t, "72fd3a7224674377952214cb10ef21753ec803eb", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "14a68db29e3f930ffaafeff5e07c17a439384f39", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") db.Set("key2", "value2") - assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") - assert.Equal(t, "23a160bd4eea5b2fcc0755d722f9112a15999abc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") db.Set("key2", "value2") db.Set("key3", "value3") - assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") db.Set("key3", "value3") - assert.Equal(t, "40df7416429148d03544cfafa86e1080615cd2bc", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index d64082b43..3a82f4edc 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -88,6 +88,9 @@ func SimpleHashFromHashables(items []Hashable) []byte { // Convenience for SimpleHashFromHashes. 
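// For example (sketch): SimpleHashFromMap(map[string]interface{}{"key1": "value1",
// "key2": "value2"}) returns the same root as building a SimpleMap by hand, since
// it simply Sets every entry and calls Hash().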
func SimpleHashFromMap(m map[string]interface{}) []byte { - kpPairsH := MakeSortedKVPairs(m) - return SimpleHashFromHashables(kpPairsH) + sm := NewSimpleMap() + for k, v := range m { + sm.Set(k, v) + } + return sm.Hash() } From 4ce8448d7fcf92b040046f894474ce2f7e779b67 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 17 Dec 2017 13:11:28 -0800 Subject: [PATCH 27/68] Nil keys are OK, deprecate BeginningKey/EndingKey (#101) * Nil keys are OK, deprecate BeginningKey/EndingKey --- db/backend_test.go | 95 ++++++++++++++++++++++---- db/c_level_db.go | 132 ++++++++++++++++++++---------------- db/common_test.go | 34 ++++++++-- db/fsdb.go | 51 +++++++------- db/go_level_db.go | 163 ++++++++++++++++++++++----------------------- db/mem_db.go | 124 ++++++++++++++++------------------ db/mem_db_test.go | 48 ------------- db/types.go | 45 +++++++------ db/util.go | 29 ++++++-- db/util_test.go | 11 ++- 10 files changed, 400 insertions(+), 332 deletions(-) delete mode 100644 db/mem_db_test.go diff --git a/db/backend_test.go b/db/backend_test.go index 3362fecf6..e103843dc 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -21,6 +21,13 @@ func testBackendGetSetDelete(t *testing.T, backend string) { defer dir.Close() db := NewDB("testdb", backend, dirname) + // A nonexistent key should return nil, even if the key is empty. + require.Nil(t, db.Get([]byte(""))) + + // A nonexistent key should return nil, even if the key is nil. + require.Nil(t, db.Get(nil)) + + // A nonexistent key should return nil. key := []byte("abc") require.Nil(t, db.Get(key)) @@ -55,27 +62,89 @@ func withDB(t *testing.T, creator dbCreator, fn func(DB)) { } func TestBackendsNilKeys(t *testing.T) { - // test all backends + // test all backends. + // nil keys are treated as the empty key for most operations. 
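	// For example (sketch of the intended semantics): after db.Set(nil, []byte("abc")),
	// both db.Get(nil) and db.Get([]byte("")) return []byte("abc"), and a subsequent
	// db.Delete([]byte("")) removes it again; nil and empty keys are interchangeable.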
for dbType, creator := range backends { withDB(t, creator, func(db DB) { - panicMsg := "expecting %s.%s to panic" - assert.Panics(t, func() { db.Get(nil) }, panicMsg, dbType, "get") - assert.Panics(t, func() { db.Has(nil) }, panicMsg, dbType, "has") - assert.Panics(t, func() { db.Set(nil, []byte("abc")) }, panicMsg, dbType, "set") - assert.Panics(t, func() { db.SetSync(nil, []byte("abc")) }, panicMsg, dbType, "setsync") - assert.Panics(t, func() { db.Delete(nil) }, panicMsg, dbType, "delete") - assert.Panics(t, func() { db.DeleteSync(nil) }, panicMsg, dbType, "deletesync") + t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { + + expect := func(key, value []byte) { + if len(key) == 0 { // nil or empty + assert.Equal(t, db.Get(nil), db.Get([]byte(""))) + assert.Equal(t, db.Has(nil), db.Has([]byte(""))) + } + assert.Equal(t, db.Get(key), value) + assert.Equal(t, db.Has(key), value != nil) + } + + // Not set + expect(nil, nil) + + // Set nil value + db.Set(nil, nil) + expect(nil, []byte("")) + + // Set empty value + db.Set(nil, []byte("")) + expect(nil, []byte("")) + + // Set nil, Delete nil + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set nil, Delete empty + db.Set(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // Set empty, Delete nil + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete(nil) + expect(nil, nil) + + // Set empty, Delete empty + db.Set([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.Delete([]byte("")) + expect(nil, nil) + + // SetSync nil, DeleteSync nil + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync nil, DeleteSync empty + db.SetSync(nil, []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + + // SetSync empty, DeleteSync nil + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync(nil) + expect(nil, nil) + + // SetSync empty, DeleteSync empty + db.SetSync([]byte(""), []byte("abc")) + expect(nil, []byte("abc")) + db.DeleteSync([]byte("")) + expect(nil, nil) + }) }) } } func TestGoLevelDBBackendStr(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") + db := NewDB(name, GoLevelDBBackendStr, "") defer cleanupDBDir("", name) - if _, ok := backends[CLevelDBBackendStr]; !ok { - _, ok := db.(*GoLevelDB) - assert.True(t, ok) - } + _, ok := db.(*GoLevelDB) + assert.True(t, ok) } diff --git a/db/c_level_db.go b/db/c_level_db.go index 60198d84c..c9f8d419b 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -51,7 +51,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { } func (db *CLevelDB) Get(key []byte) []byte { - panicNilKey(key) + key = nonNilBytes(key) res, err := db.db.Get(db.ro, key) if err != nil { panic(err) @@ -60,12 +60,12 @@ func (db *CLevelDB) Get(key []byte) []byte { } func (db *CLevelDB) Has(key []byte) bool { - panicNilKey(key) - panic("not implemented yet") + return db.Get(key) != nil } func (db *CLevelDB) Set(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(db.wo, key, value) if err != nil { panic(err) @@ -73,7 +73,8 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } func (db *CLevelDB) SetSync(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(db.woSync, key, value) if err != 
nil { panic(err) @@ -81,7 +82,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } func (db *CLevelDB) Delete(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(db.wo, key) if err != nil { panic(err) @@ -89,7 +90,7 @@ func (db *CLevelDB) Delete(key []byte) { } func (db *CLevelDB) DeleteSync(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(db.woSync, key) if err != nil { panic(err) @@ -108,7 +109,7 @@ func (db *CLevelDB) Close() { } func (db *CLevelDB) Print() { - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) defer itr.Close() for ; itr.Valid(); itr.Next() { key := itr.Key() @@ -159,94 +160,107 @@ func (mBatch *cLevelDBBatch) Write() { //---------------------------------------- // Iterator +// NOTE This is almost identical to db/go_level_db.Iterator +// Before creating a third version, refactor. func (db *CLevelDB) Iterator(start, end []byte) Iterator { itr := db.db.NewIterator(db.ro) - return newCLevelDBIterator(itr, start, end) + return newCLevelDBIterator(itr, start, end, false) } func (db *CLevelDB) ReverseIterator(start, end []byte) Iterator { - // XXX - return nil + panic("not implemented yet") // XXX } var _ Iterator = (*cLevelDBIterator)(nil) type cLevelDBIterator struct { - itr *levigo.Iterator + source *levigo.Iterator start, end []byte - invalid bool + isReverse bool + isInvalid bool } -func newCLevelDBIterator(itr *levigo.Iterator, start, end []byte) *cLevelDBIterator { - - if len(start) > 0 { - itr.Seek(start) +func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse bool) *cLevelDBIterator { + if isReverse { + panic("not implemented yet") // XXX + } + if start != nil { + source.Seek(start) } else { - itr.SeekToFirst() + source.SeekToFirst() } - return &cLevelDBIterator{ - itr: itr, - start: start, - end: end, + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, } } -func (c *cLevelDBIterator) Domain() ([]byte, []byte) { - return c.start, c.end +func (itr *cLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end } -func (c *cLevelDBIterator) Valid() bool { - c.assertNoError() - if c.invalid { +func (itr *cLevelDBIterator) Valid() bool { + + // Once invalid, forever invalid. + if itr.isInvalid { return false } - c.invalid = !c.itr.Valid() - return !c.invalid -} -func (c *cLevelDBIterator) Key() []byte { - if !c.Valid() { - panic("cLevelDBIterator Key() called when invalid") + // Panic on DB error. No way to recover. + itr.assertNoError() + + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false } - return c.itr.Key() -} -func (c *cLevelDBIterator) Value() []byte { - if !c.Valid() { - panic("cLevelDBIterator Value() called when invalid") + // If key is end or past it, invalid. 
+ var end = itr.end + var key = itr.source.Key() + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true + return false } - return c.itr.Value() + + // Valid + return true } -func (c *cLevelDBIterator) Next() { - if !c.Valid() { - panic("cLevelDBIterator Next() called when invalid") - } - c.itr.Next() - c.checkEndKey() // if we've exceeded the range, we're now invalid +func (itr *cLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() } -// levigo has no upper bound when iterating, so need to check ourselves -func (c *cLevelDBIterator) checkEndKey() { - if !c.itr.Valid() { - c.invalid = true - return - } +func (itr *cLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} - key := c.itr.Key() - if c.end != nil && bytes.Compare(key, c.end) > 0 { - c.invalid = true - } +func (itr *cLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + itr.source.Next() } -func (c *cLevelDBIterator) Close() { - c.itr.Close() +func (itr *cLevelDBIterator) Close() { + itr.source.Close() } -func (c *cLevelDBIterator) assertNoError() { - if err := c.itr.GetError(); err != nil { +func (itr *cLevelDBIterator) assertNoError() { + if err := itr.source.GetError(); err != nil { panic(err) } } + +func (itr cLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("cLevelDBIterator is invalid") + } +} diff --git a/db/common_test.go b/db/common_test.go index 6b3009795..2a5d01818 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -57,7 +57,7 @@ func TestDBIteratorSingleKey(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) checkValid(t, itr, true) checkNext(t, itr, false) @@ -78,7 +78,7 @@ func TestDBIteratorTwoKeys(t *testing.T) { db.SetSync(bz("2"), bz("value_1")) { // Fail by calling Next too much - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) checkValid(t, itr, true) checkNext(t, itr, true) @@ -96,11 +96,35 @@ func TestDBIteratorTwoKeys(t *testing.T) { } } +func TestDBIteratorMany(t *testing.T) { + for backend, _ := range backends { + t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { + db := newTempDB(t, backend) + + keys := make([][]byte, 100) + for i := 0; i < 100; i++ { + keys[i] = []byte{byte(i)} + } + + value := []byte{5} + for _, k := range keys { + db.Set(k, value) + } + + itr := db.Iterator(nil, nil) + defer itr.Close() + for ; itr.Valid(); itr.Next() { + assert.Equal(t, db.Get(itr.Key()), itr.Value()) + } + }) + } +} + func TestDBIteratorEmpty(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator(BeginningKey(), EndingKey()) + itr := db.Iterator(nil, nil) checkInvalid(t, itr) }) @@ -111,7 +135,7 @@ func TestDBIteratorEmptyBeginAfter(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) - itr := db.Iterator(bz("1"), EndingKey()) + itr := db.Iterator(bz("1"), nil) checkInvalid(t, itr) }) @@ -123,7 +147,7 @@ func TestDBIteratorNonemptyBeginAfter(t *testing.T) { t.Run(fmt.Sprintf("Backend %s", backend), func(t *testing.T) { db := newTempDB(t, backend) db.SetSync(bz("1"), bz("value_1")) - itr := db.Iterator(bz("2"), EndingKey()) + itr := db.Iterator(bz("2"), 
nil) checkInvalid(t, itr) }) diff --git a/db/fsdb.go b/db/fsdb.go index 056cc3982..45c3231f6 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -47,7 +47,7 @@ func NewFSDB(dir string) *FSDB { func (db *FSDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = escapeKey(key) path := db.nameToPath(key) value, err := read(path) @@ -62,7 +62,7 @@ func (db *FSDB) Get(key []byte) []byte { func (db *FSDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = escapeKey(key) path := db.nameToPath(key) return cmn.FileExists(path) @@ -71,7 +71,6 @@ func (db *FSDB) Has(key []byte) bool { func (db *FSDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.SetNoLock(key, value) } @@ -79,17 +78,14 @@ func (db *FSDB) Set(key []byte, value []byte) { func (db *FSDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) SetNoLock(key []byte, value []byte) { - panicNilKey(key) - if value == nil { - value = []byte{} - } + key = escapeKey(key) + value = nonNilBytes(value) path := db.nameToPath(key) err := write(path, value) if err != nil { @@ -100,7 +96,6 @@ func (db *FSDB) SetNoLock(key []byte, value []byte) { func (db *FSDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.DeleteNoLock(key) } @@ -108,14 +103,13 @@ func (db *FSDB) Delete(key []byte) { func (db *FSDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter. func (db *FSDB) DeleteNoLock(key []byte) { - panicNilKey(key) + key = escapeKey(key) path := db.nameToPath(key) err := remove(path) if os.IsNotExist(err) { @@ -157,8 +151,6 @@ func (db *FSDB) Mutex() *sync.Mutex { } func (db *FSDB) Iterator(start, end []byte) Iterator { - it := newMemDBIterator(db, start, end) - db.mtx.Lock() defer db.mtx.Unlock() @@ -169,13 +161,11 @@ func (db *FSDB) Iterator(start, end []byte) Iterator { panic(errors.Wrapf(err, "Listing keys in %s", db.dir)) } sort.Strings(keys) - it.keys = keys - return it + return newMemDBIterator(db, keys, start, end) } func (db *FSDB) ReverseIterator(start, end []byte) Iterator { - // XXX - return nil + panic("not implemented yet") // XXX } func (db *FSDB) nameToPath(name []byte) string { @@ -221,8 +211,7 @@ func remove(path string) error { return os.Remove(path) } -// List files of a path. -// Paths will NOT include dir as the prefix. +// List keys in a directory, stripping of escape sequences and dir portions. // CONTRACT: returns os errors directly without wrapping. func list(dirPath string, start, end []byte) ([]string, error) { dir, err := os.Open(dirPath) @@ -235,15 +224,31 @@ func list(dirPath string, start, end []byte) ([]string, error) { if err != nil { return nil, err } - var paths []string + var keys []string for _, name := range names { n, err := url.PathUnescape(name) if err != nil { return nil, fmt.Errorf("Failed to unescape %s while listing", name) } - if IsKeyInDomain([]byte(n), start, end) { - paths = append(paths, n) + key := unescapeKey([]byte(n)) + if IsKeyInDomain(key, start, end, false) { + keys = append(keys, string(key)) } } - return paths, nil + return keys, nil +} + +// To support empty or nil keys, while the file system doesn't allow empty +// filenames. 
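// For example (sketch): escapeKey(nil) and escapeKey([]byte("")) both yield the
// file name "k_", and escapeKey([]byte("abc")) yields "k_abc"; unescapeKey strips
// the "k_" prefix again when keys are listed.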
+func escapeKey(key []byte) []byte { + return []byte("k_" + string(key)) +} +func unescapeKey(escKey []byte) []byte { + if len(escKey) < 2 { + panic(fmt.Sprintf("Invalid esc key: %x", escKey)) + } + if string(escKey[:2]) != "k_" { + panic(fmt.Sprintf("Invalid esc key: %x", escKey)) + } + return escKey[2:] } diff --git a/db/go_level_db.go b/db/go_level_db.go index 45cb04984..bf2b3bf76 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -1,6 +1,7 @@ package db import ( + "bytes" "fmt" "path/filepath" @@ -8,7 +9,6 @@ import ( "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" - "github.com/syndtr/goleveldb/leveldb/util" . "github.com/tendermint/tmlibs/common" ) @@ -40,33 +40,25 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { } func (db *GoLevelDB) Get(key []byte) []byte { - panicNilKey(key) + key = nonNilBytes(key) res, err := db.db.Get(key, nil) if err != nil { if err == errors.ErrNotFound { return nil } else { - PanicCrisis(err) + panic(err) } } return res } func (db *GoLevelDB) Has(key []byte) bool { - panicNilKey(key) - _, err := db.db.Get(key, nil) - if err != nil { - if err == errors.ErrNotFound { - return false - } else { - PanicCrisis(err) - } - } - return true + return db.Get(key) != nil } func (db *GoLevelDB) Set(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(key, value, nil) if err != nil { PanicCrisis(err) @@ -74,7 +66,8 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } func (db *GoLevelDB) SetSync(key []byte, value []byte) { - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) err := db.db.Put(key, value, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -82,7 +75,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } func (db *GoLevelDB) Delete(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(key, nil) if err != nil { PanicCrisis(err) @@ -90,7 +83,7 @@ func (db *GoLevelDB) Delete(key []byte) { } func (db *GoLevelDB) DeleteSync(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) if err != nil { PanicCrisis(err) @@ -169,102 +162,104 @@ func (mBatch *goLevelDBBatch) Write() { //---------------------------------------- // Iterator +// NOTE This is almost identical to db/c_level_db.Iterator +// Before creating a third version, refactor. + +type goLevelDBIterator struct { + source iterator.Iterator + start []byte + end []byte + isReverse bool + isInvalid bool +} + +var _ Iterator = (*goLevelDBIterator)(nil) -// https://godoc.org/github.com/syndtr/goleveldb/leveldb#DB.NewIterator -// A nil Range.Start is treated as a key before all keys in the DB. -// And a nil Range.Limit is treated as a key after all keys in the DB. -func goLevelDBIterRange(start, end []byte) *util.Range { - // XXX: what if start == nil ? 
- if len(start) == 0 { - start = nil +func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse bool) *goLevelDBIterator { + if isReverse { + panic("not implemented yet") // XXX } - return &util.Range{ - Start: start, - Limit: end, + source.Seek(start) + return &goLevelDBIterator{ + source: source, + start: start, + end: end, + isReverse: isReverse, + isInvalid: false, } } func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itrRange := goLevelDBIterRange(start, end) - itr := db.db.NewIterator(itrRange, nil) - itr.Seek(start) // if we don't call this the itr is never valid (?!) - return &goLevelDBIterator{ - source: itr, - start: start, - end: end, - } + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false) } func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { - // XXX - return nil + panic("not implemented yet") // XXX } -var _ Iterator = (*goLevelDBIterator)(nil) - -type goLevelDBIterator struct { - source iterator.Iterator - invalid bool - start, end []byte +func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end } -func (it *goLevelDBIterator) Domain() ([]byte, []byte) { - return it.start, it.end -} +func (itr *goLevelDBIterator) Valid() bool { -// Key returns a copy of the current key. -func (it *goLevelDBIterator) Key() []byte { - if !it.Valid() { - panic("goLevelDBIterator Key() called when invalid") + // Once invalid, forever invalid. + if itr.isInvalid { + return false } - key := it.source.Key() - k := make([]byte, len(key)) - copy(k, key) - return k -} + // Panic on DB error. No way to recover. + itr.assertNoError() -// Value returns a copy of the current value. -func (it *goLevelDBIterator) Value() []byte { - if !it.Valid() { - panic("goLevelDBIterator Value() called when invalid") + // If source is invalid, invalid. + if !itr.source.Valid() { + itr.isInvalid = true + return false } - val := it.source.Value() - v := make([]byte, len(val)) - copy(v, val) - - return v -} -func (it *goLevelDBIterator) Valid() bool { - it.assertNoError() - if it.invalid { + // If key is end or past it, invalid. 
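	// (Sketch of the intended bound: with end = []byte("b"), keys "a" and "az"
	// remain valid, while "b" itself and anything sorting after it do not; the
	// end of the range is exclusive.)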
+ var end = itr.end + var key = itr.source.Key() + if end != nil && bytes.Compare(end, key) <= 0 { + itr.isInvalid = true return false } - it.invalid = !it.source.Valid() - return !it.invalid + + // Valid + return true } -func (it *goLevelDBIterator) Next() { - if !it.Valid() { - panic("goLevelDBIterator Next() called when invalid") - } - it.source.Next() +func (itr *goLevelDBIterator) Key() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Key() } -func (it *goLevelDBIterator) Prev() { - if !it.Valid() { - panic("goLevelDBIterator Prev() called when invalid") - } - it.source.Prev() +func (itr *goLevelDBIterator) Value() []byte { + itr.assertNoError() + itr.assertIsValid() + return itr.source.Value() +} + +func (itr *goLevelDBIterator) Next() { + itr.assertNoError() + itr.assertIsValid() + itr.source.Next() } -func (it *goLevelDBIterator) Close() { - it.source.Release() +func (itr *goLevelDBIterator) Close() { + itr.source.Release() } -func (it *goLevelDBIterator) assertNoError() { - if err := it.source.Error(); err != nil { +func (itr *goLevelDBIterator) assertNoError() { + if err := itr.source.Error(); err != nil { panic(err) } } + +func (itr goLevelDBIterator) assertIsValid() { + if !itr.Valid() { + panic("goLevelDBIterator is invalid") + } +} diff --git a/db/mem_db.go b/db/mem_db.go index 44254870a..e9d9174dc 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -29,14 +29,16 @@ func NewMemDB() *MemDB { func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = nonNilBytes(key) + return db.db[string(key)] } func (db *MemDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + key = nonNilBytes(key) + _, ok := db.db[string(key)] return ok } @@ -44,43 +46,43 @@ func (db *MemDB) Has(key []byte) bool { func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + db.SetNoLock(key, value) } func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) + db.SetNoLock(key, value) } // NOTE: Implements atomicSetDeleter func (db *MemDB) SetNoLock(key []byte, value []byte) { - if value == nil { - value = []byte{} - } - panicNilKey(key) + key = nonNilBytes(key) + value = nonNilBytes(value) + db.db[string(key)] = value } func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) - delete(db.db, string(key)) + + db.DeleteNoLock(key) } func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() - panicNilKey(key) - delete(db.db, string(key)) + + db.DeleteNoLock(key) } // NOTE: Implements atomicSetDeleter func (db *MemDB) DeleteNoLock(key []byte) { - panicNilKey(key) + key = nonNilBytes(key) + delete(db.db, string(key)) } @@ -125,100 +127,92 @@ func (db *MemDB) Mutex() *sync.Mutex { //---------------------------------------- func (db *MemDB) Iterator(start, end []byte) Iterator { - it := newMemDBIterator(db, start, end) - db.mtx.Lock() defer db.mtx.Unlock() - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. - it.keys = db.getSortedKeys(start, end) - return it + keys := db.getSortedKeys(start, end, false) + return newMemDBIterator(db, keys, start, end) } func (db *MemDB) ReverseIterator(start, end []byte) Iterator { - it := newMemDBIterator(db, start, end) - db.mtx.Lock() defer db.mtx.Unlock() - // We need a copy of all of the keys. - // Not the best, but probably not a bottleneck depending. 
- it.keys = db.getSortedKeys(end, start) - // reverse the order - l := len(it.keys) - 1 - for i, v := range it.keys { - it.keys[i] = it.keys[l-i] - it.keys[l-i] = v - } - return nil + keys := db.getSortedKeys(end, start, true) + return newMemDBIterator(db, keys, start, end) } -func (db *MemDB) getSortedKeys(start, end []byte) []string { +func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { keys := []string{} for key, _ := range db.db { - if IsKeyInDomain([]byte(key), start, end) { + if IsKeyInDomain([]byte(key), start, end, false) { keys = append(keys, key) } } sort.Strings(keys) + if reverse { + nkeys := len(keys) + for i := 0; i < nkeys/2; i++ { + keys[i] = keys[nkeys-i-1] + } + } return keys } var _ Iterator = (*memDBIterator)(nil) +// We need a copy of all of the keys. +// Not the best, but probably not a bottleneck depending. type memDBIterator struct { - cur int - keys []string - db DB - start, end []byte + db DB + cur int + keys []string + start []byte + end []byte } -func newMemDBIterator(db DB, start, end []byte) *memDBIterator { +// Keys is expected to be in reverse order for reverse iterators. +func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { return &memDBIterator{ db: db, + cur: 0, + keys: keys, start: start, end: end, } } -func (it *memDBIterator) Domain() ([]byte, []byte) { - return it.start, it.end +func (itr *memDBIterator) Domain() ([]byte, []byte) { + return itr.start, itr.end } -func (it *memDBIterator) Valid() bool { - return 0 <= it.cur && it.cur < len(it.keys) +func (itr *memDBIterator) Valid() bool { + return 0 <= itr.cur && itr.cur < len(itr.keys) } -func (it *memDBIterator) Next() { - if !it.Valid() { - panic("memDBIterator Next() called when invalid") - } - it.cur++ +func (itr *memDBIterator) Next() { + itr.assertIsValid() + itr.cur++ } -func (it *memDBIterator) Prev() { - if !it.Valid() { - panic("memDBIterator Next() called when invalid") - } - it.cur-- +func (itr *memDBIterator) Key() []byte { + itr.assertIsValid() + return []byte(itr.keys[itr.cur]) } -func (it *memDBIterator) Key() []byte { - if !it.Valid() { - panic("memDBIterator Key() called when invalid") - } - return []byte(it.keys[it.cur]) +func (itr *memDBIterator) Value() []byte { + itr.assertIsValid() + key := []byte(itr.keys[itr.cur]) + return itr.db.Get(key) } -func (it *memDBIterator) Value() []byte { - if !it.Valid() { - panic("memDBIterator Value() called when invalid") - } - return it.db.Get(it.Key()) +func (itr *memDBIterator) Close() { + itr.keys = nil + itr.db = nil } -func (it *memDBIterator) Close() { - it.db = nil - it.keys = nil +func (itr *memDBIterator) assertIsValid() { + if !itr.Valid() { + panic("memDBIterator is invalid") + } } diff --git a/db/mem_db_test.go b/db/mem_db_test.go deleted file mode 100644 index a08a3679b..000000000 --- a/db/mem_db_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package db - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMemDBIterator(t *testing.T) { - db := NewMemDB() - keys := make([][]byte, 100) - for i := 0; i < 100; i++ { - keys[i] = []byte{byte(i)} - } - - value := []byte{5} - for _, k := range keys { - db.Set(k, value) - } - - iter := db.Iterator(BeginningKey(), EndingKey()) - i := 0 - for ; iter.Valid(); iter.Next() { - assert.Equal(t, db.Get(iter.Key()), iter.Value(), "values dont match for key") - i += 1 - } - assert.Equal(t, i, len(db.db), "iterator didnt cover whole db") -} - -func TestMemDBClose(t *testing.T) { - db := 
NewMemDB() - copyDB := func(orig map[string][]byte) map[string][]byte { - copy := make(map[string][]byte) - for k, v := range orig { - copy[k] = v - } - return copy - } - k, v := []byte("foo"), []byte("bar") - db.Set(k, v) - require.Equal(t, db.Get(k), v, "expecting a successful get") - copyBefore := copyDB(db.db) - db.Close() - require.Equal(t, db.Get(k), v, "Close is a noop, expecting a successful get") - copyAfter := copyDB(db.db) - require.Equal(t, copyBefore, copyAfter, "Close is a noop and shouldn't modify any internal data") -} diff --git a/db/types.go b/db/types.go index ee8d69cc1..6e5d2408d 100644 --- a/db/types.go +++ b/db/types.go @@ -2,31 +2,39 @@ package db type DB interface { - // Get returns nil iff key doesn't exist. Panics on nil key. + // Get returns nil iff key doesn't exist. + // A nil key is interpreted as an empty byteslice. Get([]byte) []byte - // Has checks if a key exists. Panics on nil key. + // Has checks if a key exists. + // A nil key is interpreted as an empty byteslice. Has(key []byte) bool - // Set sets the key. Panics on nil key. + // Set sets the key. + // A nil key is interpreted as an empty byteslice. Set([]byte, []byte) SetSync([]byte, []byte) - // Delete deletes the key. Panics on nil key. + // Delete deletes the key. + // A nil key is interpreted as an empty byteslice. Delete([]byte) DeleteSync([]byte) - // Iterator over a domain of keys in ascending order. End is exclusive. + // Iterate over a domain of keys in ascending order. End is exclusive. // Start must be less than end, or the Iterator is invalid. + // A nil start is interpreted as an empty byteslice. + // If end is nil, iterates up to the last item (inclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. Iterator(start, end []byte) Iterator - // Iterator over a domain of keys in descending order. End is exclusive. + // Iterate over a domain of keys in descending order. End is exclusive. // Start must be greater than end, or the Iterator is invalid. + // If start is nil, iterates from the last/greatest item (inclusive). + // If end is nil, iterates up to the first/least item (iclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. ReverseIterator(start, end []byte) Iterator - // Releases the connection. + // Closes the connection. Close() // Creates a batch for atomic updates. @@ -54,16 +62,6 @@ type SetDeleter interface { //---------------------------------------- -// BeginningKey is the smallest key. -func BeginningKey() []byte { - return []byte{} -} - -// EndingKey is the largest key. -func EndingKey() []byte { - return nil -} - /* Usage: @@ -107,7 +105,7 @@ type Iterator interface { // If Valid returns false, this method will panic. Value() []byte - // Release deallocates the given Iterator. + // Close releases the Iterator. Close() } @@ -116,9 +114,12 @@ func bz(s string) []byte { return []byte(s) } -// All DB funcs should panic on nil key. -func panicNilKey(key []byte) { - if key == nil { - panic("nil key") +// We defensively turn nil keys or values into []byte{} for +// most operations. 
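// For example (sketch): nonNilBytes(nil) returns []byte{}, while
// nonNilBytes([]byte("abc")) returns its argument unchanged; this is what lets
// Get, Set and Delete treat a nil key or value as the empty byteslice.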
+func nonNilBytes(bz []byte) []byte { + if bz == nil { + return []byte{} + } else { + return bz } } diff --git a/db/util.go b/db/util.go index 661d0a16f..b0ab7f6ad 100644 --- a/db/util.go +++ b/db/util.go @@ -7,8 +7,8 @@ import ( func IteratePrefix(db DB, prefix []byte) Iterator { var start, end []byte if len(prefix) == 0 { - start = BeginningKey() - end = EndingKey() + start = nil + end = nil } else { start = cp(prefix) end = cpIncr(prefix) @@ -35,11 +35,26 @@ func cpIncr(bz []byte) (ret []byte) { ret[i] = byte(0x00) } } - return EndingKey() + return nil } -func IsKeyInDomain(key, start, end []byte) bool { - leftCondition := bytes.Equal(start, BeginningKey()) || bytes.Compare(key, start) >= 0 - rightCondition := bytes.Equal(end, EndingKey()) || bytes.Compare(key, end) < 0 - return leftCondition && rightCondition +// See DB interface documentation for more information. +func IsKeyInDomain(key, start, end []byte, isReverse bool) bool { + if !isReverse { + if bytes.Compare(key, start) < 0 { + return false + } + if end != nil && bytes.Compare(end, key) <= 0 { + return false + } + return true + } else { + if start != nil && bytes.Compare(start, key) < 0 { + return false + } + if end != nil && bytes.Compare(key, end) <= 0 { + return false + } + return true + } } diff --git a/db/util_test.go b/db/util_test.go index b273f8d46..854448af3 100644 --- a/db/util_test.go +++ b/db/util_test.go @@ -5,7 +5,7 @@ import ( "testing" ) -// empty iterator for empty db +// Empty iterator for empty db. func TestPrefixIteratorNoMatchNil(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -17,7 +17,7 @@ func TestPrefixIteratorNoMatchNil(t *testing.T) { } } -// empty iterator for db populated after iterator created +// Empty iterator for db populated after iterator created. func TestPrefixIteratorNoMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -30,7 +30,7 @@ func TestPrefixIteratorNoMatch1(t *testing.T) { } } -// empty iterator for prefix starting above db entry +// Empty iterator for prefix starting after db entry. func TestPrefixIteratorNoMatch2(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -38,13 +38,12 @@ func TestPrefixIteratorNoMatch2(t *testing.T) { db.SetSync(bz("3"), bz("value_3")) itr := IteratePrefix(db, []byte("4")) - // Once invalid... checkInvalid(t, itr) }) } } -// iterator with single val for db with single val, starting from that val +// Iterator with single val for db with single val, starting from that val. func TestPrefixIteratorMatch1(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { @@ -62,7 +61,7 @@ func TestPrefixIteratorMatch1(t *testing.T) { } } -// iterator with prefix iterates over everything with same prefix +// Iterator with prefix iterates over everything with same prefix. 
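// For example (sketch): with keys "a/1", "a/2" and "b/1" stored, IteratePrefix(db,
// []byte("a/")) yields only "a/1" and "a/2"; internally it iterates from start "a/"
// up to the exclusive end cpIncr("a/") == "a0".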
func TestPrefixIteratorMatches1N(t *testing.T) { for backend, _ := range backends { t.Run(fmt.Sprintf("Prefix w/ backend %s", backend), func(t *testing.T) { From ca56a274bda79ca6367eac339e837c8042385c1e Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 19 Dec 2017 20:33:34 -0800 Subject: [PATCH 28/68] Nil bytes are OK for Get/Set etc And s/Release/Close/g --- db/backend_test.go | 15 ++++++++------- db/c_level_db.go | 18 +++++++++--------- db/mem_db.go | 4 ++-- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index e103843dc..0f4346f2e 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -21,27 +21,27 @@ func testBackendGetSetDelete(t *testing.T, backend string) { defer dir.Close() db := NewDB("testdb", backend, dirname) - // A nonexistent key should return nil, even if the key is empty. + // A nonexistent key should return nil, even if the key is empty require.Nil(t, db.Get([]byte(""))) - // A nonexistent key should return nil, even if the key is nil. + // A nonexistent key should return nil, even if the key is nil require.Nil(t, db.Get(nil)) // A nonexistent key should return nil. key := []byte("abc") require.Nil(t, db.Get(key)) - // Set empty ("") + // Set empty value. db.Set(key, []byte("")) require.NotNil(t, db.Get(key)) require.Empty(t, db.Get(key)) - // Set empty (nil) + // Set nil value. db.Set(key, nil) require.NotNil(t, db.Get(key)) require.Empty(t, db.Get(key)) - // Delete + // Delete. db.Delete(key) require.Nil(t, db.Get(key)) } @@ -62,12 +62,13 @@ func withDB(t *testing.T, creator dbCreator, fn func(DB)) { } func TestBackendsNilKeys(t *testing.T) { - // test all backends. - // nil keys are treated as the empty key for most operations. + + // Test all backends. for dbType, creator := range backends { withDB(t, creator, func(db DB) { t.Run(fmt.Sprintf("Testing %s", dbType), func(t *testing.T) { + // Nil keys are treated as the empty key for most operations. expect := func(key, value []byte) { if len(key) == 0 { // nil or empty assert.Equal(t, db.Get(nil), db.Get([]byte(""))) diff --git a/db/c_level_db.go b/db/c_level_db.go index c9f8d419b..7910628bf 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -199,12 +199,12 @@ func newCLevelDBIterator(source *levigo.Iterator, start, end []byte, isReverse b } } -func (itr *cLevelDBIterator) Domain() ([]byte, []byte) { +func (itr cLevelDBIterator) Domain() ([]byte, []byte) { return itr.start, itr.end } -func (itr *cLevelDBIterator) Valid() bool { - +func (itr cLevelDBIterator) Valid() bool { + // Once invalid, forever invalid. if itr.isInvalid { return false @@ -227,33 +227,33 @@ func (itr *cLevelDBIterator) Valid() bool { return false } - // Valid + // It's valid. 
return true } -func (itr *cLevelDBIterator) Key() []byte { +func (itr cLevelDBIterator) Key() []byte { itr.assertNoError() itr.assertIsValid() return itr.source.Key() } -func (itr *cLevelDBIterator) Value() []byte { +func (itr cLevelDBIterator) Value() []byte { itr.assertNoError() itr.assertIsValid() return itr.source.Value() } -func (itr *cLevelDBIterator) Next() { +func (itr cLevelDBIterator) Next() { itr.assertNoError() itr.assertIsValid() itr.source.Next() } -func (itr *cLevelDBIterator) Close() { +func (itr cLevelDBIterator) Close() { itr.source.Close() } -func (itr *cLevelDBIterator) assertNoError() { +func (itr cLevelDBIterator) assertNoError() { if err := itr.source.GetError(); err != nil { panic(err) } diff --git a/db/mem_db.go b/db/mem_db.go index e9d9174dc..e2470d7f2 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -30,7 +30,7 @@ func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() key = nonNilBytes(key) - + return db.db[string(key)] } @@ -215,4 +215,4 @@ func (itr *memDBIterator) assertIsValid() { if !itr.Valid() { panic("memDBIterator is invalid") } -} +} \ No newline at end of file From b70ae4919befb6ae3e5cb40ae8174e122e771d08 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 19 Dec 2017 20:47:22 -0800 Subject: [PATCH 29/68] Update glide file --- glide.lock | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/glide.lock b/glide.lock index e87782d21..f541f98e3 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-12-17T12:50:35.983353926-08:00 +updated: 2017-12-19T20:38:52.947516911-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e3b2152e0063c5f05efea89ecbe297852af2a92d + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -18,7 +18,7 @@ imports: - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl @@ -39,19 +39,21 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a - name: github.com/mattn/go-colorable version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 - name: github.com/mattn/go-isatty version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a +- name: github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 8d919cbe7e2627e417f3e45c3c0e489a5b7e2536 + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast @@ -61,11 +63,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 
4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 + version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: adf24ef3f94bd13ec4163060b21a5678f22b429b + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -80,7 +82,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b6fc872b42d41158a60307db4da051dd6f179415 + version: 27be46e25124ddf775e23317a83647ce62a93f6b subpackages: - data - data/base58 @@ -89,22 +91,22 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 8b4580aae2a0dd0c231a45d3ccb8434ff533b840 + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 subpackages: - unix - name: golang.org/x/text - version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm - name: gopkg.in/go-playground/validator.v9 - version: 61caf9d3038e1af346dbf5c2e16f6678e1548364 + version: 1304298bf10d085adec514b076772a79c9cadb6b - name: gopkg.in/yaml.v2 - version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: - name: github.com/davecgh/go-spew version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 From f2a8e95248ca42cf29885a44bc1362cf78d364fe Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 11:40:05 -0800 Subject: [PATCH 30/68] Add KI64Pair(s) --- common/kvpair.go | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/common/kvpair.go b/common/kvpair.go index b9e45733f..5faa534df 100644 --- a/common/kvpair.go +++ b/common/kvpair.go @@ -5,6 +5,9 @@ import ( "sort" ) +//---------------------------------------- +// KVPair + type KVPair struct { Key Bytes Value Bytes @@ -28,3 +31,30 @@ func (kvs KVPairs) Less(i, j int) bool { } func (kvs KVPairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } func (kvs KVPairs) Sort() { sort.Sort(kvs) } + +//---------------------------------------- +// KI64Pair + +type KI64Pair struct { + Key Bytes + Value int64 +} + +type KI64Pairs []KI64Pair + +// Sorting +func (kvs KI64Pairs) Len() int { return len(kvs) } +func (kvs KI64Pairs) Less(i, j int) bool { + switch bytes.Compare(kvs[i].Key, kvs[j].Key) { + case -1: + return true + case 0: + return kvs[i].Value < kvs[j].Value + case 1: + return false + default: + panic("invalid comparison result") + } +} +func (kvs KI64Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } +func (kvs KI64Pairs) Sort() { sort.Sort(kvs) } From 797bcdd9e05f81a7e5c052384bc83ac71938753b Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 17:46:21 -0800 Subject: [PATCH 31/68] Remove common/http --- common/http.go | 153 --------------------------- common/http_test.go | 250 -------------------------------------------- glide.lock | 28 ++--- glide.yaml | 1 - 4 files changed, 10 insertions(+), 422 deletions(-) delete mode 100644 common/http.go delete mode 100644 common/http_test.go diff --git a/common/http.go b/common/http.go deleted file mode 100644 index 56b5b6c63..000000000 --- a/common/http.go +++ /dev/null @@ -1,153 +0,0 @@ -package common - -import ( - "encoding/json" - "io" - "net/http" - - "gopkg.in/go-playground/validator.v9" - - "github.com/pkg/errors" -) - 
-type ErrorResponse struct { - Success bool `json:"success,omitempty"` - - // Err is the error message if Success is false - Err string `json:"error,omitempty"` - - // Code is set if Success is false - Code int `json:"code,omitempty"` -} - -// ErrorWithCode makes an ErrorResponse with the -// provided err's Error() content, and status code. -// It panics if err is nil. -func ErrorWithCode(err error, code int) *ErrorResponse { - return &ErrorResponse{ - Err: err.Error(), - Code: code, - } -} - -// Ensure that ErrorResponse implements error -var _ error = (*ErrorResponse)(nil) - -func (er *ErrorResponse) Error() string { - return er.Err -} - -// Ensure that ErrorResponse implements httpCoder -var _ httpCoder = (*ErrorResponse)(nil) - -func (er *ErrorResponse) HTTPCode() int { - return er.Code -} - -var errNilBody = errors.Errorf("expecting a non-nil body") - -// FparseJSON unmarshals into save, the body of the provided reader. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. -func FparseJSON(r io.Reader, save interface{}) error { - if r == nil { - return errors.Wrap(errNilBody, "Reader") - } - - dec := json.NewDecoder(r) - if err := dec.Decode(save); err != nil { - return errors.Wrap(err, "Decode/Unmarshal") - } - return nil -} - -// ParseRequestJSON unmarshals into save, the body of the -// request. It closes the body of the request after parsing. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. -func ParseRequestJSON(r *http.Request, save interface{}) error { - if r == nil || r.Body == nil { - return errNilBody - } - defer r.Body.Close() - - return FparseJSON(r.Body, save) -} - -// ParseRequestAndValidateJSON unmarshals into save, the body of the -// request and invokes a validator on the saved content. To ensure -// validation, make sure to set tags "validate" on your struct as -// per https://godoc.org/gopkg.in/go-playground/validator.v9. -// It closes the body of the request after parsing. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. -func ParseRequestAndValidateJSON(r *http.Request, save interface{}) error { - if r == nil || r.Body == nil { - return errNilBody - } - defer r.Body.Close() - - return FparseAndValidateJSON(r.Body, save) -} - -// FparseAndValidateJSON like FparseJSON unmarshals into save, -// the body of the provided reader. However, it invokes the validator -// to check the set validators on your struct fields as per -// per https://godoc.org/gopkg.in/go-playground/validator.v9. -// Since it uses json.Unmarshal, save must be of a pointer type -// or compatible with json.Unmarshal. -func FparseAndValidateJSON(r io.Reader, save interface{}) error { - if err := FparseJSON(r, save); err != nil { - return err - } - return validate(save) -} - -var theValidator = validator.New() - -func validate(obj interface{}) error { - return errors.Wrap(theValidator.Struct(obj), "Validate") -} - -// WriteSuccess JSON marshals the content provided, to an HTTP -// response, setting the provided status code and setting header -// "Content-Type" to "application/json". -func WriteSuccess(w http.ResponseWriter, data interface{}) { - WriteCode(w, data, 200) -} - -// WriteCode JSON marshals content, to an HTTP response, -// setting the provided status code, and setting header -// "Content-Type" to "application/json". If JSON marshalling fails -// with an error, WriteCode instead writes out the error invoking -// WriteError. 
-func WriteCode(w http.ResponseWriter, out interface{}, code int) { - blob, err := json.MarshalIndent(out, "", " ") - if err != nil { - WriteError(w, err) - } else { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - w.Write(blob) - } -} - -type httpCoder interface { - HTTPCode() int -} - -// WriteError is a convenience function to write out an -// error to an http.ResponseWriter, to send out an error -// that's structured as JSON i.e the form -// {"error": sss, "code": ddd} -// If err implements the interface HTTPCode() int, -// it will use that status code otherwise, it will -// set code to be http.StatusBadRequest -func WriteError(w http.ResponseWriter, err error) { - code := http.StatusBadRequest - if httpC, ok := err.(httpCoder); ok { - code = httpC.HTTPCode() - } - - WriteCode(w, ErrorWithCode(err, code), code) -} diff --git a/common/http_test.go b/common/http_test.go deleted file mode 100644 index 4272f6062..000000000 --- a/common/http_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package common_test - -import ( - "bytes" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "reflect" - "strings" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tmlibs/common" -) - -func TestWriteSuccess(t *testing.T) { - w := httptest.NewRecorder() - common.WriteSuccess(w, "foo") - assert.Equal(t, w.Code, 200, "should get a 200") -} - -var blankErrResponse = new(common.ErrorResponse) - -func TestWriteError(t *testing.T) { - tests := [...]struct { - msg string - code int - }{ - 0: { - msg: "this is a message", - code: 419, - }, - } - - for i, tt := range tests { - w := httptest.NewRecorder() - msg := tt.msg - - // First check without a defined code, should send back a 400 - common.WriteError(w, errors.New(msg)) - assert.Equal(t, w.Code, http.StatusBadRequest, "#%d: should get a 400", i) - blob, err := ioutil.ReadAll(w.Body) - if err != nil { - assert.Fail(t, "expecting a successful ioutil.ReadAll", "#%d", i) - continue - } - - recv := new(common.ErrorResponse) - if err := json.Unmarshal(blob, recv); err != nil { - assert.Fail(t, "expecting a successful json.Unmarshal", "#%d", i) - continue - } - - assert.Equal(t, reflect.DeepEqual(recv, blankErrResponse), false, "expecting a non-blank error response") - - // Now test with an error that's .HTTPCode() int conforming - - // Reset w - w = httptest.NewRecorder() - - common.WriteError(w, common.ErrorWithCode(errors.New("foo"), tt.code)) - assert.Equal(t, w.Code, tt.code, "case #%d", i) - } -} - -type marshalFailer struct{} - -var errFooFailed = errors.New("foo failed here") - -func (mf *marshalFailer) MarshalJSON() ([]byte, error) { - return nil, errFooFailed -} - -func TestWriteCode(t *testing.T) { - codes := [...]int{ - 0: http.StatusOK, - 1: http.StatusBadRequest, - 2: http.StatusUnauthorized, - 3: http.StatusInternalServerError, - } - - for i, code := range codes { - w := httptest.NewRecorder() - common.WriteCode(w, "foo", code) - assert.Equal(t, w.Code, code, "#%d", i) - - // Then for the failed JSON marshaling - w = httptest.NewRecorder() - common.WriteCode(w, &marshalFailer{}, code) - wantCode := http.StatusBadRequest - assert.Equal(t, w.Code, wantCode, "#%d", i) - assert.True(t, strings.Contains(w.Body.String(), errFooFailed.Error()), - "#%d: expected %q in the error message", i, errFooFailed) - } -} - -type saver struct { - Foo int `json:"foo" validate:"min=10"` - Bar string `json:"bar"` -} - -type rcloser struct { 
- closeOnce sync.Once - body *bytes.Buffer - closeChan chan bool -} - -var errAlreadyClosed = errors.New("already closed") - -func (rc *rcloser) Close() error { - var err = errAlreadyClosed - rc.closeOnce.Do(func() { - err = nil - rc.closeChan <- true - close(rc.closeChan) - }) - return err -} - -func (rc *rcloser) Read(b []byte) (int, error) { - return rc.body.Read(b) -} - -var _ io.ReadCloser = (*rcloser)(nil) - -func makeReq(strBody string) (*http.Request, <-chan bool) { - closeChan := make(chan bool, 1) - buf := new(bytes.Buffer) - buf.Write([]byte(strBody)) - req := &http.Request{ - Header: make(http.Header), - Body: &rcloser{body: buf, closeChan: closeChan}, - } - return req, closeChan -} - -func TestParseRequestJSON(t *testing.T) { - tests := [...]struct { - body string - wantErr bool - useNil bool - }{ - 0: {wantErr: true, body: ``}, - 1: {body: `{}`}, - 2: {body: `{"foo": 2}`}, // Not that the validate tags don't matter here since we are just parsing - 3: {body: `{"foo": "abcd"}`, wantErr: true}, - 4: {useNil: true, wantErr: true}, - } - - for i, tt := range tests { - req, closeChan := makeReq(tt.body) - if tt.useNil { - req.Body = nil - } - sav := new(saver) - err := common.ParseRequestJSON(req, sav) - if tt.wantErr { - assert.NotEqual(t, err, nil, "#%d: want non-nil error", i) - continue - } - assert.Equal(t, err, nil, "#%d: want nil error", i) - wasClosed := <-closeChan - assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i) - } -} - -func TestFparseJSON(t *testing.T) { - r1 := strings.NewReader(`{"foo": 1}`) - sav := new(saver) - require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing") - r2 := strings.NewReader(`{"bar": "blockchain"}`) - require.Equal(t, common.FparseJSON(r2, sav), nil, "expecting successful parsing") - require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 1, Bar: "blockchain"}), true, "should have parsed both") - - // Now with a nil body - require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report") -} - -func TestFparseAndValidateJSON(t *testing.T) { - r1 := strings.NewReader(`{"foo": 1}`) - sav := new(saver) - require.NotEqual(t, common.FparseAndValidateJSON(r1, sav), nil, "expecting validation to fail") - r1 = strings.NewReader(`{"foo": 100}`) - require.Equal(t, common.FparseJSON(r1, sav), nil, "expecting successful parsing") - r2 := strings.NewReader(`{"bar": "blockchain"}`) - require.Equal(t, common.FparseAndValidateJSON(r2, sav), nil, "expecting successful parsing") - require.Equal(t, reflect.DeepEqual(sav, &saver{Foo: 100, Bar: "blockchain"}), true, "should have parsed both") - - // Now with a nil body - require.NotEqual(t, nil, common.FparseJSON(nil, sav), "expecting a nil error report") -} - -var blankSaver = new(saver) - -func TestParseAndValidateRequestJSON(t *testing.T) { - tests := [...]struct { - body string - wantErr bool - useNil bool - }{ - 0: {wantErr: true, body: ``}, - 1: {body: `{}`, wantErr: true}, // Here it should fail since Foo doesn't meet the minimum value - 2: {body: `{"foo": 2}`, wantErr: true}, // Here validation should fail - 3: {body: `{"foo": "abcd"}`, wantErr: true}, - 4: {useNil: true, wantErr: true}, - 5: {body: `{"foo": 100}`}, // Must succeed - } - - for i, tt := range tests { - req, closeChan := makeReq(tt.body) - if tt.useNil { - req.Body = nil - } - sav := new(saver) - err := common.ParseRequestAndValidateJSON(req, sav) - if tt.wantErr { - assert.NotEqual(t, err, nil, "#%d: want non-nil error", i) - continue - } - - assert.Equal(t, err, nil, "#%d: want nil 
error", i) - assert.False(t, reflect.DeepEqual(blankSaver, sav), "#%d: expecting a set saver", i) - - wasClosed := <-closeChan - assert.Equal(t, wasClosed, true, "#%d: should have invoked close", i) - } -} - -func TestErrorWithCode(t *testing.T) { - tests := [...]struct { - code int - err error - }{ - 0: {code: 500, err: errors.New("funky")}, - 1: {code: 406, err: errors.New("purist")}, - } - - for i, tt := range tests { - errRes := common.ErrorWithCode(tt.err, tt.code) - assert.Equal(t, errRes.Error(), tt.err.Error(), "#%d: expecting the error values to be equal", i) - assert.Equal(t, errRes.Code, tt.code, "expecting the same status code", i) - assert.Equal(t, errRes.HTTPCode(), tt.code, "expecting the same status code", i) - } -} diff --git a/glide.lock b/glide.lock index f541f98e3..83c8551e0 100644 --- a/glide.lock +++ b/glide.lock @@ -1,24 +1,18 @@ -hash: 6efda1f3891a7211fc3dc1499c0079267868ced9739b781928af8e225420f867 -updated: 2017-12-19T20:38:52.947516911-08:00 +hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 +updated: 2017-12-25T17:45:52.357002873-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: e3b2152e0063c5f05efea89ecbe297852af2a92d subpackages: - log - log/level - log/term - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 -- name: github.com/go-playground/locales - version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6 - subpackages: - - currency -- name: github.com/go-playground/universal-translator - version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl @@ -51,7 +45,7 @@ imports: - name: github.com/pelletier/go-toml version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d + version: f15c970de5b76fac0b59abb32d62c17cc7bed265 - name: github.com/spf13/afero version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: @@ -63,11 +57,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 + version: adf24ef3f94bd13ec4163060b21a5678f22b429b subpackages: - leveldb - leveldb/cache @@ -82,7 +76,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 27be46e25124ddf775e23317a83647ce62a93f6b + version: 5ab49b4c6ad674da6b81442911cf713ef0afb544 subpackages: - data - data/base58 @@ -91,7 +85,7 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 + version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 subpackages: - ripemd160 - name: golang.org/x/sys @@ -99,12 +93,10 @@ imports: subpackages: - unix - name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + version: 75cc3cad82b5f47d3fb229ddda8c5167da14f294 subpackages: - transform - unicode/norm -- name: gopkg.in/go-playground/validator.v9 - version: 
1304298bf10d085adec514b076772a79c9cadb6b - name: gopkg.in/yaml.v2 version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: diff --git a/glide.yaml b/glide.yaml index 22825a273..d8bdd5872 100644 --- a/glide.yaml +++ b/glide.yaml @@ -23,7 +23,6 @@ import: - package: golang.org/x/crypto subpackages: - ripemd160 -- package: gopkg.in/go-playground/validator.v9 testImport: - package: github.com/stretchr/testify version: ^1.1.4 From 6ec8c1602f22f41fc320da05c3a80acebf2c23bd Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Mon, 25 Dec 2017 22:41:40 -0800 Subject: [PATCH 32/68] Update Makefile --- Makefile | 81 ++++++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 61 insertions(+), 20 deletions(-) diff --git a/Makefile b/Makefile index a24306f32..29a3ac7db 100644 --- a/Makefile +++ b/Makefile @@ -2,38 +2,69 @@ GOTOOLS = \ github.com/Masterminds/glide \ - github.com/alecthomas/gometalinter + github.com/alecthomas/gometalinter.v2 +GOTOOLS_CHECK = glide gometalinter.v2 -REPO:=github.com/tendermint/tmlibs +all: check get_vendor_deps build test install metalinter -all: test +check: check_tools -NOVENDOR = go list github.com/tendermint/tmlibs/... | grep -v /vendor/ -test: - go test -tags gcc `glide novendor` +######################################## +### Build + +build: + # Nothing to build! + +install: + # Nothing to install! + + +######################################## +### Tools & dependencies + +check_tools: + @# https://stackoverflow.com/a/25668869 + @echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\ + $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" + +get_tools: + @echo "--> Installing tools" + go get -u -v $(GOTOOLS) + @gometalinter.v2 --install + +update_tools: + @echo "--> Updating tools" + @go get -u $(GOTOOLS) -get_vendor_deps: ensure_tools +get_vendor_deps: @rm -rf vendor/ @echo "--> Running glide install" @glide install -ensure_tools: - go get $(GOTOOLS) -metalinter: ensure_tools - @gometalinter --install - gometalinter --vendor --deadline=600s --enable-all --disable=lll ./... +######################################## +### Testing -metalinter_test: ensure_tools - @gometalinter --install - gometalinter --vendor --deadline=600s --disable-all \ +test: + go test -tags gcc `glide novendor` + + +######################################## +### Formatting, linting, and vetting + +fmt: + @go fmt ./... + +metalinter: + @echo "==> Running linter" + gometalinter.v2 --vendor --deadline=600s --disable-all \ + --enable=maligned \ --enable=deadcode \ - --enable=gas \ --enable=goconst \ + --enable=goimports \ --enable=gosimple \ --enable=ineffassign \ - --enable=interfacer \ --enable=megacheck \ --enable=misspell \ --enable=staticcheck \ @@ -43,13 +74,23 @@ metalinter_test: ensure_tools --enable=unused \ --enable=varcheck \ --enable=vetshadow \ - --enable=vet \ ./... - #--enable=aligncheck \ + #--enable=gas \ #--enable=dupl \ #--enable=errcheck \ #--enable=gocyclo \ - #--enable=goimports \ #--enable=golint \ <== comments on anything exported #--enable=gotype \ + #--enable=interfacer \ #--enable=unparam \ + #--enable=vet \ + +metalinter_all: + protoc $(INCLUDE) --lint_out=. types/*.proto + gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... + + +# To avoid unintended conflicts with file names, always add to .PHONY +# unless there is a reason not to. 
+# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html +.PHONY: check build check_tools get_tools update_tools get_vendor_deps test fmt metalinter metalinter_all From bf644b098496cd766e7ab540898b1d3e25d11e77 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Dec 2017 00:36:58 -0800 Subject: [PATCH 33/68] Do not shadow assert --- common/bytes_test.go | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/common/bytes_test.go b/common/bytes_test.go index 0c0eacc33..2ad0e692c 100644 --- a/common/bytes_test.go +++ b/common/bytes_test.go @@ -10,23 +10,20 @@ import ( // This is a trivial test for protobuf compatibility. func TestMarshal(t *testing.T) { - assert := assert.New(t) - b := []byte("hello world") dataB := Bytes(b) b2, err := dataB.Marshal() - assert.Nil(err) - assert.Equal(b, b2) + assert.Nil(t, err) + assert.Equal(t, b, b2) var dataB2 Bytes err = (&dataB2).Unmarshal(b) - assert.Nil(err) - assert.Equal(dataB, dataB2) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) } // Test that the hex encoding works. func TestJSONMarshal(t *testing.T) { - assert := assert.New(t) type TestStruct struct { B1 []byte @@ -51,7 +48,7 @@ func TestJSONMarshal(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(string(jsonBytes), tc.expected) + assert.Equal(t, string(jsonBytes), tc.expected) // TODO do fuzz testing to ensure that unmarshal fails @@ -61,8 +58,8 @@ func TestJSONMarshal(t *testing.T) { if err != nil { t.Fatal(err) } - assert.Equal(ts2.B1, tc.input) - assert.Equal(ts2.B2, Bytes(tc.input)) + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, Bytes(tc.input)) }) } } From b25df389db3c98f4b964bd39511c199f02d07715 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Tue, 26 Dec 2017 04:40:35 -0800 Subject: [PATCH 34/68] Remove Bytes, just use []byte; Use protobuf for KVPair/KI64Pair --- Makefile | 32 +++++++++++--- common/bytes.go | 53 ----------------------- common/bytes_test.go | 65 ---------------------------- common/kvpair.go | 13 ++++-- common/types.pb.go | 101 +++++++++++++++++++++++++++++++++++++++++++ common/types.proto | 24 ++++++++++ 6 files changed, 161 insertions(+), 127 deletions(-) delete mode 100644 common/bytes.go delete mode 100644 common/bytes_test.go create mode 100644 common/types.pb.go create mode 100644 common/types.proto diff --git a/Makefile b/Makefile index 29a3ac7db..af60f7314 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,12 @@ -.PHONY: all test get_vendor_deps ensure_tools - GOTOOLS = \ github.com/Masterminds/glide \ - github.com/alecthomas/gometalinter.v2 -GOTOOLS_CHECK = glide gometalinter.v2 + github.com/alecthomas/gometalinter.v2 \ + github.com/gogo/protobuf/protoc-gen-gogo \ + github.com/gogo/protobuf/gogoproto +GOTOOLS_CHECK = glide gometalinter.v2 protoc protoc-gen-gogo +INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf -all: check get_vendor_deps build test install metalinter +all: check get_vendor_deps protoc build test install metalinter check: check_tools @@ -13,6 +14,15 @@ check: check_tools ######################################## ### Build +protoc: + ## If you get the following error, + ## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory" + ## See https://stackoverflow.com/a/25518702 + protoc $(INCLUDE) --gogo_out=plugins=grpc:. 
common/*.proto + @echo "--> adding nolint declarations to protobuf generated files" + @awk '/package common/ { print "//nolint: gas"; print; next }1' common/types.pb.go > common/types.pb.go.new + @mv common/types.pb.go.new common/types.pb.go + build: # Nothing to build! @@ -33,6 +43,16 @@ get_tools: go get -u -v $(GOTOOLS) @gometalinter.v2 --install +get_protoc: + @# https://github.com/google/protobuf/releases + curl -L https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz | tar xvz && \ + cd protobuf-3.4.1 && \ + DIST_LANG=cpp ./configure && \ + make && \ + make install && \ + cd .. && \ + rm -rf protobuf-3.4.1 + update_tools: @echo "--> Updating tools" @go get -u $(GOTOOLS) @@ -93,4 +113,4 @@ metalinter_all: # To avoid unintended conflicts with file names, always add to .PHONY # unless there is a reason not to. # https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build check_tools get_tools update_tools get_vendor_deps test fmt metalinter metalinter_all +.PHONY: check protoc build check_tools get_tools get_protoc update_tools get_vendor_deps test fmt metalinter metalinter_all diff --git a/common/bytes.go b/common/bytes.go deleted file mode 100644 index d9ede98df..000000000 --- a/common/bytes.go +++ /dev/null @@ -1,53 +0,0 @@ -package common - -import ( - "encoding/hex" - "fmt" - "strings" -) - -// The main purpose of Bytes is to enable HEX-encoding for json/encoding. -type Bytes []byte - -// Marshal needed for protobuf compatibility -func (b Bytes) Marshal() ([]byte, error) { - return b, nil -} - -// Unmarshal needed for protobuf compatibility -func (b *Bytes) Unmarshal(data []byte) error { - *b = data - return nil -} - -// This is the point of Bytes. -func (b Bytes) MarshalJSON() ([]byte, error) { - s := strings.ToUpper(hex.EncodeToString(b)) - jb := make([]byte, len(s)+2) - jb[0] = '"' - copy(jb[1:], []byte(s)) - jb[1] = '"' - return jb, nil -} - -// This is the point of Bytes. -func (b *Bytes) UnmarshalJSON(data []byte) error { - if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { - return fmt.Errorf("Invalid hex string: %s", data) - } - bytes, err := hex.DecodeString(string(data[1 : len(data)-1])) - if err != nil { - return err - } - *b = bytes - return nil -} - -// Allow it to fulfill various interfaces in light-client, etc... -func (b Bytes) Bytes() []byte { - return b -} - -func (b Bytes) String() string { - return strings.ToUpper(hex.EncodeToString(b)) -} diff --git a/common/bytes_test.go b/common/bytes_test.go deleted file mode 100644 index 2ad0e692c..000000000 --- a/common/bytes_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package common - -import ( - "encoding/json" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -// This is a trivial test for protobuf compatibility. -func TestMarshal(t *testing.T) { - b := []byte("hello world") - dataB := Bytes(b) - b2, err := dataB.Marshal() - assert.Nil(t, err) - assert.Equal(t, b, b2) - - var dataB2 Bytes - err = (&dataB2).Unmarshal(b) - assert.Nil(t, err) - assert.Equal(t, dataB, dataB2) -} - -// Test that the hex encoding works. 
-func TestJSONMarshal(t *testing.T) { - - type TestStruct struct { - B1 []byte - B2 Bytes - } - - cases := []struct { - input []byte - expected string - }{ - {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { - ts := TestStruct{B1: tc.input, B2: tc.input} - - // Test that it marshals correctly to JSON. - jsonBytes, err := json.Marshal(ts) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, string(jsonBytes), tc.expected) - - // TODO do fuzz testing to ensure that unmarshal fails - - // Test that unmarshaling works correctly. - ts2 := TestStruct{} - err = json.Unmarshal(jsonBytes, &ts2) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, ts2.B1, tc.input) - assert.Equal(t, ts2.B2, Bytes(tc.input)) - }) - } -} diff --git a/common/kvpair.go b/common/kvpair.go index 5faa534df..54c3a58c0 100644 --- a/common/kvpair.go +++ b/common/kvpair.go @@ -8,10 +8,14 @@ import ( //---------------------------------------- // KVPair +/* +Defined in types.proto + type KVPair struct { - Key Bytes - Value Bytes + Key []byte + Value []byte } +*/ type KVPairs []KVPair @@ -35,10 +39,13 @@ func (kvs KVPairs) Sort() { sort.Sort(kvs) } //---------------------------------------- // KI64Pair +/* +Defined in types.proto type KI64Pair struct { - Key Bytes + Key []byte Value int64 } +*/ type KI64Pairs []KI64Pair diff --git a/common/types.pb.go b/common/types.pb.go new file mode 100644 index 000000000..047b7aee2 --- /dev/null +++ b/common/types.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: common/types.proto + +/* +Package common is a generated protocol buffer package. + +It is generated from these files: + common/types.proto + +It has these top-level messages: + KVPair + KI64Pair +*/ +//nolint: gas +package common + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Define these here for compatibility but use tmlibs/common.KVPair. +type KVPair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KVPair) Reset() { *m = KVPair{} } +func (m *KVPair) String() string { return proto.CompactTextString(m) } +func (*KVPair) ProtoMessage() {} +func (*KVPair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} } + +func (m *KVPair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KVPair) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. 
+type KI64Pair struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *KI64Pair) Reset() { *m = KI64Pair{} } +func (m *KI64Pair) String() string { return proto.CompactTextString(m) } +func (*KI64Pair) ProtoMessage() {} +func (*KI64Pair) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} } + +func (m *KI64Pair) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *KI64Pair) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterType((*KVPair)(nil), "common.KVPair") + proto.RegisterType((*KI64Pair)(nil), "common.KI64Pair") +} + +func init() { proto.RegisterFile("common/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4a, 0xce, 0xcf, 0xcd, + 0xcd, 0xcf, 0xd3, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, + 0x83, 0x88, 0x49, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, + 0xe7, 0xa7, 0xe7, 0xeb, 0x83, 0xa5, 0x93, 0x4a, 0xd3, 0xc0, 0x3c, 0x30, 0x07, 0xcc, 0x82, 0x68, + 0x53, 0x32, 0xe0, 0x62, 0xf3, 0x0e, 0x0b, 0x48, 0xcc, 0x2c, 0x12, 0x12, 0xe0, 0x62, 0xce, 0x4e, + 0xad, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0x31, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x12, + 0x73, 0x4a, 0x53, 0x25, 0x98, 0xc0, 0x62, 0x10, 0x8e, 0x92, 0x11, 0x17, 0x87, 0xb7, 0xa7, 0x99, + 0x09, 0x31, 0x7a, 0x98, 0xa1, 0x7a, 0x92, 0xd8, 0xc0, 0x96, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x5c, 0xb8, 0x46, 0xc5, 0xb9, 0x00, 0x00, 0x00, +} diff --git a/common/types.proto b/common/types.proto new file mode 100644 index 000000000..94abcccc3 --- /dev/null +++ b/common/types.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; +package common; + +// For more information on gogo.proto, see: +// https://github.com/gogo/protobuf/blob/master/extensions.md +// NOTE: Try really hard not to use custom types, +// it's often complicated, broken, nor not worth it. +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + +//---------------------------------------- +// Abstract types + +// Define these here for compatibility but use tmlibs/common.KVPair. +message KVPair { + bytes key = 1; + bytes value = 2; +} + +// Define these here for compatibility but use tmlibs/common.KI64Pair. +message KI64Pair { + bytes key = 1; + int64 value = 2; +} From 93c05aa8c06ef38f2b15fcdd1d91eafefda2732d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 27 Dec 2017 13:52:32 -0800 Subject: [PATCH 35/68] Add back on HexBytes --- common/bytes.go | 53 ++++++++++++++++++++++++++++++++++++ common/bytes_test.go | 65 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 common/bytes.go create mode 100644 common/bytes_test.go diff --git a/common/bytes.go b/common/bytes.go new file mode 100644 index 000000000..1ec880c25 --- /dev/null +++ b/common/bytes.go @@ -0,0 +1,53 @@ +package common + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// The main purpose of HexBytes is to enable HEX-encoding for json/encoding. 
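+// Marshal/Unmarshal (kept for protobuf compatibility) pass the bytes through unchanged;
+// MarshalJSON/UnmarshalJSON and String use upper-case hex.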
+type HexBytes []byte + +// Marshal needed for protobuf compatibility +func (bz HexBytes) Marshal() ([]byte, error) { + return bz, nil +} + +// Unmarshal needed for protobuf compatibility +func (bz *HexBytes) Unmarshal(data []byte) error { + *bz = data + return nil +} + +// This is the point of Bytes. +func (bz HexBytes) MarshalJSON() ([]byte, error) { + s := strings.ToUpper(hex.EncodeToString(bz)) + jbz := make([]byte, len(s)+2) + jbz[0] = '"' + copy(jbz[1:], []byte(s)) + jbz[1] = '"' + return jbz, nil +} + +// This is the point of Bytes. +func (bz *HexBytes) UnmarshalJSON(data []byte) error { + if len(data) < 2 || data[0] != '"' || data[len(data)-1] != '"' { + return fmt.Errorf("Invalid hex string: %s", data) + } + bz2, err := hex.DecodeString(string(data[1 : len(data)-1])) + if err != nil { + return err + } + *bz = bz2 + return nil +} + +// Allow it to fulfill various interfaces in light-client, etc... +func (bz HexBytes) Bytes() []byte { + return bz +} + +func (bz HexBytes) String() string { + return strings.ToUpper(hex.EncodeToString(bz)) +} diff --git a/common/bytes_test.go b/common/bytes_test.go new file mode 100644 index 000000000..3e693b239 --- /dev/null +++ b/common/bytes_test.go @@ -0,0 +1,65 @@ +package common + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +// This is a trivial test for protobuf compatibility. +func TestMarshal(t *testing.T) { + bz := []byte("hello world") + dataB := HexBytes(bz) + bz2, err := dataB.Marshal() + assert.Nil(t, err) + assert.Equal(t, bz, bz2) + + var dataB2 HexBytes + err = (&dataB2).Unmarshal(bz) + assert.Nil(t, err) + assert.Equal(t, dataB, dataB2) +} + +// Test that the hex encoding works. +func TestJSONMarshal(t *testing.T) { + + type TestStruct struct { + B1 []byte + B2 HexBytes + } + + cases := []struct { + input []byte + expected string + }{ + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(``), `{"B1":"","B2":""}`}, + } + + for i, tc := range cases { + t.Run(fmt.Sprintf("Case %d", i), func(t *testing.T) { + ts := TestStruct{B1: tc.input, B2: tc.input} + + // Test that it marshals correctly to JSON. + jsonBytes, err := json.Marshal(ts) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, string(jsonBytes), tc.expected) + + // TODO do fuzz testing to ensure that unmarshal fails + + // Test that unmarshaling works correctly. + ts2 := TestStruct{} + err = json.Unmarshal(jsonBytes, &ts2) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, ts2.B1, tc.input) + assert.Equal(t, ts2.B2, HexBytes(tc.input)) + }) + } +} From b31397aff5f43216ba831338f7dcdee8a53cf433 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 28 Dec 2017 18:30:56 -0800 Subject: [PATCH 36/68] Fix GoLevelDB Iterator which needs to copy a temp []byte --- db/c_level_db.go | 15 +++++++++- db/db.go | 2 +- db/go_level_db.go | 49 ++++++++++++++++++++++++-------- db/mem_db.go | 71 ++++++++++++++++++++++++++++++++--------------- db/types.go | 20 +++++++++---- glide.lock | 24 ++++++++++------ 6 files changed, 129 insertions(+), 52 deletions(-) diff --git a/db/c_level_db.go b/db/c_level_db.go index 7910628bf..f1a5a3aef 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -50,6 +50,7 @@ func NewCLevelDB(name string, dir string) (*CLevelDB, error) { return database, nil } +// Implements DB. func (db *CLevelDB) Get(key []byte) []byte { key = nonNilBytes(key) res, err := db.db.Get(db.ro, key) @@ -59,10 +60,12 @@ func (db *CLevelDB) Get(key []byte) []byte { return res } +// Implements DB. 
func (db *CLevelDB) Has(key []byte) bool { return db.Get(key) != nil } +// Implements DB. func (db *CLevelDB) Set(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -72,6 +75,7 @@ func (db *CLevelDB) Set(key []byte, value []byte) { } } +// Implements DB. func (db *CLevelDB) SetSync(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -81,6 +85,7 @@ func (db *CLevelDB) SetSync(key []byte, value []byte) { } } +// Implements DB. func (db *CLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(db.wo, key) @@ -89,6 +94,7 @@ func (db *CLevelDB) Delete(key []byte) { } } +// Implements DB. func (db *CLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(db.woSync, key) @@ -101,6 +107,7 @@ func (db *CLevelDB) DB() *levigo.DB { return db.db } +// Implements DB. func (db *CLevelDB) Close() { db.db.Close() db.ro.Close() @@ -108,6 +115,7 @@ func (db *CLevelDB) Close() { db.woSync.Close() } +// Implements DB. func (db *CLevelDB) Print() { itr := db.Iterator(nil, nil) defer itr.Close() @@ -118,6 +126,7 @@ func (db *CLevelDB) Print() { } } +// Implements DB. func (db *CLevelDB) Stats() map[string]string { // TODO: Find the available properties for the C LevelDB implementation keys := []string{} @@ -133,6 +142,7 @@ func (db *CLevelDB) Stats() map[string]string { //---------------------------------------- // Batch +// Implements DB. func (db *CLevelDB) NewBatch() Batch { batch := levigo.NewWriteBatch() return &cLevelDBBatch{db, batch} @@ -143,14 +153,17 @@ type cLevelDBBatch struct { batch *levigo.WriteBatch } +// Implements Batch. func (mBatch *cLevelDBBatch) Set(key, value []byte) { mBatch.batch.Put(key, value) } +// Implements Batch. func (mBatch *cLevelDBBatch) Delete(key []byte) { mBatch.batch.Delete(key) } +// Implements Batch. func (mBatch *cLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.db.wo, mBatch.batch) if err != nil { @@ -204,7 +217,7 @@ func (itr cLevelDBIterator) Domain() ([]byte, []byte) { } func (itr cLevelDBIterator) Valid() bool { - + // Once invalid, forever invalid. if itr.isInvalid { return false diff --git a/db/db.go b/db/db.go index b43b06554..25ff93ec5 100644 --- a/db/db.go +++ b/db/db.go @@ -2,7 +2,7 @@ package db import "fmt" -//----------------------------------------------------------------------------- +//---------------------------------------- // Main entry const ( diff --git a/db/go_level_db.go b/db/go_level_db.go index bf2b3bf76..7d60e060f 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -39,6 +39,7 @@ func NewGoLevelDB(name string, dir string) (*GoLevelDB, error) { return database, nil } +// Implements DB. func (db *GoLevelDB) Get(key []byte) []byte { key = nonNilBytes(key) res, err := db.db.Get(key, nil) @@ -52,10 +53,12 @@ func (db *GoLevelDB) Get(key []byte) []byte { return res } +// Implements DB. func (db *GoLevelDB) Has(key []byte) bool { return db.Get(key) != nil } +// Implements DB. func (db *GoLevelDB) Set(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -65,6 +68,7 @@ func (db *GoLevelDB) Set(key []byte, value []byte) { } } +// Implements DB. func (db *GoLevelDB) SetSync(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -74,6 +78,7 @@ func (db *GoLevelDB) SetSync(key []byte, value []byte) { } } +// Implements DB. 
func (db *GoLevelDB) Delete(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, nil) @@ -82,6 +87,7 @@ func (db *GoLevelDB) Delete(key []byte) { } } +// Implements DB. func (db *GoLevelDB) DeleteSync(key []byte) { key = nonNilBytes(key) err := db.db.Delete(key, &opt.WriteOptions{Sync: true}) @@ -94,10 +100,12 @@ func (db *GoLevelDB) DB() *leveldb.DB { return db.db } +// Implements DB. func (db *GoLevelDB) Close() { db.db.Close() } +// Implements DB. func (db *GoLevelDB) Print() { str, _ := db.db.GetProperty("leveldb.stats") fmt.Printf("%v\n", str) @@ -110,6 +118,7 @@ func (db *GoLevelDB) Print() { } } +// Implements DB. func (db *GoLevelDB) Stats() map[string]string { keys := []string{ "leveldb.num-files-at-level{n}", @@ -135,6 +144,7 @@ func (db *GoLevelDB) Stats() map[string]string { //---------------------------------------- // Batch +// Implements DB. func (db *GoLevelDB) NewBatch() Batch { batch := new(leveldb.Batch) return &goLevelDBBatch{db, batch} @@ -145,18 +155,21 @@ type goLevelDBBatch struct { batch *leveldb.Batch } +// Implements Batch. func (mBatch *goLevelDBBatch) Set(key, value []byte) { mBatch.batch.Put(key, value) } +// Implements Batch. func (mBatch *goLevelDBBatch) Delete(key []byte) { mBatch.batch.Delete(key) } +// Implements Batch. func (mBatch *goLevelDBBatch) Write() { err := mBatch.db.db.Write(mBatch.batch, nil) if err != nil { - PanicCrisis(err) + panic(err) } } @@ -165,6 +178,17 @@ func (mBatch *goLevelDBBatch) Write() { // NOTE This is almost identical to db/c_level_db.Iterator // Before creating a third version, refactor. +// Implements DB. +func (db *GoLevelDB) Iterator(start, end []byte) Iterator { + itr := db.db.NewIterator(nil, nil) + return newGoLevelDBIterator(itr, start, end, false) +} + +// Implements DB. +func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { + panic("not implemented yet") // XXX +} + type goLevelDBIterator struct { source iterator.Iterator start []byte @@ -189,19 +213,12 @@ func newGoLevelDBIterator(source iterator.Iterator, start, end []byte, isReverse } } -func (db *GoLevelDB) Iterator(start, end []byte) Iterator { - itr := db.db.NewIterator(nil, nil) - return newGoLevelDBIterator(itr, start, end, false) -} - -func (db *GoLevelDB) ReverseIterator(start, end []byte) Iterator { - panic("not implemented yet") // XXX -} - +// Implements Iterator. func (itr *goLevelDBIterator) Domain() ([]byte, []byte) { return itr.start, itr.end } +// Implements Iterator. func (itr *goLevelDBIterator) Valid() bool { // Once invalid, forever invalid. @@ -230,24 +247,32 @@ func (itr *goLevelDBIterator) Valid() bool { return true } +// Implements Iterator. func (itr *goLevelDBIterator) Key() []byte { + // Key returns a copy of the current key. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 itr.assertNoError() itr.assertIsValid() - return itr.source.Key() + return cp(itr.source.Key()) } +// Implements Iterator. func (itr *goLevelDBIterator) Value() []byte { + // Value returns a copy of the current value. + // See https://github.com/syndtr/goleveldb/blob/52c212e6c196a1404ea59592d3f1c227c9f034b2/leveldb/iterator/iter.go#L88 itr.assertNoError() itr.assertIsValid() - return itr.source.Value() + return cp(itr.source.Value()) } +// Implements Iterator. func (itr *goLevelDBIterator) Next() { itr.assertNoError() itr.assertIsValid() itr.source.Next() } +// Implements Iterator. 
func (itr *goLevelDBIterator) Close() { itr.source.Release() } diff --git a/db/mem_db.go b/db/mem_db.go index e2470d7f2..1e3bee5a5 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -26,14 +26,16 @@ func NewMemDB() *MemDB { return database } +// Implements DB. func (db *MemDB) Get(key []byte) []byte { db.mtx.Lock() defer db.mtx.Unlock() key = nonNilBytes(key) - + return db.db[string(key)] } +// Implements DB. func (db *MemDB) Has(key []byte) bool { db.mtx.Lock() defer db.mtx.Unlock() @@ -43,6 +45,7 @@ func (db *MemDB) Has(key []byte) bool { return ok } +// Implements DB. func (db *MemDB) Set(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -50,6 +53,7 @@ func (db *MemDB) Set(key []byte, value []byte) { db.SetNoLock(key, value) } +// Implements DB. func (db *MemDB) SetSync(key []byte, value []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -57,7 +61,7 @@ func (db *MemDB) SetSync(key []byte, value []byte) { db.SetNoLock(key, value) } -// NOTE: Implements atomicSetDeleter +// Implements atomicSetDeleter. func (db *MemDB) SetNoLock(key []byte, value []byte) { key = nonNilBytes(key) value = nonNilBytes(value) @@ -65,6 +69,7 @@ func (db *MemDB) SetNoLock(key []byte, value []byte) { db.db[string(key)] = value } +// Implements DB. func (db *MemDB) Delete(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -72,6 +77,7 @@ func (db *MemDB) Delete(key []byte) { db.DeleteNoLock(key) } +// Implements DB. func (db *MemDB) DeleteSync(key []byte) { db.mtx.Lock() defer db.mtx.Unlock() @@ -79,13 +85,14 @@ func (db *MemDB) DeleteSync(key []byte) { db.DeleteNoLock(key) } -// NOTE: Implements atomicSetDeleter +// Implements atomicSetDeleter. func (db *MemDB) DeleteNoLock(key []byte) { key = nonNilBytes(key) delete(db.db, string(key)) } +// Implements DB. func (db *MemDB) Close() { // Close is a noop since for an in-memory // database, we don't have a destination @@ -94,6 +101,7 @@ func (db *MemDB) Close() { // See the discussion in https://github.com/tendermint/tmlibs/pull/56 } +// Implements DB. func (db *MemDB) Print() { db.mtx.Lock() defer db.mtx.Unlock() @@ -103,6 +111,7 @@ func (db *MemDB) Print() { } } +// Implements DB. func (db *MemDB) Stats() map[string]string { db.mtx.Lock() defer db.mtx.Unlock() @@ -113,6 +122,10 @@ func (db *MemDB) Stats() map[string]string { return stats } +//---------------------------------------- +// Batch + +// Implements DB. func (db *MemDB) NewBatch() Batch { db.mtx.Lock() defer db.mtx.Unlock() @@ -125,7 +138,9 @@ func (db *MemDB) Mutex() *sync.Mutex { } //---------------------------------------- +// Iterator +// Implements DB. func (db *MemDB) Iterator(start, end []byte) Iterator { db.mtx.Lock() defer db.mtx.Unlock() @@ -134,6 +149,7 @@ func (db *MemDB) Iterator(start, end []byte) Iterator { return newMemDBIterator(db, keys, start, end) } +// Implements DB. func (db *MemDB) ReverseIterator(start, end []byte) Iterator { db.mtx.Lock() defer db.mtx.Unlock() @@ -142,25 +158,6 @@ func (db *MemDB) ReverseIterator(start, end []byte) Iterator { return newMemDBIterator(db, keys, start, end) } -func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { - keys := []string{} - for key, _ := range db.db { - if IsKeyInDomain([]byte(key), start, end, false) { - keys = append(keys, key) - } - } - sort.Strings(keys) - if reverse { - nkeys := len(keys) - for i := 0; i < nkeys/2; i++ { - keys[i] = keys[nkeys-i-1] - } - } - return keys -} - -var _ Iterator = (*memDBIterator)(nil) - // We need a copy of all of the keys. 
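// The key set is snapshotted (and sorted) when the iterator is created, so iteration
// walks a stable, ordered list while values are still read from the live map via db.Get.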
// Not the best, but probably not a bottleneck depending. type memDBIterator struct { @@ -171,6 +168,8 @@ type memDBIterator struct { end []byte } +var _ Iterator = (*memDBIterator)(nil) + // Keys is expected to be in reverse order for reverse iterators. func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { return &memDBIterator{ @@ -182,30 +181,36 @@ func newMemDBIterator(db DB, keys []string, start, end []byte) *memDBIterator { } } +// Implements Iterator. func (itr *memDBIterator) Domain() ([]byte, []byte) { return itr.start, itr.end } +// Implements Iterator. func (itr *memDBIterator) Valid() bool { return 0 <= itr.cur && itr.cur < len(itr.keys) } +// Implements Iterator. func (itr *memDBIterator) Next() { itr.assertIsValid() itr.cur++ } +// Implements Iterator. func (itr *memDBIterator) Key() []byte { itr.assertIsValid() return []byte(itr.keys[itr.cur]) } +// Implements Iterator. func (itr *memDBIterator) Value() []byte { itr.assertIsValid() key := []byte(itr.keys[itr.cur]) return itr.db.Get(key) } +// Implements Iterator. func (itr *memDBIterator) Close() { itr.keys = nil itr.db = nil @@ -215,4 +220,24 @@ func (itr *memDBIterator) assertIsValid() { if !itr.Valid() { panic("memDBIterator is invalid") } -} \ No newline at end of file +} + +//---------------------------------------- +// Misc. + +func (db *MemDB) getSortedKeys(start, end []byte, reverse bool) []string { + keys := []string{} + for key, _ := range db.db { + if IsKeyInDomain([]byte(key), start, end, false) { + keys = append(keys, key) + } + } + sort.Strings(keys) + if reverse { + nkeys := len(keys) + for i := 0; i < nkeys/2; i++ { + keys[i] = keys[nkeys-i-1] + } + } + return keys +} diff --git a/db/types.go b/db/types.go index 6e5d2408d..07858087a 100644 --- a/db/types.go +++ b/db/types.go @@ -4,19 +4,23 @@ type DB interface { // Get returns nil iff key doesn't exist. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte Get([]byte) []byte // Has checks if a key exists. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte Has(key []byte) bool // Set sets the key. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key, value readonly []byte Set([]byte, []byte) SetSync([]byte, []byte) // Delete deletes the key. // A nil key is interpreted as an empty byteslice. + // CONTRACT: key readonly []byte Delete([]byte) DeleteSync([]byte) @@ -25,6 +29,7 @@ type DB interface { // A nil start is interpreted as an empty byteslice. // If end is nil, iterates up to the last item (inclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte Iterator(start, end []byte) Iterator // Iterate over a domain of keys in descending order. End is exclusive. @@ -32,6 +37,7 @@ type DB interface { // If start is nil, iterates from the last/greatest item (inclusive). // If end is nil, iterates up to the first/least item (iclusive). // CONTRACT: No writes may happen within a domain while an iterator exists over it. + // CONTRACT: start, end readonly []byte ReverseIterator(start, end []byte) Iterator // Closes the connection. 
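Aside: the Get/Set/Iterator contracts documented above are easiest to see end to end with a tiny driver. The following is a minimal editorial sketch, not part of any commit in this series; it uses only constructors and methods visible in these diffs (NewMemDB, Set, Iterator, Key, Value, Close), and the dbm import alias is an assumption.

package main

import (
	"fmt"

	dbm "github.com/tendermint/tmlibs/db"
)

func main() {
	db := dbm.NewMemDB()
	defer db.Close()

	db.Set([]byte("k1"), []byte("v1"))
	db.Set([]byte("k2"), []byte("v2"))

	// A nil start and end iterate the whole key space in ascending order.
	itr := db.Iterator(nil, nil)
	defer itr.Close()
	for ; itr.Valid(); itr.Next() {
		// CONTRACT: Key() and Value() are read-only; copy before mutating.
		fmt.Printf("%s = %s\n", itr.Key(), itr.Value())
	}
}
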
@@ -56,11 +62,12 @@ type Batch interface { } type SetDeleter interface { - Set(key, value []byte) - Delete(key []byte) + Set(key, value []byte) // CONTRACT: key, value readonly []byte + Delete(key []byte) // CONTRACT: key readonly []byte } //---------------------------------------- +// Iterator /* Usage: @@ -83,6 +90,7 @@ type Iterator interface { // // The smallest key is the empty byte array []byte{} - see BeginningKey(). // The largest key is the nil byte array []byte(nil) - see EndingKey(). + // CONTRACT: start, end readonly []byte Domain() (start []byte, end []byte) // Valid returns whether the current position is valid. @@ -96,14 +104,14 @@ type Iterator interface { Next() // Key returns the key of the cursor. - // // If Valid returns false, this method will panic. - Key() []byte + // CONTRACT: key readonly []byte + Key() (key []byte) // Value returns the value of the cursor. - // // If Valid returns false, this method will panic. - Value() []byte + // CONTRACT: value readonly []byte + Value() (value []byte) // Close releases the Iterator. Close() diff --git a/glide.lock b/glide.lock index 83c8551e0..146a32a0f 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,10 @@ hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 -updated: 2017-12-25T17:45:52.357002873-08:00 +updated: 2017-12-28T18:27:21.247160207-08:00 imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e3b2152e0063c5f05efea89ecbe297852af2a92d + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -12,7 +12,13 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf +- name: github.com/gogo/protobuf + version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 + subpackages: + - gogoproto + - proto + - protoc-gen-gogo/descriptor - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/hashicorp/hcl @@ -45,7 +51,7 @@ imports: - name: github.com/pelletier/go-toml version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors - version: f15c970de5b76fac0b59abb32d62c17cc7bed265 + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: @@ -57,11 +63,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: adf24ef3f94bd13ec4163060b21a5678f22b429b + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -76,7 +82,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 5ab49b4c6ad674da6b81442911cf713ef0afb544 + version: 27be46e25124ddf775e23317a83647ce62a93f6b subpackages: - data - data/base58 @@ -85,7 +91,7 @@ imports: subpackages: - term - name: golang.org/x/crypto - version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys @@ -93,7 +99,7 @@ imports: subpackages: - unix - name: golang.org/x/text - version: 
75cc3cad82b5f47d3fb229ddda8c5167da14f294 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm From 9f72e25b23f3a3120af5e48b5b7520c34b88775f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 16 Dec 2017 00:03:40 -0500 Subject: [PATCH 37/68] readme --- README.md | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..d5a11c7b4 --- /dev/null +++ b/README.md @@ -0,0 +1,53 @@ +# TMLIBS + +This repo is a home for various small packages. + +## autofile + +Autofile is file access with automatic log rotation. A group of files is maintained and rotation happens +when the leading file gets too big. Provides a reader for reading from the file group. + +## cli + +CLI wraps the `cobra` and `viper` packages and handles some common elements of building a CLI like flags and env vars for the home directory and the logger. + +## clist + +Clist provides a linekd list that is safe for concurrent access by many readers. + +## common + +Common provides a hodgepodge of useful functions. + +## db + +DB provides a database interface and a number of implementions, including ones using an in-memory map, the filesystem directory structure, +an implemention of LevelDB in Go, and the official LevelDB in C. + +## events + +Events is a synchronous PubSub package. + +## flowrate + +Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` method. + +## log + +Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. + +## logger + +Logger is DEPRECATED. It's a simple wrapper around `log15`. + +## merkle + +Merkle provides a simple static merkle tree and corresponding proofs. + +## process + +Process is a simple utility for spawning OS processes. + +## pubsub + +PubSub is an asynchronous PubSub package. From a84bc2f5b26094bbd15dfefe46a2ac932fc9d557 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Fri, 29 Dec 2017 15:23:07 +0000 Subject: [PATCH 38/68] logger is deprecated, removed; closes #115 --- README.md | 4 --- logger/log.go | 78 --------------------------------------------------- 2 files changed, 82 deletions(-) delete mode 100644 logger/log.go diff --git a/README.md b/README.md index d5a11c7b4..9ea618dbd 100644 --- a/README.md +++ b/README.md @@ -36,10 +36,6 @@ Flowrate is a fork of https://github.com/mxk/go-flowrate that added a `SetREMA` Log is a log package structured around key-value pairs that allows logging level to be set differently for different keys. -## logger - -Logger is DEPRECATED. It's a simple wrapper around `log15`. - ## merkle Merkle provides a simple static merkle tree and corresponding proofs. diff --git a/logger/log.go b/logger/log.go deleted file mode 100644 index 2f4faef6b..000000000 --- a/logger/log.go +++ /dev/null @@ -1,78 +0,0 @@ -// DEPRECATED! Use newer log package. -package logger - -import ( - "os" - - "github.com/tendermint/log15" - . 
"github.com/tendermint/tmlibs/common" -) - -var mainHandler log15.Handler -var bypassHandler log15.Handler - -func init() { - resetWithLogLevel("debug") -} - -func SetLogLevel(logLevel string) { - resetWithLogLevel(logLevel) -} - -func resetWithLogLevel(logLevel string) { - // main handler - //handlers := []log15.Handler{} - mainHandler = log15.LvlFilterHandler( - getLevel(logLevel), - log15.StreamHandler(os.Stdout, log15.TerminalFormat()), - ) - //handlers = append(handlers, mainHandler) - - // bypass handler for not filtering on global logLevel. - bypassHandler = log15.StreamHandler(os.Stdout, log15.TerminalFormat()) - //handlers = append(handlers, bypassHandler) - - // By setting handlers on the root, we handle events from all loggers. - log15.Root().SetHandler(mainHandler) -} - -// See go-wire/log for an example of usage. -func MainHandler() log15.Handler { - return mainHandler -} - -func New(ctx ...interface{}) log15.Logger { - return NewMain(ctx...) -} - -func BypassHandler() log15.Handler { - return bypassHandler -} - -func NewMain(ctx ...interface{}) log15.Logger { - return log15.Root().New(ctx...) -} - -func NewBypass(ctx ...interface{}) log15.Logger { - bypass := log15.New(ctx...) - bypass.SetHandler(bypassHandler) - return bypass -} - -func getLevel(lvlString string) log15.Lvl { - lvl, err := log15.LvlFromString(lvlString) - if err != nil { - Exit(Fmt("Invalid log level %v: %v", lvlString, err)) - } - return lvl -} - -//---------------------------------------- -// Exported from log15 - -var LvlFilterHandler = log15.LvlFilterHandler -var LvlDebug = log15.LvlDebug -var LvlInfo = log15.LvlInfo -var LvlNotice = log15.LvlNotice -var LvlWarn = log15.LvlWarn -var LvlError = log15.LvlError From 7fe3d5dac2786aae214fbd5b1df45bc9e4db9e18 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 10:48:36 -0500 Subject: [PATCH 39/68] metalinter --- Makefile | 2 +- db/stats.go | 7 ------- 2 files changed, 1 insertion(+), 8 deletions(-) delete mode 100644 db/stats.go diff --git a/Makefile b/Makefile index f1ee1004e..dfdd6bef5 100644 --- a/Makefile +++ b/Makefile @@ -77,7 +77,6 @@ fmt: metalinter: @echo "==> Running linter" gometalinter.v2 --vendor --deadline=600s --disable-all \ - --enable=maligned \ --enable=deadcode \ --enable=goconst \ --enable=goimports \ @@ -94,6 +93,7 @@ metalinter: --enable=vetshadow \ ./... 
+ #--enable=maligned \ #--enable=gas \ #--enable=aligncheck \ #--enable=dupl \ diff --git a/db/stats.go b/db/stats.go deleted file mode 100644 index ef4b0dd0f..000000000 --- a/db/stats.go +++ /dev/null @@ -1,7 +0,0 @@ -package db - -func mergeStats(src, dest map[string]string, prefix string) { - for key, value := range src { - dest[prefix+key] = value - } -} From 1838db28803edbc8c77cefc814652f32bbae39d7 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 10:53:56 -0500 Subject: [PATCH 40/68] circle --- circle.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/circle.yml b/circle.yml index 104cfa6f3..390ffb039 100644 --- a/circle.yml +++ b/circle.yml @@ -15,7 +15,7 @@ dependencies: test: override: - - cd $PROJECT_PATH && make get_vendor_deps && bash ./test.sh + - cd $PROJECT_PATH && make get_tools && make get_vendor_deps && bash ./test.sh post: - cd "$PROJECT_PATH" && bash <(curl -s https://codecov.io/bash) -f coverage.txt - cd "$PROJECT_PATH" && mv coverage.txt "${CIRCLE_ARTIFACTS}" From 1460540acd267c37f3d58ebe18cdaf4baec15f7f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 10:59:43 -0500 Subject: [PATCH 41/68] metalinter is for another time --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index dfdd6bef5..e15356c2c 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,9 @@ GOTOOLS = \ github.com/Masterminds/glide \ - github.com/alecthomas/gometalinter.v2 \ github.com/gogo/protobuf/protoc-gen-gogo \ github.com/gogo/protobuf/gogoproto + # github.com/alecthomas/gometalinter.v2 \ + GOTOOLS_CHECK = glide gometalinter.v2 protoc protoc-gen-gogo INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf @@ -40,7 +41,7 @@ check_tools: get_tools: @echo "--> Installing tools" go get -u -v $(GOTOOLS) - @gometalinter.v2 --install + # @gometalinter.v2 --install get_protoc: @# https://github.com/google/protobuf/releases From 2bb538b150f197a04a0b969a27e9ea24d35edbc1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Tue, 2 Jan 2018 11:05:53 -0500 Subject: [PATCH 42/68] cmn: fix HexBytes.MarshalJSON --- common/bytes.go | 2 +- common/bytes_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/common/bytes.go b/common/bytes.go index 1ec880c25..ba81bbe97 100644 --- a/common/bytes.go +++ b/common/bytes.go @@ -26,7 +26,7 @@ func (bz HexBytes) MarshalJSON() ([]byte, error) { jbz := make([]byte, len(s)+2) jbz[0] = '"' copy(jbz[1:], []byte(s)) - jbz[1] = '"' + jbz[len(jbz)-1] = '"' return jbz, nil } diff --git a/common/bytes_test.go b/common/bytes_test.go index 3e693b239..9e11988f2 100644 --- a/common/bytes_test.go +++ b/common/bytes_test.go @@ -35,8 +35,8 @@ func TestJSONMarshal(t *testing.T) { expected string }{ {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, - {[]byte(``), `{"B1":"","B2":""}`}, + {[]byte(`a`), `{"B1":"YQ==","B2":"61"}`}, + {[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`}, } for i, tc := range cases { From 84afef20f5d960b033c9c8d84710331e6cacec70 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Sun, 28 Jan 2018 10:35:34 -0700 Subject: [PATCH 43/68] common: fix BitArray.Update to avoid nil dereference Update previously only checked that the receiver was non-nil but didn't check that the input parameter to update "o" was non-nil causing a nil dereference in cases such as https://github.com/tendermint/tendermint/blob/fe632ea32a89c3d9804bbd6e3ce9391b1d5a0993/consensus/reactor.go#L306 Fixes 
https://github.com/tendermint/tendermint/issues/1169 --- common/bit_array.go | 2 +- common/bit_array_test.go | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/common/bit_array.go b/common/bit_array.go index 848763b48..68201bad6 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -306,7 +306,7 @@ func (bA *BitArray) Bytes() []byte { // so if necessary, caller must copy or lock o prior to calling Update. // If bA is nil, does nothing. func (bA *BitArray) Update(o *BitArray) { - if bA == nil { + if bA == nil || o == nil { return } bA.mtx.Lock() diff --git a/common/bit_array_test.go b/common/bit_array_test.go index 1c72882c7..9a787e441 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -164,3 +164,26 @@ func TestEmptyFull(t *testing.T) { } } } + +func TestUpdateNeverPanics(t *testing.T) { + newRandBitArray := func(n int) *BitArray { + ba, _ := randBitArray(n) + return ba + } + pairs := []struct { + a, b *BitArray + }{ + {nil, nil}, + {newRandBitArray(10), newRandBitArray(12)}, + {newRandBitArray(0), NewBitArray(10)}, + {nil, NewBitArray(10)}, + {nil, newRandBitArray(64)}, + {newRandBitArray(63), newRandBitArray(64)}, + } + + for _, pair := range pairs { + a, b := pair.a, pair.b + a.Update(b) + b.Update(a) + } +} From 85be26c675b05a2a75c856f7c22b446d8df1c944 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Sun, 28 Jan 2018 22:02:46 -0700 Subject: [PATCH 44/68] common: BitArray: feedback from @adrianbrink to simplify tests --- common/bit_array_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/common/bit_array_test.go b/common/bit_array_test.go index 9a787e441..e4ac8bf6f 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -175,10 +175,9 @@ func TestUpdateNeverPanics(t *testing.T) { }{ {nil, nil}, {newRandBitArray(10), newRandBitArray(12)}, - {newRandBitArray(0), NewBitArray(10)}, + {newRandBitArray(23), newRandBitArray(23)}, + {newRandBitArray(37), nil}, {nil, NewBitArray(10)}, - {nil, newRandBitArray(64)}, - {newRandBitArray(63), newRandBitArray(64)}, } for _, pair := range pairs { From bcd8d403dcef53c1fabf2521362c467459fabafc Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 13:27:34 -0800 Subject: [PATCH 45/68] Remove encoding from common cli --- cli/setup.go | 22 +--------------------- 1 file changed, 1 insertion(+), 21 deletions(-) diff --git a/cli/setup.go b/cli/setup.go index 295477598..2dcadb407 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -8,9 +8,6 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" - - data "github.com/tendermint/go-wire/data" - "github.com/tendermint/go-wire/data/base58" ) const ( @@ -42,7 +39,7 @@ func PrepareBaseCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor func PrepareMainCmd(cmd *cobra.Command, envPrefix, defaultHome string) Executor { cmd.PersistentFlags().StringP(EncodingFlag, "e", "hex", "Binary encoding (hex|b64|btc)") cmd.PersistentFlags().StringP(OutputFlag, "o", "text", "Output format (text|json)") - cmd.PersistentPreRunE = concatCobraCmdFuncs(setEncoding, validateOutput, cmd.PersistentPreRunE) + cmd.PersistentPreRunE = concatCobraCmdFuncs(validateOutput, cmd.PersistentPreRunE) return PrepareBaseCmd(cmd, envPrefix, defaultHome) } @@ -147,23 +144,6 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { return nil } -// setEncoding reads the encoding flag -func setEncoding(cmd *cobra.Command, args []string) error { - // validate and set encoding - enc := 
viper.GetString("encoding") - switch enc { - case "hex": - data.Encoder = data.HexEncoder - case "b64": - data.Encoder = data.B64Encoder - case "btc": - data.Encoder = base58.BTCEncoder - default: - return errors.Errorf("Unsupported encoding: %s", enc) - } - return nil -} - func validateOutput(cmd *cobra.Command, args []string) error { // validate output format output := viper.GetString(OutputFlag) From 4e2a275a67614a250f70adba2340cd463b24f06d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 13:30:47 -0800 Subject: [PATCH 46/68] Update to use tmlibs sdk2 --- glide.lock | 62 +++++++++++++++++++++++------------------------------- glide.yaml | 4 +--- 2 files changed, 27 insertions(+), 39 deletions(-) diff --git a/glide.lock b/glide.lock index aaf7c07e2..5576d090a 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,14 @@ -hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 -updated: 2017-12-28T18:27:21.247160207-08:00 +hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 +updated: 2018-01-14T13:29:55.282854028-08:00 imports: +- name: github.com/davecgh/go-spew + version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 + subpackages: + - spew - name: github.com/fsnotify/fsnotify - version: 4da3e2cfbabc9f751898f250b49f2439785783a1 + version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: 953e747656a7bbb5e1f998608b460458958b70cc subpackages: - log - log/level @@ -12,7 +16,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -26,6 +30,7 @@ imports: subpackages: - hcl/ast - hcl/parser + - hcl/printer - hcl/scanner - hcl/strconv - hcl/token @@ -39,21 +44,15 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a -- name: github.com/mattn/go-colorable - version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 -- name: github.com/mattn/go-isatty - version: a5cdd64afdee435007ee3e9f6ed4684af949d568 + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure - version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: b4575eea38cca1123ec2dc90c26529b5c5acfcff - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 - name: github.com/pkg/errors - version: 645ef00459ed84a119197bfb8d8205042c6df63d + version: e881fd58d78e04cf6d0de1217f8707c8cc2249bc - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 57afd63c68602b63ed976de00dd066ccb3c319db subpackages: - mem - name: github.com/spf13/cast @@ -61,13 +60,13 @@ imports: - name: github.com/spf13/cobra version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 12bd96e66386c1960ab0f74ced1362f66f552f7b + version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 - name: github.com/spf13/pflag - version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 
8ef37cbca71638bf32f3d5e194117d4cb46da163 + version: aafc9e6bc7b7bb53ddaa75a5ef49a17d6e654be5 - name: github.com/syndtr/goleveldb - version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 + version: 34011bf325bce385408353a30b101fe5e923eb6e subpackages: - leveldb - leveldb/cache @@ -82,40 +81,31 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 27be46e25124ddf775e23317a83647ce62a93f6b - subpackages: - - data - - data/base58 + version: b93ebdd4f306833936c243561ec30af3455dc764 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - subpackages: - - term - name: golang.org/x/crypto - version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 + version: 13931e22f9e72ea58bb73048bc752b48c6d4d4ac subpackages: - ripemd160 - name: golang.org/x/sys - version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + version: 810d7000345868fc619eb81f46307107118f4ae1 subpackages: - unix - name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + version: e19ae1496984b1c655b8044a65c0300a3c878dd3 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: d670f9405373e636a5a2765eea47fac0c9bc91a4 testImports: -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew - name: github.com/pmezard/go-difflib - version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + version: 792786c7400a136282c1664665ae0a8db921c6c2 subpackages: - difflib - name: github.com/stretchr/testify - version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f + version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index 2df880175..a28bd39ec 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,9 +16,7 @@ import: - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire - subpackages: - - data - - data/base58 + version: sdk2 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: From 6637c202bf7d5256caf8acb65070937cfd2e75a0 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 13:40:18 -0800 Subject: [PATCH 47/68] Revert "Update to use tmlibs sdk2" This reverts commit ae58af0be534a5c344896461b97a6490d428deb4. Breaks the tests. 
--- glide.lock | 62 +++++++++++++++++++++++++++++++----------------------- glide.yaml | 4 +++- 2 files changed, 39 insertions(+), 27 deletions(-) diff --git a/glide.lock b/glide.lock index 5576d090a..aaf7c07e2 100644 --- a/glide.lock +++ b/glide.lock @@ -1,14 +1,10 @@ -hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 -updated: 2018-01-14T13:29:55.282854028-08:00 +hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 +updated: 2017-12-28T18:27:21.247160207-08:00 imports: -- name: github.com/davecgh/go-spew - version: ecdeabc65495df2dec95d7c4a4c3e021903035e5 - subpackages: - - spew - name: github.com/fsnotify/fsnotify - version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 + version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: 953e747656a7bbb5e1f998608b460458958b70cc + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -16,7 +12,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -30,7 +26,6 @@ imports: subpackages: - hcl/ast - hcl/parser - - hcl/printer - hcl/scanner - hcl/strconv - hcl/token @@ -44,15 +39,21 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a +- name: github.com/mattn/go-colorable + version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 +- name: github.com/mattn/go-isatty + version: a5cdd64afdee435007ee3e9f6ed4684af949d568 - name: github.com/mitchellh/mapstructure - version: b4575eea38cca1123ec2dc90c26529b5c5acfcff + version: 06020f85339e21b2478f756a78e295255ffa4d6a +- name: github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors - version: e881fd58d78e04cf6d0de1217f8707c8cc2249bc + version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 57afd63c68602b63ed976de00dd066ccb3c319db + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast @@ -60,13 +61,13 @@ imports: - name: github.com/spf13/cobra version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 + version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: aafc9e6bc7b7bb53ddaa75a5ef49a17d6e654be5 + version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: 34011bf325bce385408353a30b101fe5e923eb6e + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -81,31 +82,40 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b93ebdd4f306833936c243561ec30af3455dc764 + version: 27be46e25124ddf775e23317a83647ce62a93f6b + subpackages: + - data + - data/base58 - name: github.com/tendermint/log15 version: 
f91285dece9f4875421b481da3e613d83d44f29b + subpackages: + - term - name: golang.org/x/crypto - version: 13931e22f9e72ea58bb73048bc752b48c6d4d4ac + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 810d7000345868fc619eb81f46307107118f4ae1 + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 subpackages: - unix - name: golang.org/x/text - version: e19ae1496984b1c655b8044a65c0300a3c878dd3 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: d670f9405373e636a5a2765eea47fac0c9bc91a4 + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew - name: github.com/pmezard/go-difflib - version: 792786c7400a136282c1664665ae0a8db921c6c2 + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/stretchr/testify - version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c + version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index a28bd39ec..2df880175 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,7 +16,9 @@ import: - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire - version: sdk2 + subpackages: + - data + - data/base58 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: From cfbb9338bdad8f5b369b0e403eb428712860f1bb Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 14 Jan 2018 17:35:57 -0500 Subject: [PATCH 48/68] use go-wire sdk2 --- glide.lock | 46 ++++++++++++++++----------------------- glide.yaml | 1 + merkle/simple_map.go | 16 +++++++------- merkle/simple_map_test.go | 12 +++++----- merkle/simple_tree.go | 18 +++++++-------- 5 files changed, 42 insertions(+), 51 deletions(-) diff --git a/glide.lock b/glide.lock index aaf7c07e2..cd11b7588 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,14 @@ -hash: 325b2f9c7e84696f88fa88126a22eb1e1e91c2be5f60402d17bfaad6713b33c2 -updated: 2017-12-28T18:27:21.247160207-08:00 +hash: c2db6960e66e1f56fbce88caec470cbde14701763efb4a26d2f3fabd2f979a96 +updated: 2018-01-14T17:26:45.597677436-05:00 imports: +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: 953e747656a7bbb5e1f998608b460458958b70cc subpackages: - log - log/level @@ -12,7 +16,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf + version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -39,21 +43,15 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a -- name: github.com/mattn/go-colorable - version: 6fcc0c1fd9b620311d821b106a400b35dc95c497 -- name: github.com/mattn/go-isatty - version: a5cdd64afdee435007ee3e9f6ed4684af949d568 + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: 
c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 57afd63c68602b63ed976de00dd066ccb3c319db subpackages: - mem - name: github.com/spf13/cast @@ -63,11 +61,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f + version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea - name: github.com/spf13/viper - version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb - version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 + version: 34011bf325bce385408353a30b101fe5e923eb6e subpackages: - leveldb - leveldb/cache @@ -82,34 +80,28 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 27be46e25124ddf775e23317a83647ce62a93f6b + version: b93ebdd4f306833936c243561ec30af3455dc764 subpackages: - data - data/base58 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - subpackages: - - term - name: golang.org/x/crypto - version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 + version: 95a4943f35d008beabde8c11e5075a1b714e6419 subpackages: - ripemd160 - name: golang.org/x/sys - version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + version: 83801418e1b59fb1880e363299581ee543af32ca subpackages: - unix - name: golang.org/x/text - version: c01e4764d870b77f8abe5096ee19ad20d80e8075 + version: e19ae1496984b1c655b8044a65c0300a3c878dd3 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 testImports: -- name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 - subpackages: - - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml b/glide.yaml index 2df880175..e295781cb 100644 --- a/glide.yaml +++ b/glide.yaml @@ -16,6 +16,7 @@ import: - leveldb/errors - leveldb/opt - package: github.com/tendermint/go-wire + version: sdk2 subpackages: - data - data/base58 diff --git a/merkle/simple_map.go b/merkle/simple_map.go index 003c7cd42..f637d30a7 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -26,7 +26,7 @@ func (sm *SimpleMap) Set(key string, value interface{}) { if hashable, ok := value.(Hashable); ok { vBytes = hashable.Hash() } else { - vBytes = wire.BinaryBytes(value) + vBytes, _ = wire.MarshalBinary(value) } sm.kvs = append(sm.kvs, cmn.KVPair{ @@ -65,14 +65,14 @@ func (sm *SimpleMap) KVPairs() cmn.KVPairs { type kvPair cmn.KVPair func (kv kvPair) Hash() []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteByteSlice(kv.Key, hasher, n, err) - if *err != nil { - panic(*err) + hasher := ripemd160.New() + err := wire.EncodeByteSlice(hasher, kv.Key) + if err != nil { + panic(err) } - wire.WriteByteSlice(kv.Value, hasher, n, err) - if *err != nil { - panic(*err) + err = wire.EncodeByteSlice(hasher, kv.Value) + if err != nil { + panic(err) } return hasher.Sum(nil) } diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 8ba7ce66b..946858550 100644 --- a/merkle/simple_map_test.go +++ 
b/merkle/simple_map_test.go @@ -11,37 +11,37 @@ func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() db.Set("key1", "value1") - assert.Equal(t, "3bb53f017d2f5b4f144692aa829a5c245ac2b123", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "d7df3e1d47fe38b51f8d897a88828026807a86b6", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value2") - assert.Equal(t, "14a68db29e3f930ffaafeff5e07c17a439384f39", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "db415336c9be129ac38259b935a49d8e9c248c88", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") db.Set("key2", "value2") - assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") - assert.Equal(t, "275c6367f4be335f9c482b6ef72e49c84e3f8bda", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key1", "value1") db.Set("key2", "value2") db.Set("key3", "value3") - assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() db.Set("key2", "value2") // NOTE: out of order db.Set("key1", "value1") db.Set("key3", "value3") - assert.Equal(t, "48d60701cb4c96916f68a958b3368205ebe3809b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 3a82f4edc..86b0bf26c 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -28,17 +28,14 @@ import ( "golang.org/x/crypto/ripemd160" "github.com/tendermint/go-wire" - . "github.com/tendermint/tmlibs/common" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { - var n int - var err error var hasher = ripemd160.New() - wire.WriteByteSlice(left, hasher, &n, &err) - wire.WriteByteSlice(right, hasher, &n, &err) + err := wire.EncodeByteSlice(hasher, left) + err = wire.EncodeByteSlice(hasher, right) if err != nil { - PanicCrisis(err) + panic(err) } return hasher.Sum(nil) } @@ -68,11 +65,12 @@ func SimpleHashFromBinaries(items []interface{}) []byte { // General Convenience func SimpleHashFromBinary(item interface{}) []byte { - hasher, n, err := ripemd160.New(), new(int), new(error) - wire.WriteBinary(item, hasher, n, err) - if *err != nil { - PanicCrisis(err) + hasher := ripemd160.New() + bz, err := wire.MarshalBinary(item) + if err != nil { + panic(err) } + hasher.Write(bz) return hasher.Sum(nil) } From ff230682d1a058e2707600c4ba45a7fa6d6b39f5 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 21:20:53 -0800 Subject: [PATCH 49/68] Fix logical time (#122) Should fix a nondeterministic bug so... 
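The diff below replaces the decrementing timeleft counter in logicalTicker.fireRoutine with a direct comparison of elapsed logical time against the interval: a tick fires only once at least one full interval has passed since lasttime, and lasttime is then advanced to the tick that fired instead of trying to catch up on missed intervals. A minimal sketch of that firing rule, using a hypothetical standalone helper (fireIfDue is illustrative and not part of the patch):

package common

import "time"

// fireIfDue reports whether a logical tick arriving at now should fire, given
// the time of the previous fire and the configured interval. Missed intervals
// are dropped rather than caught up, mirroring time.Ticker's documented
// behavior for slow receivers. Sketch only; the real logic lives in
// logicalTicker.fireRoutine in the diff that follows.
func fireIfDue(lasttime, now time.Time, interval time.Duration) (bool, time.Time) {
	if interval <= now.Sub(lasttime) {
		return true, now // fire and advance lasttime to this tick
	}
	return false, lasttime // not enough logical time has elapsed yet
}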
--- Makefile | 4 ++ common/repeat_timer.go | 6 +- common/repeat_timer_test.go | 114 ++++++++++++++++++++---------------- 3 files changed, 69 insertions(+), 55 deletions(-) diff --git a/Makefile b/Makefile index e15356c2c..ae2c71610 100644 --- a/Makefile +++ b/Makefile @@ -62,12 +62,16 @@ get_vendor_deps: @echo "--> Running glide install" @glide install + ######################################## ### Testing test: go test -tags gcc `glide novendor` +test100: + @for i in {1..100}; do make test; done + ######################################## ### Formatting, linting, and vetting diff --git a/common/repeat_timer.go b/common/repeat_timer.go index 2e6cb81c8..cb227199e 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -80,13 +80,11 @@ func (t *logicalTicker) fireRoutine(interval time.Duration) { } // Init `lasttime` end - timeleft := interval for { select { case newtime := <-source: elapsed := newtime.Sub(lasttime) - timeleft -= elapsed - if timeleft <= 0 { + if interval <= elapsed { // Block for determinism until the ticker is stopped. select { case t.ch <- newtime: @@ -97,7 +95,7 @@ func (t *logicalTicker) fireRoutine(interval time.Duration) { // Don't try to "catch up" by sending more. // "Ticker adjusts the intervals or drops ticks to make up for // slow receivers" - https://golang.org/pkg/time/#Ticker - timeleft = interval + lasttime = newtime } case <-t.quit: return // done diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 5a3a4c0a6..5598922c5 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -1,6 +1,7 @@ package common import ( + "sync" "testing" "time" @@ -13,29 +14,42 @@ func TestDefaultTicker(t *testing.T) { ticker.Stop() } -func TestRepeat(t *testing.T) { +func TestRepeatTimer(t *testing.T) { ch := make(chan time.Time, 100) - lt := time.Time{} // zero time is year 1 + mtx := new(sync.Mutex) - // tick fires `cnt` times for each second. - tick := func(cnt int) { - for i := 0; i < cnt; i++ { - lt = lt.Add(time.Second) - ch <- lt - } + // tick() fires from start to end + // (exclusive) in milliseconds with incr. + // It locks on mtx, so subsequent calls + // run in series. + tick := func(startMs, endMs, incrMs time.Duration) { + mtx.Lock() + go func() { + for tMs := startMs; tMs < endMs; tMs += incrMs { + lt := time.Time{} + lt = lt.Add(tMs * time.Millisecond) + ch <- lt + } + mtx.Unlock() + }() } - // tock consumes Ticker.Chan() events `cnt` times. - tock := func(t *testing.T, rt *RepeatTimer, cnt int) { - for i := 0; i < cnt; i++ { - timeout := time.After(time.Second * 10) - select { - case <-rt.Chan(): - case <-timeout: - panic("expected RepeatTimer to fire") - } + // tock consumes Ticker.Chan() events and checks them against the ms in "timesMs". + tock := func(t *testing.T, rt *RepeatTimer, timesMs []int64) { + + // Check against timesMs. + for _, timeMs := range timesMs { + tyme := <-rt.Chan() + sinceMs := tyme.Sub(time.Time{}) / time.Millisecond + assert.Equal(t, timeMs, int64(sinceMs)) } + + // TODO detect number of running + // goroutines to ensure that + // no other times will fire. + // See https://github.com/tendermint/tmlibs/issues/120. + time.Sleep(time.Millisecond * 100) done := true select { case <-rt.Chan(): @@ -46,46 +60,44 @@ func TestRepeat(t *testing.T) { } tm := NewLogicalTickerMaker(ch) - dur := time.Duration(10 * time.Millisecond) // less than a second - rt := NewRepeatTimerWithTickerMaker("bar", dur, tm) - - // Start at 0. 
- tock(t, rt, 0) - tick(1) // init time + rt := NewRepeatTimerWithTickerMaker("bar", time.Second, tm) - tock(t, rt, 0) - tick(1) // wait 1 periods - tock(t, rt, 1) - tick(2) // wait 2 periods - tock(t, rt, 2) - tick(3) // wait 3 periods - tock(t, rt, 3) - tick(4) // wait 4 periods - tock(t, rt, 4) - - // Multiple resets leads to no firing. - for i := 0; i < 20; i++ { - time.Sleep(time.Millisecond) - rt.Reset() - } + /* NOTE: Useful for debugging deadlocks... + go func() { + time.Sleep(time.Second * 3) + trace := make([]byte, 102400) + count := runtime.Stack(trace, true) + fmt.Printf("Stack of %d bytes: %s\n", count, trace) + }() + */ - // After this, it works as new. - tock(t, rt, 0) - tick(1) // init time + tick(0, 1000, 10) + tock(t, rt, []int64{}) + tick(1000, 2000, 10) + tock(t, rt, []int64{1000}) + tick(2005, 5000, 10) + tock(t, rt, []int64{2005, 3005, 4005}) + tick(5001, 5999, 1) + // Read 5005 instead of 5001 because + // it's 1 second greater than 4005. + tock(t, rt, []int64{5005}) + tick(6000, 7005, 1) + tock(t, rt, []int64{6005}) + tick(7033, 8032, 1) + tock(t, rt, []int64{7033}) - tock(t, rt, 0) - tick(1) // wait 1 periods - tock(t, rt, 1) - tick(2) // wait 2 periods - tock(t, rt, 2) - tick(3) // wait 3 periods - tock(t, rt, 3) - tick(4) // wait 4 periods - tock(t, rt, 4) + // After a reset, nothing happens + // until two ticks are received. + rt.Reset() + tock(t, rt, []int64{}) + tick(8040, 8041, 1) + tock(t, rt, []int64{}) + tick(9555, 9556, 1) + tock(t, rt, []int64{9555}) // After a stop, nothing more is sent. rt.Stop() - tock(t, rt, 0) + tock(t, rt, []int64{}) // Another stop panics. assert.Panics(t, func() { rt.Stop() }) From 7ef6d4b8132dc8e9bc035b6fa9311934c67d5f87 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 14 Jan 2018 21:25:57 -0800 Subject: [PATCH 50/68] Glide update --- glide.lock | 35 +++++++++++++++++------------------ glide.yaml | 3 --- 2 files changed, 17 insertions(+), 21 deletions(-) diff --git a/glide.lock b/glide.lock index cd11b7588..e23eb217a 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: c2db6960e66e1f56fbce88caec470cbde14701763efb4a26d2f3fabd2f979a96 -updated: 2018-01-14T17:26:45.597677436-05:00 +hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 +updated: 2018-01-14T21:24:21.241420637-08:00 imports: - name: github.com/davecgh/go-spew version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 @@ -8,7 +8,7 @@ imports: - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: 953e747656a7bbb5e1f998608b460458958b70cc + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -16,7 +16,7 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-stack/stack - version: 259ab82a6cad3992b4e21ff5cac294ccb06474bc + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/gogo/protobuf version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: @@ -43,15 +43,17 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a +- name: github.com/pelletier/go-buffruneio + version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: 
0131db6d737cfbbfb678f8b7d92e55e27ce46224 + version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 57afd63c68602b63ed976de00dd066ccb3c319db + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast @@ -61,11 +63,11 @@ imports: - name: github.com/spf13/jwalterweatherman version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: 4c012f6dcd9546820e378d0bdda4d8fc772cdfea + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 + version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 - name: github.com/syndtr/goleveldb - version: 34011bf325bce385408353a30b101fe5e923eb6e + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: - leveldb - leveldb/cache @@ -81,33 +83,30 @@ imports: - leveldb/util - name: github.com/tendermint/go-wire version: b93ebdd4f306833936c243561ec30af3455dc764 - subpackages: - - data - - data/base58 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - name: golang.org/x/crypto - version: 95a4943f35d008beabde8c11e5075a1b714e6419 + version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 83801418e1b59fb1880e363299581ee543af32ca + version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 subpackages: - unix - name: golang.org/x/text - version: e19ae1496984b1c655b8044a65c0300a3c878dd3 + version: c01e4764d870b77f8abe5096ee19ad20d80e8075 subpackages: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/stretchr/testify - version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f + version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index e295781cb..a28bd39ec 100644 --- a/glide.yaml +++ b/glide.yaml @@ -17,9 +17,6 @@ import: - leveldb/opt - package: github.com/tendermint/go-wire version: sdk2 - subpackages: - - data - - data/base58 - package: github.com/tendermint/log15 - package: golang.org/x/crypto subpackages: From 580c3db8f974ec771fd4d7b64b2bbf690002bc75 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Thu, 25 Jan 2018 20:05:23 -0800 Subject: [PATCH 51/68] Hashable -> Hasher; SimpleMap upgrade; No "SimpleHashFromBinary" (#128) * Update SimpleMap to hash both keys and values for benefit; Hashable is Hasher; Don't assume go-wire --- glide.lock | 6 +++--- merkle/simple_map.go | 29 +++++++++++++------------- merkle/simple_map_test.go | 42 ++++++++++++++++++++++---------------- merkle/simple_proof.go | 10 ++++----- merkle/simple_tree.go | 26 ++++++++++------------- merkle/simple_tree_test.go | 6 +++--- merkle/types.go | 2 +- 7 files changed, 61 insertions(+), 60 deletions(-) diff --git a/glide.lock b/glide.lock index e23eb217a..8ed27e0b0 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 -updated: 2018-01-14T21:24:21.241420637-08:00 +updated: 2018-01-21T03:46:56.821595635-08:00 imports: - name: github.com/davecgh/go-spew version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 @@ -65,7 +65,7 @@ imports: - name: github.com/spf13/pflag version: 
97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 8ef37cbca71638bf32f3d5e194117d4cb46da163 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 subpackages: @@ -82,7 +82,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: b93ebdd4f306833936c243561ec30af3455dc764 + version: 0cce10e82786f2d501827fbe158747dbc4ceeb43 - name: github.com/tendermint/log15 version: f91285dece9f4875421b481da3e613d83d44f29b - name: golang.org/x/crypto diff --git a/merkle/simple_map.go b/merkle/simple_map.go index f637d30a7..b09b71d54 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -18,25 +18,25 @@ func NewSimpleMap() *SimpleMap { } } -func (sm *SimpleMap) Set(key string, value interface{}) { +func (sm *SimpleMap) Set(key string, value Hasher) { sm.sorted = false - // Is value Hashable? - var vBytes []byte - if hashable, ok := value.(Hashable); ok { - vBytes = hashable.Hash() - } else { - vBytes, _ = wire.MarshalBinary(value) - } + // Hash the key to blind it... why not? + khash := SimpleHashFromBytes([]byte(key)) + + // And the value is hashed too, so you can + // check for equality with a cached value (say) + // and make a determination to fetch or not. + vhash := value.Hash() sm.kvs = append(sm.kvs, cmn.KVPair{ - Key: []byte(key), - Value: vBytes, + Key: khash, + Value: vhash, }) } -// Merkle root hash of items sorted by key. -// NOTE: Behavior is undefined when key is duplicate. +// Merkle root hash of items sorted by key +// (UNSTABLE: and by value too if duplicate key). func (sm *SimpleMap) Hash() []byte { sm.Sort() return hashKVPairs(sm.kvs) @@ -51,7 +51,6 @@ func (sm *SimpleMap) Sort() { } // Returns a copy of sorted KVPairs. -// CONTRACT: The returned slice must not be mutated. 
func (sm *SimpleMap) KVPairs() cmn.KVPairs { sm.Sort() kvs := make(cmn.KVPairs, len(sm.kvs)) @@ -78,9 +77,9 @@ func (kv kvPair) Hash() []byte { } func hashKVPairs(kvs cmn.KVPairs) []byte { - kvsH := make([]Hashable, 0, len(kvs)) + kvsH := make([]Hasher, 0, len(kvs)) for _, kvp := range kvs { kvsH = append(kvsH, kvPair(kvp)) } - return SimpleHashFromHashables(kvsH) + return SimpleHashFromHashers(kvsH) } diff --git a/merkle/simple_map_test.go b/merkle/simple_map_test.go index 946858550..61210132b 100644 --- a/merkle/simple_map_test.go +++ b/merkle/simple_map_test.go @@ -7,41 +7,47 @@ import ( "github.com/stretchr/testify/assert" ) +type strHasher string + +func (str strHasher) Hash() []byte { + return SimpleHashFromBytes([]byte(str)) +} + func TestSimpleMap(t *testing.T) { { db := NewSimpleMap() - db.Set("key1", "value1") - assert.Equal(t, "d7df3e1d47fe38b51f8d897a88828026807a86b6", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value1")) + assert.Equal(t, "19618304d1ad2635c4238bce87f72331b22a11a1", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key1", "value2") - assert.Equal(t, "db415336c9be129ac38259b935a49d8e9c248c88", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value2")) + assert.Equal(t, "51cb96d3d41e1714def72eb4bacc211de9ddf284", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key1", "value1") - db.Set("key2", "value2") - assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value1")) + db.Set("key2", strHasher("value2")) + assert.Equal(t, "58a0a99d5019fdcad4bcf55942e833b2dfab9421", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key2", "value2") // NOTE: out of order - db.Set("key1", "value1") - assert.Equal(t, "fdb900a04c1de42bd3d924fc644e28a4bdce30ce", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key2", strHasher("value2")) // NOTE: out of order + db.Set("key1", strHasher("value1")) + assert.Equal(t, "58a0a99d5019fdcad4bcf55942e833b2dfab9421", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key1", "value1") - db.Set("key2", "value2") - db.Set("key3", "value3") - assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key1", strHasher("value1")) + db.Set("key2", strHasher("value2")) + db.Set("key3", strHasher("value3")) + assert.Equal(t, "cb56db3c7993e977f4c2789559ae3e5e468a6e9b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } { db := NewSimpleMap() - db.Set("key2", "value2") // NOTE: out of order - db.Set("key1", "value1") - db.Set("key3", "value3") - assert.Equal(t, "488cfdaea108ef8bd406f6163555752392ae1b4a", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") + db.Set("key2", strHasher("value2")) // NOTE: out of order + db.Set("key1", strHasher("value1")) + db.Set("key3", strHasher("value3")) + assert.Equal(t, "cb56db3c7993e977f4c2789559ae3e5e468a6e9b", fmt.Sprintf("%x", db.Hash()), "Hash didn't match") } } diff --git a/merkle/simple_proof.go b/merkle/simple_proof.go index f75568fd9..83f89e598 100644 --- a/merkle/simple_proof.go +++ b/merkle/simple_proof.go @@ -10,8 +10,8 @@ type SimpleProof struct { } // proofs[0] is the proof for items[0]. 
-func SimpleProofsFromHashables(items []Hashable) (rootHash []byte, proofs []*SimpleProof) { - trails, rootSPN := trailsFromHashables(items) +func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleProof) { + trails, rootSPN := trailsFromHashers(items) rootHash = rootSPN.Hash proofs = make([]*SimpleProof, len(items)) for i, trail := range trails { @@ -109,7 +109,7 @@ func (spn *SimpleProofNode) FlattenAunts() [][]byte { // trails[0].Hash is the leaf hash for items[0]. // trails[i].Parent.Parent....Parent == root for all i. -func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *SimpleProofNode) { +func trailsFromHashers(items []Hasher) (trails []*SimpleProofNode, root *SimpleProofNode) { // Recursive impl. switch len(items) { case 0: @@ -118,8 +118,8 @@ func trailsFromHashables(items []Hashable) (trails []*SimpleProofNode, root *Sim trail := &SimpleProofNode{items[0].Hash(), nil, nil, nil} return []*SimpleProofNode{trail}, trail default: - lefts, leftRoot := trailsFromHashables(items[:(len(items)+1)/2]) - rights, rightRoot := trailsFromHashables(items[(len(items)+1)/2:]) + lefts, leftRoot := trailsFromHashers(items[:(len(items)+1)/2]) + rights, rightRoot := trailsFromHashers(items[(len(items)+1)/2:]) rootHash := SimpleHashFromTwoHashes(leftRoot.Hash, rightRoot.Hash) root := &SimpleProofNode{rootHash, nil, nil, nil} leftRoot.Parent = root diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 86b0bf26c..182f2fdaa 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -54,28 +54,25 @@ func SimpleHashFromHashes(hashes [][]byte) []byte { } } -// Convenience for SimpleHashFromHashes. -func SimpleHashFromBinaries(items []interface{}) []byte { - hashes := make([][]byte, len(items)) - for i, item := range items { - hashes[i] = SimpleHashFromBinary(item) +// NOTE: Do not implement this, use SimpleHashFromByteslices instead. +// type Byteser interface { Bytes() []byte } +// func SimpleHashFromBytesers(items []Byteser) []byte { ... } + +func SimpleHashFromByteslices(bzs [][]byte) []byte { + hashes := make([][]byte, len(bzs)) + for i, bz := range bzs { + hashes[i] = SimpleHashFromBytes(bz) } return SimpleHashFromHashes(hashes) } -// General Convenience -func SimpleHashFromBinary(item interface{}) []byte { +func SimpleHashFromBytes(bz []byte) []byte { hasher := ripemd160.New() - bz, err := wire.MarshalBinary(item) - if err != nil { - panic(err) - } hasher.Write(bz) return hasher.Sum(nil) } -// Convenience for SimpleHashFromHashes. -func SimpleHashFromHashables(items []Hashable) []byte { +func SimpleHashFromHashers(items []Hasher) []byte { hashes := make([][]byte, len(items)) for i, item := range items { hash := item.Hash() @@ -84,8 +81,7 @@ func SimpleHashFromHashables(items []Hashable) []byte { return SimpleHashFromHashes(hashes) } -// Convenience for SimpleHashFromHashes. 
-func SimpleHashFromMap(m map[string]interface{}) []byte { +func SimpleHashFromMap(m map[string]Hasher) []byte { sm := NewSimpleMap() for k, v := range m { sm.Set(k, v) diff --git a/merkle/simple_tree_test.go b/merkle/simple_tree_test.go index 6299fa33b..26f35c807 100644 --- a/merkle/simple_tree_test.go +++ b/merkle/simple_tree_test.go @@ -19,14 +19,14 @@ func TestSimpleProof(t *testing.T) { total := 100 - items := make([]Hashable, total) + items := make([]Hasher, total) for i := 0; i < total; i++ { items[i] = testItem(RandBytes(32)) } - rootHash := SimpleHashFromHashables(items) + rootHash := SimpleHashFromHashers(items) - rootHash2, proofs := SimpleProofsFromHashables(items) + rootHash2, proofs := SimpleProofsFromHashers(items) if !bytes.Equal(rootHash, rootHash2) { t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) diff --git a/merkle/types.go b/merkle/types.go index 93541eda5..1a6d75e0c 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -18,6 +18,6 @@ type Tree interface { IterateRange(start []byte, end []byte, ascending bool, fx func(key []byte, value []byte) (stop bool)) (stopped bool) } -type Hashable interface { +type Hasher interface { Hash() []byte } From c75298e3594a0d5f0c88112fbc0543edb6212d5b Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Sun, 21 Jan 2018 18:47:18 -0800 Subject: [PATCH 52/68] Update SimpleMap to hash both keys and values for benefit; Hashable is Hasher; Don't assume go-wire --- merkle/types.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/merkle/types.go b/merkle/types.go index 1a6d75e0c..3881f3793 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -21,3 +21,7 @@ type Tree interface { type Hasher interface { Hash() []byte } + +type Byteser interface { + Bytes() []byte +} From b95cac5f4fbedd187a0503cc18452fd4092413e8 Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 24 Jan 2018 15:13:39 -0800 Subject: [PATCH 53/68] Remove unnecessary Byteser interface --- merkle/types.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/merkle/types.go b/merkle/types.go index 3881f3793..1a6d75e0c 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -21,7 +21,3 @@ type Tree interface { type Hasher interface { Hash() []byte } - -type Byteser interface { - Bytes() []byte -} From f6dbe9ba054200d9d0753ac0407d1864f90a2b8d Mon Sep 17 00:00:00 2001 From: Jae Kwon Date: Wed, 24 Jan 2018 15:55:31 -0800 Subject: [PATCH 54/68] Refactor string -> dbBackendType --- db/backend_test.go | 6 +++--- db/c_level_db.go | 4 ++-- db/c_level_db_test.go | 4 ++-- db/common_test.go | 2 +- db/db.go | 18 ++++++++++-------- db/fsdb.go | 2 +- db/go_level_db.go | 4 ++-- db/mem_db.go | 2 +- 8 files changed, 22 insertions(+), 20 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 0f4346f2e..0227eb547 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,7 +15,7 @@ func cleanupDBDir(dir, name string) { os.RemoveAll(filepath.Join(dir, name) + ".db") } -func testBackendGetSetDelete(t *testing.T, backend string) { +func testBackendGetSetDelete(t *testing.T, backend dbBackendType) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() @@ -141,9 +141,9 @@ func TestBackendsNilKeys(t *testing.T) { } } -func TestGoLevelDBBackendStr(t *testing.T) { +func TestGoLevelDBBackend(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, GoLevelDBBackendStr, "") + db := NewDB(name, GoLevelDBBackend, "") defer cleanupDBDir("", name) _, ok := db.(*GoLevelDB) diff --git a/db/c_level_db.go 
b/db/c_level_db.go index f1a5a3aef..a59137883 100644 --- a/db/c_level_db.go +++ b/db/c_level_db.go @@ -14,8 +14,8 @@ func init() { dbCreator := func(name string, dir string) (DB, error) { return NewCLevelDB(name, dir) } - registerDBCreator(LevelDBBackendStr, dbCreator, true) - registerDBCreator(CLevelDBBackendStr, dbCreator, false) + registerDBCreator(LevelDBBackend, dbCreator, true) + registerDBCreator(CLevelDBBackend, dbCreator, false) } var _ DB = (*CLevelDB)(nil) diff --git a/db/c_level_db_test.go b/db/c_level_db_test.go index 89993fbac..34bb72273 100644 --- a/db/c_level_db_test.go +++ b/db/c_level_db_test.go @@ -86,9 +86,9 @@ func bytes2Int64(buf []byte) int64 { } */ -func TestCLevelDBBackendStr(t *testing.T) { +func TestCLevelDBBackend(t *testing.T) { name := cmn.Fmt("test_%x", cmn.RandStr(12)) - db := NewDB(name, LevelDBBackendStr, "") + db := NewDB(name, LevelDBBackend, "") defer cleanupDBDir("", name) _, ok := db.(*CLevelDB) diff --git a/db/common_test.go b/db/common_test.go index 2a5d01818..1a529949f 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -45,7 +45,7 @@ func checkValuePanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend string) (db DB) { +func newTempDB(t *testing.T, backend dbBackendType) (db DB) { dir, dirname := cmn.Tempdir("test_go_iterator") db = NewDB("testdb", backend, dirname) dir.Close() diff --git a/db/db.go b/db/db.go index 25ff93ec5..1428c9c42 100644 --- a/db/db.go +++ b/db/db.go @@ -5,19 +5,21 @@ import "fmt" //---------------------------------------- // Main entry +type dbBackendType string + const ( - LevelDBBackendStr = "leveldb" // legacy, defaults to goleveldb unless +gcc - CLevelDBBackendStr = "cleveldb" - GoLevelDBBackendStr = "goleveldb" - MemDBBackendStr = "memdb" - FSDBBackendStr = "fsdb" // using the filesystem naively + LevelDBBackend dbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend dbBackendType = "cleveldb" + GoLevelDBBackend dbBackendType = "goleveldb" + MemDBBackend dbBackendType = "memDB" + FSDBBackend dbBackendType = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) -var backends = map[string]dbCreator{} +var backends = map[dbBackendType]dbCreator{} -func registerDBCreator(backend string, creator dbCreator, force bool) { +func registerDBCreator(backend dbBackendType, creator dbCreator, force bool) { _, ok := backends[backend] if !force && ok { return @@ -25,7 +27,7 @@ func registerDBCreator(backend string, creator dbCreator, force bool) { backends[backend] = creator } -func NewDB(name string, backend string, dir string) DB { +func NewDB(name string, backend dbBackendType, dir string) DB { db, err := backends[backend](name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) diff --git a/db/fsdb.go b/db/fsdb.go index 45c3231f6..578c1785a 100644 --- a/db/fsdb.go +++ b/db/fsdb.go @@ -19,7 +19,7 @@ const ( ) func init() { - registerDBCreator(FSDBBackendStr, func(name string, dir string) (DB, error) { + registerDBCreator(FSDBBackend, func(name string, dir string) (DB, error) { dbPath := filepath.Join(dir, name+".db") return NewFSDB(dbPath), nil }, false) diff --git a/db/go_level_db.go b/db/go_level_db.go index 7d60e060f..9fed329bf 100644 --- a/db/go_level_db.go +++ b/db/go_level_db.go @@ -17,8 +17,8 @@ func init() { dbCreator := func(name string, dir string) (DB, error) { return NewGoLevelDB(name, dir) } - 
registerDBCreator(LevelDBBackendStr, dbCreator, false) - registerDBCreator(GoLevelDBBackendStr, dbCreator, false) + registerDBCreator(LevelDBBackend, dbCreator, false) + registerDBCreator(GoLevelDBBackend, dbCreator, false) } var _ DB = (*GoLevelDB)(nil) diff --git a/db/mem_db.go b/db/mem_db.go index 1e3bee5a5..f2c484fa7 100644 --- a/db/mem_db.go +++ b/db/mem_db.go @@ -7,7 +7,7 @@ import ( ) func init() { - registerDBCreator(MemDBBackendStr, func(name string, dir string) (DB, error) { + registerDBCreator(MemDBBackend, func(name string, dir string) (DB, error) { return NewMemDB(), nil }, false) } From 9ccfe161ad47c9471796107d9a9a68322caf5960 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 2 Feb 2018 13:51:39 +0400 Subject: [PATCH 55/68] lowercase memDB type key --- db/db.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/db/db.go b/db/db.go index 1428c9c42..ced0ec9ca 100644 --- a/db/db.go +++ b/db/db.go @@ -11,7 +11,7 @@ const ( LevelDBBackend dbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc CLevelDBBackend dbBackendType = "cleveldb" GoLevelDBBackend dbBackendType = "goleveldb" - MemDBBackend dbBackendType = "memDB" + MemDBBackend dbBackendType = "memdb" FSDBBackend dbBackendType = "fsdb" // using the filesystem naively ) From 2e765462234e7749806cd00fa326acc25d704448 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 2 Feb 2018 18:09:48 +0100 Subject: [PATCH 56/68] Clean up glide.yaml --- glide.lock | 34 +++++++++++++++------------------- glide.yaml | 15 +++++++++++++-- 2 files changed, 28 insertions(+), 21 deletions(-) diff --git a/glide.lock b/glide.lock index 8ed27e0b0..875f9837b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,14 +1,14 @@ -hash: 1990fb145d5c5098b5ee467c59506e81b6c3b973667eeb63d83abd7ef831b919 -updated: 2018-01-21T03:46:56.821595635-08:00 +hash: 22e22759d9adc51e3ce0728955143321386891907ce54eb952245d57285d8784 +updated: 2018-02-02T18:08:31.85309+01:00 imports: - name: github.com/davecgh/go-spew - version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + version: 346938d642f2ec3594ed81d874461961cd0faa76 subpackages: - spew - name: github.com/fsnotify/fsnotify - version: 4da3e2cfbabc9f751898f250b49f2439785783a1 + version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 - name: github.com/go-kit/kit - version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c + version: 4dc7be5d2d12881735283bcab7352178e190fc71 subpackages: - log - log/level @@ -18,7 +18,7 @@ imports: - name: github.com/go-stack/stack version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/gogo/protobuf - version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 + version: 1adfc126b41513cc696b209667c8656ea7aac67c subpackages: - gogoproto - proto @@ -43,17 +43,15 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure - version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: b4575eea38cca1123ec2dc90c26529b5c5acfcff - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: acdc4509485b587f5e675510c4f2c63e90ff68a8 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 
bb8f1927f2a9d3ab41c9340aa034f6b803f4359c subpackages: - mem - name: github.com/spf13/cast @@ -61,7 +59,7 @@ imports: - name: github.com/spf13/cobra version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 12bd96e66386c1960ab0f74ced1362f66f552f7b + version: 7c0cea34c8ece3fbeb2b27ab9b59511d360fb394 - name: github.com/spf13/pflag version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper @@ -82,15 +80,13 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: 0cce10e82786f2d501827fbe158747dbc4ceeb43 -- name: github.com/tendermint/log15 - version: f91285dece9f4875421b481da3e613d83d44f29b + version: e723d95ac2838b7ae9919ada25004859236c32ff - name: golang.org/x/crypto version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: - ripemd160 - name: golang.org/x/sys - version: 8dbc5d05d6edcc104950cc299a1ce6641235bc86 + version: 37707fdb30a5b38865cfb95e5aab41707daec7fd subpackages: - unix - name: golang.org/x/text @@ -99,14 +95,14 @@ imports: - transform - unicode/norm - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: d670f9405373e636a5a2765eea47fac0c9bc91a4 testImports: - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib - name: github.com/stretchr/testify - version: b91bfb9ebec76498946beb6af7c0230c7cc7ba6c + version: 12b6f73e6084dad08a7c6e575284b177ecafbc71 subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index a28bd39ec..42d43e4be 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,28 +1,39 @@ package: github.com/tendermint/tmlibs import: - package: github.com/go-kit/kit + version: ^0.6.0 subpackages: - log - log/level - log/term - package: github.com/go-logfmt/logfmt + version: ^0.3.0 +- package: github.com/gogo/protobuf + version: ^1.0.0 + subpackages: + - gogoproto + - proto - package: github.com/jmhodges/levigo - package: github.com/pkg/errors + version: ^0.8.0 - package: github.com/spf13/cobra + version: ^0.0.1 - package: github.com/spf13/viper + version: ^1.0.0 - package: github.com/syndtr/goleveldb subpackages: - leveldb - leveldb/errors + - leveldb/iterator - leveldb/opt - package: github.com/tendermint/go-wire - version: sdk2 -- package: github.com/tendermint/log15 + version: develop - package: golang.org/x/crypto subpackages: - ripemd160 testImport: - package: github.com/stretchr/testify + version: ^1.2.1 subpackages: - assert - require From cbc63518e589d6b0069f9750127fa83dd6ea5ee3 Mon Sep 17 00:00:00 2001 From: Adrian Brink Date: Fri, 2 Feb 2018 18:50:24 +0100 Subject: [PATCH 57/68] Export DbBackendType in order to fix IAVL tests --- db/backend_test.go | 2 +- db/common_test.go | 2 +- db/db.go | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 0227eb547..9e73a1f66 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,7 +15,7 @@ func cleanupDBDir(dir, name string) { os.RemoveAll(filepath.Join(dir, name) + ".db") } -func testBackendGetSetDelete(t *testing.T, backend dbBackendType) { +func testBackendGetSetDelete(t *testing.T, backend DbBackendType) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() diff --git a/db/common_test.go b/db/common_test.go index 1a529949f..4209b44d7 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -45,7 +45,7 @@ func checkValuePanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Key() 
}, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend dbBackendType) (db DB) { +func newTempDB(t *testing.T, backend DbBackendType) (db DB) { dir, dirname := cmn.Tempdir("test_go_iterator") db = NewDB("testdb", backend, dirname) dir.Close() diff --git a/db/db.go b/db/db.go index ced0ec9ca..ac19f6b4b 100644 --- a/db/db.go +++ b/db/db.go @@ -5,21 +5,21 @@ import "fmt" //---------------------------------------- // Main entry -type dbBackendType string +type DbBackendType string const ( - LevelDBBackend dbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc - CLevelDBBackend dbBackendType = "cleveldb" - GoLevelDBBackend dbBackendType = "goleveldb" - MemDBBackend dbBackendType = "memdb" - FSDBBackend dbBackendType = "fsdb" // using the filesystem naively + LevelDBBackend DbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend DbBackendType = "cleveldb" + GoLevelDBBackend DbBackendType = "goleveldb" + MemDBBackend DbBackendType = "memdb" + FSDBBackend DbBackendType = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) -var backends = map[dbBackendType]dbCreator{} +var backends = map[DbBackendType]dbCreator{} -func registerDBCreator(backend dbBackendType, creator dbCreator, force bool) { +func registerDBCreator(backend DbBackendType, creator dbCreator, force bool) { _, ok := backends[backend] if !force && ok { return @@ -27,7 +27,7 @@ func registerDBCreator(backend dbBackendType, creator dbCreator, force bool) { backends[backend] = creator } -func NewDB(name string, backend dbBackendType, dir string) DB { +func NewDB(name string, backend DbBackendType, dir string) DB { db, err := backends[backend](name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) From 1b5176003a7733baed745dd9b9c153a0893ad46a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 2 Feb 2018 22:31:17 -0500 Subject: [PATCH 58/68] DbBackend -> DBBackend --- db/backend_test.go | 2 +- db/common_test.go | 2 +- db/db.go | 18 +++++++++--------- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/db/backend_test.go b/db/backend_test.go index 9e73a1f66..80fbbb140 100644 --- a/db/backend_test.go +++ b/db/backend_test.go @@ -15,7 +15,7 @@ func cleanupDBDir(dir, name string) { os.RemoveAll(filepath.Join(dir, name) + ".db") } -func testBackendGetSetDelete(t *testing.T, backend DbBackendType) { +func testBackendGetSetDelete(t *testing.T, backend DBBackendType) { // Default dir, dirname := cmn.Tempdir(fmt.Sprintf("test_backend_%s_", backend)) defer dir.Close() diff --git a/db/common_test.go b/db/common_test.go index 4209b44d7..1b0f00416 100644 --- a/db/common_test.go +++ b/db/common_test.go @@ -45,7 +45,7 @@ func checkValuePanics(t *testing.T, itr Iterator) { assert.Panics(t, func() { itr.Key() }, "checkValuePanics expected panic but didn't") } -func newTempDB(t *testing.T, backend DbBackendType) (db DB) { +func newTempDB(t *testing.T, backend DBBackendType) (db DB) { dir, dirname := cmn.Tempdir("test_go_iterator") db = NewDB("testdb", backend, dirname) dir.Close() diff --git a/db/db.go b/db/db.go index ac19f6b4b..869937660 100644 --- a/db/db.go +++ b/db/db.go @@ -5,21 +5,21 @@ import "fmt" //---------------------------------------- // Main entry -type DbBackendType string +type DBBackendType string const ( - LevelDBBackend DbBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc - CLevelDBBackend DbBackendType = "cleveldb" - GoLevelDBBackend DbBackendType = 
"goleveldb" - MemDBBackend DbBackendType = "memdb" - FSDBBackend DbBackendType = "fsdb" // using the filesystem naively + LevelDBBackend DBBackendType = "leveldb" // legacy, defaults to goleveldb unless +gcc + CLevelDBBackend DBBackendType = "cleveldb" + GoLevelDBBackend DBBackendType = "goleveldb" + MemDBBackend DBBackendType = "memdb" + FSDBBackend DBBackendType = "fsdb" // using the filesystem naively ) type dbCreator func(name string, dir string) (DB, error) -var backends = map[DbBackendType]dbCreator{} +var backends = map[DBBackendType]dbCreator{} -func registerDBCreator(backend DbBackendType, creator dbCreator, force bool) { +func registerDBCreator(backend DBBackendType, creator dbCreator, force bool) { _, ok := backends[backend] if !force && ok { return @@ -27,7 +27,7 @@ func registerDBCreator(backend DbBackendType, creator dbCreator, force bool) { backends[backend] = creator } -func NewDB(name string, backend DbBackendType, dir string) DB { +func NewDB(name string, backend DBBackendType, dir string) DB { db, err := backends[backend](name, dir) if err != nil { panic(fmt.Sprintf("Error initializing DB: %v", err)) From 690d6c60701758ab757d11ef674906f64e6b618d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 18 Jan 2018 22:56:36 -0500 Subject: [PATCH 59/68] cli: WriteDemoConfig -> WriteConfigVals --- CHANGELOG.md | 6 ++++++ cli/helper.go | 16 +++++----------- cli/setup.go | 6 ++++-- cli/setup_test.go | 18 +++++++++++++++--- 4 files changed, 30 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe2c2fe94..42b8cdd61 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Changelog +## 0.7.0 (TBD) + +BREAKING: + + - [cli] WriteDemoConfig -> WriteConfigValues + ## 0.6.0 (December 29, 2017) BREAKING: diff --git a/cli/helper.go b/cli/helper.go index 845c17dbf..878cf26e5 100644 --- a/cli/helper.go +++ b/cli/helper.go @@ -9,21 +9,15 @@ import ( "path/filepath" ) -// WriteDemoConfig writes a toml file with the given values. -// It returns the RootDir the config.toml file is stored in, -// or an error if writing was impossible -func WriteDemoConfig(vals map[string]string) (string, error) { - cdir, err := ioutil.TempDir("", "test-cli") - if err != nil { - return "", err - } +// WriteConfigVals writes a toml file with the given values. +// It returns an error if writing was impossible. 
+func WriteConfigVals(dir string, vals map[string]string) error { data := "" for k, v := range vals { data = data + fmt.Sprintf("%s = \"%s\"\n", k, v) } - cfile := filepath.Join(cdir, "config.toml") - err = ioutil.WriteFile(cfile, []byte(data), 0666) - return cdir, err + cfile := filepath.Join(dir, "config.toml") + return ioutil.WriteFile(cfile, []byte(data), 0666) } // RunWithArgs executes the given command with the specified command line args diff --git a/cli/setup.go b/cli/setup.go index 2dcadb407..dc34abdf9 100644 --- a/cli/setup.go +++ b/cli/setup.go @@ -3,6 +3,7 @@ package cli import ( "fmt" "os" + "path/filepath" "strings" "github.com/pkg/errors" @@ -129,8 +130,9 @@ func bindFlagsLoadViper(cmd *cobra.Command, args []string) error { homeDir := viper.GetString(HomeFlag) viper.Set(HomeFlag, homeDir) - viper.SetConfigName("config") // name of config file (without extension) - viper.AddConfigPath(homeDir) // search root directory + viper.SetConfigName("config") // name of config file (without extension) + viper.AddConfigPath(homeDir) // search root directory + viper.AddConfigPath(filepath.Join(homeDir, "config")) // search root directory /config // If a config file is found, read it in. if err := viper.ReadInConfig(); err == nil { diff --git a/cli/setup_test.go b/cli/setup_test.go index e0fd75d8a..04209e493 100644 --- a/cli/setup_test.go +++ b/cli/setup_test.go @@ -2,6 +2,7 @@ package cli import ( "fmt" + "io/ioutil" "strconv" "strings" "testing" @@ -54,11 +55,20 @@ func TestSetupEnv(t *testing.T) { } } +func tempDir() string { + cdir, err := ioutil.TempDir("", "test-cli") + if err != nil { + panic(err) + } + return cdir +} + func TestSetupConfig(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. cval1 := "fubble" - conf1, err := WriteDemoConfig(map[string]string{"boo": cval1}) + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"boo": cval1}) require.Nil(t, err) cases := []struct { @@ -116,10 +126,12 @@ func TestSetupUnmarshal(t *testing.T) { // we pre-create two config files we can refer to in the rest of // the test cases. 
cval1, cval2 := "someone", "else" - conf1, err := WriteDemoConfig(map[string]string{"name": cval1}) + conf1 := tempDir() + err := WriteConfigVals(conf1, map[string]string{"name": cval1}) require.Nil(t, err) // even with some ignored fields, should be no problem - conf2, err := WriteDemoConfig(map[string]string{"name": cval2, "foo": "bar"}) + conf2 := tempDir() + err = WriteConfigVals(conf2, map[string]string{"name": cval2, "foo": "bar"}) require.Nil(t, err) // unused is not declared on a flag and remains from base From 1d7fc78ea171587e9e63da566d3da1b127bfd14c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 2 Feb 2018 23:49:14 -0500 Subject: [PATCH 60/68] update glide --- glide.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/glide.lock b/glide.lock index 875f9837b..4f3c395ce 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ hash: 22e22759d9adc51e3ce0728955143321386891907ce54eb952245d57285d8784 -updated: 2018-02-02T18:08:31.85309+01:00 +updated: 2018-02-02T23:47:17.788237939-05:00 imports: - name: github.com/davecgh/go-spew version: 346938d642f2ec3594ed81d874461961cd0faa76 @@ -80,7 +80,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/go-wire - version: e723d95ac2838b7ae9919ada25004859236c32ff + version: dec83f641903b22f039da3974607859715d0377e - name: golang.org/x/crypto version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: From d6d97889f21f5ff168de16191be0f9c937fef1f8 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sat, 3 Feb 2018 01:29:39 -0500 Subject: [PATCH 61/68] merkle: remove go-wire dep by copying EncodeByteSlice --- glide.lock | 14 ++++++-------- glide.yaml | 2 -- merkle/simple_map.go | 5 ++--- merkle/simple_tree.go | 6 ++---- merkle/types.go | 24 ++++++++++++++++++++++++ 5 files changed, 34 insertions(+), 17 deletions(-) diff --git a/glide.lock b/glide.lock index 4f3c395ce..10dec980b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,10 +1,6 @@ -hash: 22e22759d9adc51e3ce0728955143321386891907ce54eb952245d57285d8784 -updated: 2018-02-02T23:47:17.788237939-05:00 +hash: 98752078f39da926f655268b3b143f713d64edd379fc9fcb1210d9d8aa7ab4e0 +updated: 2018-02-03T01:28:00.221548057-05:00 imports: -- name: github.com/davecgh/go-spew - version: 346938d642f2ec3594ed81d874461961cd0faa76 - subpackages: - - spew - name: github.com/fsnotify/fsnotify version: c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9 - name: github.com/go-kit/kit @@ -79,8 +75,6 @@ imports: - leveldb/storage - leveldb/table - leveldb/util -- name: github.com/tendermint/go-wire - version: dec83f641903b22f039da3974607859715d0377e - name: golang.org/x/crypto version: edd5e9b0879d13ee6970a50153d85b8fec9f7686 subpackages: @@ -97,6 +91,10 @@ imports: - name: gopkg.in/yaml.v2 version: d670f9405373e636a5a2765eea47fac0c9bc91a4 testImports: +- name: github.com/davecgh/go-spew + version: 346938d642f2ec3594ed81d874461961cd0faa76 + subpackages: + - spew - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml b/glide.yaml index 42d43e4be..b12c72a16 100644 --- a/glide.yaml +++ b/glide.yaml @@ -26,8 +26,6 @@ import: - leveldb/errors - leveldb/iterator - leveldb/opt -- package: github.com/tendermint/go-wire - version: develop - package: golang.org/x/crypto subpackages: - ripemd160 diff --git a/merkle/simple_map.go b/merkle/simple_map.go index b09b71d54..b59e3b4b6 100644 --- a/merkle/simple_map.go +++ b/merkle/simple_map.go @@ -1,7 +1,6 @@ package merkle import ( - "github.com/tendermint/go-wire" cmn 
"github.com/tendermint/tmlibs/common" "golang.org/x/crypto/ripemd160" ) @@ -65,11 +64,11 @@ type kvPair cmn.KVPair func (kv kvPair) Hash() []byte { hasher := ripemd160.New() - err := wire.EncodeByteSlice(hasher, kv.Key) + err := encodeByteSlice(hasher, kv.Key) if err != nil { panic(err) } - err = wire.EncodeByteSlice(hasher, kv.Value) + err = encodeByteSlice(hasher, kv.Value) if err != nil { panic(err) } diff --git a/merkle/simple_tree.go b/merkle/simple_tree.go index 182f2fdaa..a363ea8e8 100644 --- a/merkle/simple_tree.go +++ b/merkle/simple_tree.go @@ -26,14 +26,12 @@ package merkle import ( "golang.org/x/crypto/ripemd160" - - "github.com/tendermint/go-wire" ) func SimpleHashFromTwoHashes(left []byte, right []byte) []byte { var hasher = ripemd160.New() - err := wire.EncodeByteSlice(hasher, left) - err = wire.EncodeByteSlice(hasher, right) + err := encodeByteSlice(hasher, left) + err = encodeByteSlice(hasher, right) if err != nil { panic(err) } diff --git a/merkle/types.go b/merkle/types.go index 1a6d75e0c..e0fe35fa8 100644 --- a/merkle/types.go +++ b/merkle/types.go @@ -1,5 +1,10 @@ package merkle +import ( + "encoding/binary" + "io" +) + type Tree interface { Size() (size int) Height() (height int8) @@ -21,3 +26,22 @@ type Tree interface { type Hasher interface { Hash() []byte } + +//----------------------------------------------------------------------- +// NOTE: these are duplicated from go-wire so we dont need go-wire as a dep + +func encodeByteSlice(w io.Writer, bz []byte) (err error) { + err = encodeVarint(w, int64(len(bz))) + if err != nil { + return + } + _, err = w.Write(bz) + return +} + +func encodeVarint(w io.Writer, i int64) (err error) { + var buf [10]byte + n := binary.PutVarint(buf[:], i) + _, err = w.Write(buf[0:n]) + return +} From 951333ecb0c82d1022bd2fc49da63977f7378eb2 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Fri, 2 Feb 2018 23:40:38 -0700 Subject: [PATCH 62/68] common: IsHex should be able to handle 0X prefixed strings IsHex should also successfully decode strings prefixed with 0X instead of only 0x strings. Also add tests generally for IsHex. 
--- common/string.go | 2 +- common/string_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/common/string.go b/common/string.go index 6924e6a5b..a6895eb25 100644 --- a/common/string.go +++ b/common/string.go @@ -29,7 +29,7 @@ func LeftPadString(s string, totalLength int) string { // IsHex returns true for non-empty hex-string prefixed with "0x" func IsHex(s string) bool { - if len(s) > 2 && s[:2] == "0x" { + if len(s) > 2 && strings.EqualFold(s[:2], "0x") { _, err := hex.DecodeString(s[2:]) return err == nil } diff --git a/common/string_test.go b/common/string_test.go index a82f1022b..b8a917c16 100644 --- a/common/string_test.go +++ b/common/string_test.go @@ -12,3 +12,21 @@ func TestStringInSlice(t *testing.T) { assert.True(t, StringInSlice("", []string{""})) assert.False(t, StringInSlice("", []string{})) } + +func TestIsHex(t *testing.T) { + notHex := []string{ + "", " ", "a", "x", "0", "0x", "0X", "0x ", "0X ", "0X a", + "0xf ", "0x f", "0xp", "0x-", + "0xf", "0XBED", "0xF", "0xbed", // Odd lengths + } + for _, v := range notHex { + assert.False(t, IsHex(v), "%q is not hex", v) + } + hex := []string{ + "0x00", "0x0a", "0x0F", "0xFFFFFF", "0Xdeadbeef", "0x0BED", + "0X12", "0X0A", + } + for _, v := range hex { + assert.True(t, IsHex(v), "%q is hex", v) + } +} From 91b41ddd59788ef800804b036f47eda73442b780 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 31 Jan 2018 12:13:59 +0400 Subject: [PATCH 63/68] add waitCh as an alternative to waitGroup new methods: - [CList] WaitChan() - [CElement] NextWaitChan() - [CElement] PrevWaitChan() Refs https://github.com/tendermint/tendermint/pull/1173 --- CHANGELOG.md | 7 +++++ clist/clist.go | 75 +++++++++++++++++++++++++++++++++++---------- clist/clist_test.go | 73 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+), 17 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 42b8cdd61..2c9466126 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,13 @@ BREAKING: - [cli] WriteDemoConfig -> WriteConfigValues +## 0.6.1 (TBD) + +IMPROVEMENTS: + - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan() + to CElement. These can be used instead of blocking *Wait() methods + if you need to be able to send quit signal and not block forever + ## 0.6.0 (December 29, 2017) BREAKING: diff --git a/clist/clist.go b/clist/clist.go index a52920f8c..28d771a28 100644 --- a/clist/clist.go +++ b/clist/clist.go @@ -36,12 +36,14 @@ waiting on NextWait() (since it's just a read operation). */ type CElement struct { - mtx sync.RWMutex - prev *CElement - prevWg *sync.WaitGroup - next *CElement - nextWg *sync.WaitGroup - removed bool + mtx sync.RWMutex + prev *CElement + prevWg *sync.WaitGroup + prevWaitCh chan struct{} + next *CElement + nextWg *sync.WaitGroup + nextWaitCh chan struct{} + removed bool Value interface{} // immutable } @@ -84,6 +86,24 @@ func (e *CElement) PrevWait() *CElement { } } +// PrevWaitChan can be used to wait until Prev becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) PrevWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.prevWaitCh +} + +// NextWaitChan can be used to wait until Next becomes not nil. Once it does, +// channel will be closed. +func (e *CElement) NextWaitChan() <-chan struct{} { + e.mtx.RLock() + defer e.mtx.RUnlock() + + return e.nextWaitCh +} + // Nonblocking, may return nil if at the end. 
func (e *CElement) Next() *CElement { e.mtx.RLock() @@ -142,9 +162,11 @@ func (e *CElement) SetNext(newNext *CElement) { // events, new Add calls must happen after all previous Wait calls have // returned. e.nextWg = waitGroup1() // WaitGroups are difficult to re-use. + e.nextWaitCh = make(chan struct{}) } if oldNext == nil && newNext != nil { e.nextWg.Done() + close(e.nextWaitCh) } } @@ -158,9 +180,11 @@ func (e *CElement) SetPrev(newPrev *CElement) { e.prev = newPrev if oldPrev != nil && newPrev == nil { e.prevWg = waitGroup1() // WaitGroups are difficult to re-use. + e.prevWaitCh = make(chan struct{}) } if oldPrev == nil && newPrev != nil { e.prevWg.Done() + close(e.prevWaitCh) } } @@ -173,9 +197,11 @@ func (e *CElement) SetRemoved() { // This wakes up anyone waiting in either direction. if e.prev == nil { e.prevWg.Done() + close(e.prevWaitCh) } if e.next == nil { e.nextWg.Done() + close(e.nextWaitCh) } } @@ -185,11 +211,12 @@ func (e *CElement) SetRemoved() { // The zero value for CList is an empty list ready to use. // Operations are goroutine-safe. type CList struct { - mtx sync.RWMutex - wg *sync.WaitGroup - head *CElement // first element - tail *CElement // last element - len int // list length + mtx sync.RWMutex + wg *sync.WaitGroup + waitCh chan struct{} + head *CElement // first element + tail *CElement // last element + len int // list length } func (l *CList) Init() *CList { @@ -197,6 +224,7 @@ func (l *CList) Init() *CList { defer l.mtx.Unlock() l.wg = waitGroup1() + l.waitCh = make(chan struct{}) l.head = nil l.tail = nil l.len = 0 @@ -258,23 +286,35 @@ func (l *CList) BackWait() *CElement { } } +// WaitChan can be used to wait until Front or Back becomes not nil. Once it +// does, channel will be closed. +func (l *CList) WaitChan() <-chan struct{} { + l.mtx.Lock() + defer l.mtx.Unlock() + + return l.waitCh +} + func (l *CList) PushBack(v interface{}) *CElement { l.mtx.Lock() defer l.mtx.Unlock() // Construct a new element e := &CElement{ - prev: nil, - prevWg: waitGroup1(), - next: nil, - nextWg: waitGroup1(), - removed: false, - Value: v, + prev: nil, + prevWg: waitGroup1(), + prevWaitCh: make(chan struct{}), + next: nil, + nextWg: waitGroup1(), + nextWaitCh: make(chan struct{}), + removed: false, + Value: v, } // Release waiters on FrontWait/BackWait maybe if l.len == 0 { l.wg.Done() + close(l.waitCh) } l.len += 1 @@ -313,6 +353,7 @@ func (l *CList) Remove(e *CElement) interface{} { // If we're removing the only item, make CList FrontWait/BackWait wait. if l.len == 1 { l.wg = waitGroup1() // WaitGroups are difficult to re-use. 
+ l.waitCh = make(chan struct{}) } // Update l.len diff --git a/clist/clist_test.go b/clist/clist_test.go index 9d5272de5..31f821653 100644 --- a/clist/clist_test.go +++ b/clist/clist_test.go @@ -218,3 +218,76 @@ func TestScanRightDeleteRandom(t *testing.T) { t.Fatal("Failed to remove all elements from CList") } } + +func TestWaitChan(t *testing.T) { + l := New() + ch := l.WaitChan() + + // 1) add one element to an empty list + go l.PushBack(1) + <-ch + + // 2) and remove it + el := l.Front() + v := l.Remove(el) + if v != 1 { + t.Fatal("where is 1 coming from?") + } + + // 3) test iterating forward and waiting for Next (NextWaitChan and Next) + el = l.PushBack(0) + + done := make(chan struct{}) + pushed := 0 + go func() { + for i := 1; i < 100; i++ { + l.PushBack(i) + pushed++ + time.Sleep(time.Duration(rand.Intn(100)) * time.Millisecond) + } + close(done) + }() + + next := el + seen := 0 +FOR_LOOP: + for { + select { + case <-next.NextWaitChan(): + next = next.Next() + seen++ + if next == nil { + continue + } + case <-done: + break FOR_LOOP + case <-time.After(10 * time.Second): + t.Fatal("max execution time") + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } + + // 4) test iterating backwards (PrevWaitChan and Prev) + prev := next + seen = 0 +FOR_LOOP2: + for { + select { + case <-prev.PrevWaitChan(): + prev = prev.Prev() + seen++ + if prev == nil { + t.Fatal("expected PrevWaitChan to block forever on nil when reached first elem") + } + case <-time.After(5 * time.Second): + break FOR_LOOP2 + } + } + + if pushed != seen { + t.Fatalf("number of pushed items (%d) not equal to number of seen items (%d)", pushed, seen) + } +} From 763dc2139300927522e1fc5aa5a1c7f777f6175a Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Tue, 6 Feb 2018 01:12:19 -0800 Subject: [PATCH 64/68] common/BitArray: reduce fragility with methods Fixes https://github.com/tendermint/tmlibs/issues/145 Fixes https://github.com/tendermint/tmlibs/issues/146 The code in here has been fragile when it comes to nil but these edge cases were never tested, although they've showed up in the wild and were only noticed because the reporter actually read the logs otherwise we'd have never known. This changes covers some of these cases and adds some tests. --- common/bit_array.go | 15 +++++++++++---- common/bit_array_test.go | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 4 deletions(-) diff --git a/common/bit_array.go b/common/bit_array.go index 68201bad6..7cc84705e 100644 --- a/common/bit_array.go +++ b/common/bit_array.go @@ -99,8 +99,14 @@ func (bA *BitArray) copyBits(bits int) *BitArray { // Returns a BitArray of larger bits size. func (bA *BitArray) Or(o *BitArray) *BitArray { - if bA == nil { - o.Copy() + if bA == nil && o == nil { + return nil + } + if bA == nil && o != nil { + return o.Copy() + } + if o == nil { + return bA.Copy() } bA.mtx.Lock() defer bA.mtx.Unlock() @@ -113,7 +119,7 @@ func (bA *BitArray) Or(o *BitArray) *BitArray { // Returns a BitArray of smaller bit size. func (bA *BitArray) And(o *BitArray) *BitArray { - if bA == nil { + if bA == nil || o == nil { return nil } bA.mtx.Lock() @@ -143,7 +149,8 @@ func (bA *BitArray) Not() *BitArray { } func (bA *BitArray) Sub(o *BitArray) *BitArray { - if bA == nil { + if bA == nil || o == nil { + // TODO: Decide if we should do 1's complement here? 
return nil } bA.mtx.Lock() diff --git a/common/bit_array_test.go b/common/bit_array_test.go index e4ac8bf6f..94a312b7e 100644 --- a/common/bit_array_test.go +++ b/common/bit_array_test.go @@ -3,6 +3,8 @@ package common import ( "bytes" "testing" + + "github.com/stretchr/testify/require" ) func randBitArray(bits int) (*BitArray, []byte) { @@ -26,6 +28,11 @@ func TestAnd(t *testing.T) { bA2, _ := randBitArray(31) bA3 := bA1.And(bA2) + var bNil *BitArray + require.Equal(t, bNil.And(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.And(nil), (*BitArray)(nil)) + require.Equal(t, bNil.And(nil), (*BitArray)(nil)) + if bA3.Bits != 31 { t.Error("Expected min bits", bA3.Bits) } @@ -46,6 +53,11 @@ func TestOr(t *testing.T) { bA2, _ := randBitArray(31) bA3 := bA1.Or(bA2) + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Or(bA1), bA1) + require.Equal(t, bA1.Or(nil), bA1) + require.Equal(t, bNil.Or(nil), (*BitArray)(nil)) + if bA3.Bits != 51 { t.Error("Expected max bits") } @@ -66,6 +78,11 @@ func TestSub1(t *testing.T) { bA2, _ := randBitArray(51) bA3 := bA1.Sub(bA2) + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) + require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + if bA3.Bits != bA1.Bits { t.Error("Expected bA1 bits") } @@ -89,6 +106,11 @@ func TestSub2(t *testing.T) { bA2, _ := randBitArray(31) bA3 := bA1.Sub(bA2) + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) + require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + if bA3.Bits != bA1.Bits { t.Error("Expected bA1 bits") } From 52ce4c20f8bc9b6da5fc1274bcce27c0b9dd738a Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Fri, 9 Feb 2018 13:31:32 +0400 Subject: [PATCH 65/68] Fix RepeatTimer memory leak (#137) fix RepeatTimer memory leak (Refs #137) * test case * drain channels on reset Leaking memory: ``` leaktest.go:144: leaktest: leaked goroutine: goroutine 116 [chan send]: github.com/tendermint/tmlibs/common.(*RepeatTimer).fireRoutine(0xc42006a410, 0xc4203403c0, 0xc42031b2c0) /go/src/github.com/tendermint/tmlibs/common/repeat_timer.go:160 +0x6e created by github.com/tendermint/tmlibs/common.(*RepeatTimer).reset /go/src/github.com/tendermint/tmlibs/common/repeat_timer.go:196 +0xe9 ``` The alternative solution could be draining channels on the client side. * add one more select instead of draining thanks to Jae --- common/repeat_timer.go | 7 +++++-- common/repeat_timer_test.go | 33 +++++++++++++++++++++++++++++++++ glide.lock | 2 ++ glide.yaml | 1 + 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index cb227199e..dba5fbadd 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -155,7 +155,11 @@ func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { for { select { case t_ := <-ch: - t.ch <- t_ + select { + case t.ch <- t_: + case <-quit: + return + } case <-quit: // NOTE: `t.quit` races. 
return } @@ -210,7 +214,6 @@ func (t *RepeatTimer) stop() { t.ticker.Stop() t.ticker = nil /* - XXX From https://golang.org/pkg/time/#Ticker: "Stop the ticker to release associated resources" "After Stop, no more ticks will be sent" diff --git a/common/repeat_timer_test.go b/common/repeat_timer_test.go index 5598922c5..160f4394a 100644 --- a/common/repeat_timer_test.go +++ b/common/repeat_timer_test.go @@ -1,10 +1,12 @@ package common import ( + "math/rand" "sync" "testing" "time" + "github.com/fortytw2/leaktest" "github.com/stretchr/testify/assert" ) @@ -102,3 +104,34 @@ func TestRepeatTimer(t *testing.T) { // Another stop panics. assert.Panics(t, func() { rt.Stop() }) } + +func TestRepeatTimerReset(t *testing.T) { + // check that we are not leaking any go-routines + defer leaktest.Check(t)() + + timer := NewRepeatTimer("test", 20*time.Millisecond) + defer timer.Stop() + + // test we don't receive tick before duration ms. + select { + case <-timer.Chan(): + t.Fatal("did not expect to receive tick") + default: + } + + timer.Reset() + + // test we receive tick after Reset is called + select { + case <-timer.Chan(): + // all good + case <-time.After(40 * time.Millisecond): + t.Fatal("expected to receive tick after reset") + } + + // just random calls + for i := 0; i < 100; i++ { + time.Sleep(time.Duration(rand.Intn(40)) * time.Millisecond) + timer.Reset() + } +} diff --git a/glide.lock b/glide.lock index 10dec980b..a0ada5a4a 100644 --- a/glide.lock +++ b/glide.lock @@ -95,6 +95,8 @@ testImports: version: 346938d642f2ec3594ed81d874461961cd0faa76 subpackages: - spew +- name: github.com/fortytw2/leaktest + version: 3b724c3d7b8729a35bf4e577f71653aec6e53513 - name: github.com/pmezard/go-difflib version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: diff --git a/glide.yaml b/glide.yaml index b12c72a16..cf3da346b 100644 --- a/glide.yaml +++ b/glide.yaml @@ -35,3 +35,4 @@ testImport: subpackages: - assert - require +- package: github.com/fortytw2/leaktest From a57340ffb53aefb0fca1fc610d18fcbcc61b126f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 12 Feb 2018 11:38:31 +0400 Subject: [PATCH 66/68] add Quit method to Service interface remove deprecated QuitService --- CHANGELOG.md | 2 ++ common/service.go | 54 ++++++++++++++++++++++++----------------------- 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c9466126..374a272d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,8 @@ BREAKING: - [cli] WriteDemoConfig -> WriteConfigValues + - [common] added Quit method to Service interface, which returns a channel + which is closed once a service is stopped ## 0.6.1 (TBD) diff --git a/common/service.go b/common/service.go index d70d16a80..2502d671c 100644 --- a/common/service.go +++ b/common/service.go @@ -35,9 +35,13 @@ type Service interface { // Return true if the service is running IsRunning() bool + // Quit returns a channel, which is closed once service is stopped. + Quit() <-chan struct{} + // String representation of the service String() string + // SetLogger sets a logger. SetLogger(log.Logger) } @@ -88,12 +92,13 @@ type BaseService struct { name string started uint32 // atomic stopped uint32 // atomic - Quit chan struct{} + quit chan struct{} // The "subclass" of BaseService impl Service } +// NewBaseService creates a new BaseService. 
func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { if logger == nil { logger = log.NewNopLogger() @@ -102,16 +107,19 @@ func NewBaseService(logger log.Logger, name string, impl Service) *BaseService { return &BaseService{ Logger: logger, name: name, - Quit: make(chan struct{}), + quit: make(chan struct{}), impl: impl, } } +// SetLogger implements Service by setting a logger. func (bs *BaseService) SetLogger(l log.Logger) { bs.Logger = l } -// Implements Servce +// Start implements Service by calling OnStart (if defined). An error will be +// returned if the service is already running or stopped. Not to start the +// stopped service, you need to call Reset. func (bs *BaseService) Start() error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { @@ -133,17 +141,18 @@ func (bs *BaseService) Start() error { } } -// Implements Service +// OnStart implements Service by doing nothing. // NOTE: Do not put anything in here, // that way users don't need to call BaseService.OnStart() func (bs *BaseService) OnStart() error { return nil } -// Implements Service +// Stop implements Service by calling OnStop (if defined) and closing quit +// channel. An error will be returned if the service is already stopped. func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { bs.Logger.Info(Fmt("Stopping %v", bs.name), "impl", bs.impl) bs.impl.OnStop() - close(bs.Quit) + close(bs.quit) return nil } else { bs.Logger.Debug(Fmt("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) @@ -151,12 +160,13 @@ func (bs *BaseService) Stop() error { } } -// Implements Service +// OnStop implements Service by doing nothing. // NOTE: Do not put anything in here, // that way users don't need to call BaseService.OnStop() func (bs *BaseService) OnStop() {} -// Implements Service +// Reset implements Service by calling OnReset callback (if defined). An error +// will be returned if the service is running. func (bs *BaseService) Reset() error { if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { bs.Logger.Debug(Fmt("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) @@ -166,41 +176,33 @@ func (bs *BaseService) Reset() error { // whether or not we've started, we can reset atomic.CompareAndSwapUint32(&bs.started, 1, 0) - bs.Quit = make(chan struct{}) + bs.quit = make(chan struct{}) return bs.impl.OnReset() } -// Implements Service +// OnReset implements Service by panicking. func (bs *BaseService) OnReset() error { PanicSanity("The service cannot be reset") return nil } -// Implements Service +// IsRunning implements Service by returning true or false depending on the +// service's state. func (bs *BaseService) IsRunning() bool { return atomic.LoadUint32(&bs.started) == 1 && atomic.LoadUint32(&bs.stopped) == 0 } +// Wait blocks until the service is stopped. func (bs *BaseService) Wait() { - <-bs.Quit + <-bs.quit } -// Implements Servce +// String implements Servce by returning a string representation of the service. func (bs *BaseService) String() string { return bs.name } -//---------------------------------------- - -type QuitService struct { - BaseService -} - -func NewQuitService(logger log.Logger, name string, impl Service) *QuitService { - if logger != nil { - logger.Info("QuitService is deprecated, use BaseService instead") - } - return &QuitService{ - BaseService: *NewBaseService(logger, name, impl), - } +// Quit Implements Service by returning a quit channel. 
+func (bs *BaseService) Quit() <-chan struct{} { + return bs.quit } From 737c30c19d43b12e132843d95f6250b216a9c215 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 12 Feb 2018 19:12:24 -0500 Subject: [PATCH 67/68] minor nit --- common/repeat_timer.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/common/repeat_timer.go b/common/repeat_timer.go index dba5fbadd..5d049738d 100644 --- a/common/repeat_timer.go +++ b/common/repeat_timer.go @@ -20,15 +20,17 @@ type Ticker interface { } //---------------------------------------- -// defaultTickerMaker +// defaultTicker + +var _ Ticker = (*defaultTicker)(nil) + +type defaultTicker time.Ticker func defaultTickerMaker(dur time.Duration) Ticker { ticker := time.NewTicker(dur) return (*defaultTicker)(ticker) } -type defaultTicker time.Ticker - // Implements Ticker func (t *defaultTicker) Chan() <-chan time.Time { return t.C @@ -151,12 +153,13 @@ func NewRepeatTimerWithTickerMaker(name string, dur time.Duration, tm TickerMake return t } +// receive ticks on ch, send out on t.ch func (t *RepeatTimer) fireRoutine(ch <-chan time.Time, quit <-chan struct{}) { for { select { - case t_ := <-ch: + case tick := <-ch: select { - case t.ch <- t_: + case t.ch <- tick: case <-quit: return } From c6163bdab2d627855400284c90a9c95a53d8eb87 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 19 Feb 2018 17:05:36 -0500 Subject: [PATCH 68/68] version bump and changelog --- CHANGELOG.md | 25 +++++++++++++++++++++---- version/version.go | 2 +- 2 files changed, 22 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 374a272d4..89b841d4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,16 +4,33 @@ BREAKING: + - [db] Major API upgrade. See `db/types.go`. + - [common] added `Quit() <-chan struct{}` to Service interface. + The returned channel is closed when service is stopped. + - [common] Remove HTTP functions + - [common] Heap.Push takes an `int`, new Heap.PushComparable takes the comparable. + - [logger] Removed. Use `log` + - [merkle] Major API updade - uses cmn.KVPairs. - [cli] WriteDemoConfig -> WriteConfigValues - - [common] added Quit method to Service interface, which returns a channel - which is closed once a service is stopped + - [all] Remove go-wire dependency! -## 0.6.1 (TBD) +FEATURES: + + - [db] New FSDB that uses the filesystem directly + - [common] HexBytes + - [common] KVPair and KI64Pair (protobuf based key-value pair objects) IMPROVEMENTS: + - [clist] add WaitChan() to CList, NextWaitChan() and PrevWaitChan() - to CElement. These can be used instead of blocking *Wait() methods + to CElement. These can be used instead of blocking `*Wait()` methods if you need to be able to send quit signal and not block forever + - [common] IsHex handles 0x-prefix + +BUG FIXES: + + - [common] BitArray check for nil arguments + - [common] Fix memory leak in RepeatTimer ## 0.6.0 (December 29, 2017) diff --git a/version/version.go b/version/version.go index 6cc887286..2c0474fa8 100644 --- a/version/version.go +++ b/version/version.go @@ -1,3 +1,3 @@ package version -const Version = "0.6.0" +const Version = "0.7.0"
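
Usage note for the 0.7.0 breaking change above: `Quit` is now a method returning a channel rather than an exported field, and the channel is closed when the service stops. A minimal sketch (not part of this patch set) of how a downstream caller might consume it instead of blocking in `Wait()`; the `demo` package, `runLoop` helper, and `ticks` argument are hypothetical:

```
package demo

import (
	"time"

	cmn "github.com/tendermint/tmlibs/common"
)

// runLoop selects on svc.Quit() instead of blocking in svc.Wait(), so the
// loop can also react to other events while the service is running.
func runLoop(svc cmn.Service, ticks <-chan time.Time) {
	for {
		select {
		case <-svc.Quit(): // closed once the service is stopped
			return
		case <-ticks:
			// periodic work goes here
		}
	}
}
```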