
CacheWrap() -> CacheDB() CacheDB

pull/1842/head
Jae Kwon, 7 years ago
commit 56e51bc113
7 changed files with 49 additions and 43 deletions
  1. db/c_level_db.go      +1   -1
  2. db/cache_db.go        +29  -28
  3. db/cache_db_test.go   +9   -9
  4. db/db.go              +7   -2
  5. db/fsdb.go            +1   -1
  6. db/go_level_db.go     +1   -1
  7. db/mem_db.go          +1   -1

db/c_level_db.go  (+1, -1)

@@ -120,7 +120,7 @@ func (db *CLevelDB) Stats() map[string]string {
     return stats
 }
 
-func (db *CLevelDB) CacheWrap() interface{} {
+func (db *CLevelDB) CacheDB() CacheDB {
     return NewCacheDB(db, db.GetWriteLockVersion())
 }


db/cache_db.go  (+29, -28)

@@ -16,8 +16,8 @@ type cDBValue struct {
     dirty   bool
 }
 
-// CacheDB wraps an in-memory cache around an underlying DB.
-type CacheDB struct {
+// cacheDB wraps an in-memory cache around an underlying DB.
+type cacheDB struct {
     mtx         sync.Mutex
     cache       map[string]cDBValue
     parent      DB
@@ -27,13 +27,14 @@ type CacheDB struct {
 }
 
 // Needed by MultiStore.CacheWrap().
-var _ atomicSetDeleter = (*CacheDB)(nil)
+var _ atomicSetDeleter = (*cacheDB)(nil)
+var _ CacheDB = (*cacheDB)(nil)
 
 // Users should typically not be required to call NewCacheDB directly, as the
-// DB implementations here provide a .CacheWrap() function already.
+// DB implementations here provide a .CacheDB() function already.
 // `lockVersion` is typically provided by parent.GetWriteLockVersion().
-func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB {
-    db := &CacheDB{
+func NewCacheDB(parent DB, lockVersion interface{}) CacheDB {
+    db := &cacheDB{
         cache:       make(map[string]cDBValue),
         parent:      parent,
         lockVersion: lockVersion,
@@ -42,7 +43,7 @@ func NewCacheDB(parent DB, lockVersion interface{}) *CacheDB {
     return db
 }
 
-func (db *CacheDB) Get(key []byte) []byte {
+func (db *cacheDB) Get(key []byte) []byte {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
@@ -55,54 +56,54 @@ func (db *CacheDB) Get(key []byte) []byte {
     return dbValue.value
 }
 
-func (db *CacheDB) Set(key []byte, value []byte) {
+func (db *cacheDB) Set(key []byte, value []byte) {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
     db.SetNoLock(key, value)
 }
 
-func (db *CacheDB) SetSync(key []byte, value []byte) {
+func (db *cacheDB) SetSync(key []byte, value []byte) {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
     db.SetNoLock(key, value)
 }
 
-func (db *CacheDB) SetNoLock(key []byte, value []byte) {
+func (db *cacheDB) SetNoLock(key []byte, value []byte) {
     db.cache[string(key)] = cDBValue{value: value, deleted: false, dirty: true}
 }
 
-func (db *CacheDB) Delete(key []byte) {
+func (db *cacheDB) Delete(key []byte) {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
     db.DeleteNoLock(key)
 }
 
-func (db *CacheDB) DeleteSync(key []byte) {
+func (db *cacheDB) DeleteSync(key []byte) {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
     db.DeleteNoLock(key)
 }
 
-func (db *CacheDB) DeleteNoLock(key []byte) {
+func (db *cacheDB) DeleteNoLock(key []byte) {
     db.cache[string(key)] = cDBValue{value: nil, deleted: true, dirty: true}
 }
 
-func (db *CacheDB) Close() {
+func (db *cacheDB) Close() {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
     db.parent.Close()
 }
 
-func (db *CacheDB) Print() {
+func (db *cacheDB) Print() {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
-    fmt.Println("CacheDB\ncache:")
+    fmt.Println("cacheDB\ncache:")
     for key, value := range db.cache {
         fmt.Printf("[%X]:\t[%v]\n", []byte(key), value)
     }
@@ -110,7 +111,7 @@ func (db *CacheDB) Print() {
     db.parent.Print()
 }
 
-func (db *CacheDB) Stats() map[string]string {
+func (db *cacheDB) Stats() map[string]string {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
@@ -121,30 +122,30 @@ func (db *CacheDB) Stats() map[string]string {
     return stats
 }
 
-func (db *CacheDB) Iterator() Iterator {
-    panic("CacheDB.Iterator() not yet supported")
+func (db *cacheDB) Iterator() Iterator {
+    panic("cacheDB.Iterator() not yet supported")
 }
 
-func (db *CacheDB) NewBatch() Batch {
+func (db *cacheDB) NewBatch() Batch {
     return &memBatch{db, nil}
 }
 
 // Implements `atomicSetDeleter` for Batch support.
-func (db *CacheDB) Mutex() *sync.Mutex {
+func (db *cacheDB) Mutex() *sync.Mutex {
     return &(db.mtx)
 }
 
 // Write writes pending updates to the parent database and clears the cache.
-func (db *CacheDB) Write() {
+func (db *cacheDB) Write() {
     db.mtx.Lock()
     defer db.mtx.Unlock()
 
-    // Optional sanity check to ensure that CacheDB is valid
+    // Optional sanity check to ensure that cacheDB is valid
     if parent, ok := db.parent.(WriteLocker); ok {
         if parent.TryWriteLock(db.lockVersion) {
             // All good!
         } else {
-            panic("CacheDB.Write() failed. Did this CacheDB expire?")
+            panic("cacheDB.Write() failed. Did this CacheDB expire?")
         }
     }
@@ -176,14 +177,14 @@ func (db *CacheDB) Write() {
 }
 
 //----------------------------------------
 
-// To CacheWrap this CacheDB further.
-func (db *CacheDB) CacheWrap() interface{} {
+// To cache-wrap this cacheDB further.
+func (db *cacheDB) CacheDB() CacheDB {
     return NewCacheDB(db, db.GetWriteLockVersion())
 }
 
-// If the parent parent DB implements this, (e.g. such as a CacheDB parent to a
-// CacheDB child), CacheDB will call `parent.TryWriteLock()` before attempting
+// If the parent parent DB implements this, (e.g. such as a cacheDB parent to a
+// cacheDB child), cacheDB will call `parent.TryWriteLock()` before attempting
 // to write.
 type WriteLocker interface {
     GetWriteLockVersion() (lockVersion interface{})
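
Note: the WriteLocker check above is what makes a cache-wrap single-use. A successful Write() advances the parent's write-lock version, so calling Write() again on the same, now-stale wrapper panics. A minimal sketch of that behavior, assuming this package is imported as dbm (the import path below is an assumption, not part of this diff):

package main

import (
    "fmt"

    dbm "github.com/tendermint/tmlibs/db" // assumed import path
)

func main() {
    mem := dbm.NewMemDB()

    // Cache-wrap the parent; the wrapper captures mem's current lock version.
    cdb := mem.CacheDB()
    cdb.Set([]byte("key1"), []byte("value1"))

    // A cacheDB can itself be cache-wrapped further.
    cdb2 := cdb.CacheDB()
    cdb2.Set([]byte("key2"), []byte("value2"))
    cdb2.Write() // flushes key2 into cdb and expires cdb2

    // The first Write() on cdb succeeds and flushes both keys into mem.
    cdb.Write()
    fmt.Printf("%s %s\n", mem.Get([]byte("key1")), mem.Get([]byte("key2"))) // value1 value2

    // A second cdb.Write() would now panic:
    //   "cacheDB.Write() failed. Did this CacheDB expire?"
    // cdb.Write()
}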


db/cache_db_test.go  (+9, -9)

@@ -10,7 +10,7 @@ func bz(s string) []byte { return []byte(s) }
 
 func TestCacheDB(t *testing.T) {
     mem := NewMemDB()
-    cdb := mem.CacheWrap().(*CacheDB)
+    cdb := mem.CacheDB()
 
     require.Empty(t, cdb.Get(bz("key1")), "Expected `key1` to be empty")
@@ -27,7 +27,7 @@ func TestCacheDB(t *testing.T) {
     require.Panics(t, func() { cdb.Write() }, "Expected second cdb.Write() to fail")
 
-    cdb = mem.CacheWrap().(*CacheDB)
+    cdb = mem.CacheDB()
     cdb.Delete(bz("key1"))
     require.Empty(t, cdb.Get(bz("key1")))
     require.Equal(t, mem.Get(bz("key1")), bz("value2"))
@@ -39,33 +39,33 @@ func TestCacheDB(t *testing.T) {
 
 func TestCacheDBWriteLock(t *testing.T) {
     mem := NewMemDB()
-    cdb := mem.CacheWrap().(*CacheDB)
+    cdb := mem.CacheDB()
     require.NotPanics(t, func() { cdb.Write() })
     require.Panics(t, func() { cdb.Write() })
 
-    cdb = mem.CacheWrap().(*CacheDB)
+    cdb = mem.CacheDB()
     require.NotPanics(t, func() { cdb.Write() })
     require.Panics(t, func() { cdb.Write() })
 }
 
 func TestCacheDBWriteLockNested(t *testing.T) {
     mem := NewMemDB()
-    cdb := mem.CacheWrap().(*CacheDB)
-    cdb2 := cdb.CacheWrap().(*CacheDB)
+    cdb := mem.CacheDB()
+    cdb2 := cdb.CacheDB()
     require.NotPanics(t, func() { cdb2.Write() })
     require.Panics(t, func() { cdb2.Write() })
 
-    cdb2 = cdb.CacheWrap().(*CacheDB)
+    cdb2 = cdb.CacheDB()
     require.NotPanics(t, func() { cdb2.Write() })
     require.Panics(t, func() { cdb2.Write() })
 }
 
 func TestCacheDBNested(t *testing.T) {
     mem := NewMemDB()
-    cdb := mem.CacheWrap().(*CacheDB)
+    cdb := mem.CacheDB()
     cdb.Set(bz("key1"), bz("value1"))
 
     require.Empty(t, mem.Get(bz("key1")))
     require.Equal(t, bz("value1"), cdb.Get(bz("key1")))
 
-    cdb2 := cdb.CacheWrap().(*CacheDB)
+    cdb2 := cdb.CacheDB()
     require.Equal(t, bz("value1"), cdb2.Get(bz("key1")))
 
     cdb2.Set(bz("key1"), bz("VALUE2"))
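
These test edits are the entire call-site migration: the old pattern went through an empty-interface return and a type assertion, while the new one gets the CacheDB interface back directly. A small before/after sketch, assuming the same dbm import path as above:

package main

import (
    dbm "github.com/tendermint/tmlibs/db" // assumed import path
)

func main() {
    mem := dbm.NewMemDB()

    // Before this commit (from outside the package):
    //
    //     cdb := mem.CacheWrap().(*dbm.CacheDB)
    //
    // After this commit, no type assertion is needed:
    cdb := mem.CacheDB()

    cdb.Set([]byte("key1"), []byte("value1"))
    cdb.Write() // flush the cached write into mem
}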


db/db.go  (+7, -2)

@@ -18,8 +18,13 @@ type DB interface {
     // Stats returns a map of property values for all keys and the size of the cache.
     Stats() map[string]string
 
-    // CacheWrap wraps the DB w/ a CacheDB.
-    CacheWrap() interface{}
+    // CacheDB wraps the DB w/ a cache.
+    CacheDB() CacheDB
 }
 
+type CacheDB interface {
+    DB
+    Write() // Write to the underlying DB
+}
+
 type Batch interface {
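
Because the new CacheDB interface just embeds DB and adds Write(), callers can stay backend-agnostic: any DB updated in this commit (MemDB, GoLevelDB, CLevelDB, FSDB) hands out a scratch view the same way. A minimal sketch under the same dbm import assumption; withScratch is a hypothetical helper name, not part of the package:

package main

import (
    "fmt"

    dbm "github.com/tendermint/tmlibs/db" // assumed import path
)

// withScratch (hypothetical) runs fn against a cache-wrapped view of parent
// and flushes the cached writes only if fn reports success.
func withScratch(parent dbm.DB, fn func(dbm.CacheDB) bool) {
    cdb := parent.CacheDB()
    if fn(cdb) {
        cdb.Write() // commit the batch of cached writes to parent
    }
    // On failure the wrapper is simply dropped and parent stays untouched.
}

func main() {
    mem := dbm.NewMemDB()
    withScratch(mem, func(cdb dbm.CacheDB) bool {
        cdb.Set([]byte("key1"), []byte("value1"))
        return true
    })
    fmt.Printf("%s\n", mem.Get([]byte("key1"))) // value1
}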


db/fsdb.go  (+1, -1)

@@ -140,7 +140,7 @@ func (db *FSDB) Mutex() *sync.Mutex {
     return &(db.mtx)
 }
 
-func (db *FSDB) CacheWrap() interface{} {
+func (db *FSDB) CacheDB() CacheDB {
     return NewCacheDB(db, db.GetWriteLockVersion())
 }


db/go_level_db.go  (+1, -1)

@@ -121,7 +121,7 @@ func (db *GoLevelDB) Stats() map[string]string {
     return stats
 }
 
-func (db *GoLevelDB) CacheWrap() interface{} {
+func (db *GoLevelDB) CacheDB() CacheDB {
     return NewCacheDB(db, db.GetWriteLockVersion())
 }


db/mem_db.go  (+1, -1)

@@ -114,7 +114,7 @@ func (db *MemDB) Mutex() *sync.Mutex {
     return &(db.mtx)
 }
 
-func (db *MemDB) CacheWrap() interface{} {
+func (db *MemDB) CacheDB() CacheDB {
     return NewCacheDB(db, db.GetWriteLockVersion())
 }

