
store: order-preserving varint key encoding (#5771)

pull/5867/head
Callum Waters 4 years ago
committed by GitHub
commit 9b9222f461
9 changed files with 193 additions and 125 deletions
1. CHANGELOG_PENDING.md  +3 -0
2. evidence/pool.go  +28 -16
3. go.mod  +1 -0
4. go.sum  +2 -0
5. light/store/db/db.go  +55 -54
6. light/store/db/db_test.go  +3 -0
7. state/store.go  +34 -17
8. store/store.go  +58 -27
9. store/store_test.go  +9 -11

CHANGELOG_PENDING.md  +3 -0

@ -38,6 +38,9 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- Blockchain Protocol
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
### FEATURES
### IMPROVEMENTS
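
The property the changelog entry refers to: orderedcode encodes integers so that byte-wise key comparison matches numeric comparison, which the old fmt.Sprintf-style keys only got from manual zero-padding. A minimal sketch of that property, using only the orderedcode calls that appear in this diff (not part of the commit itself):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/orderedcode"
)

func main() {
	// Order-preserving encoding: comparing encoded keys as bytes
	// gives the same result as comparing the heights as numbers.
	k2, _ := orderedcode.Append(nil, int64(2))
	k10, _ := orderedcode.Append(nil, int64(10))
	fmt.Println(bytes.Compare(k2, k10) < 0) // true: height 2 sorts before height 10

	// Plain decimal strings lack this property: "10" < "2" lexicographically.
	fmt.Println(bytes.Compare([]byte("10"), []byte("2")) < 0) // true, i.e. the wrong order
}
```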


evidence/pool.go  +28 -16

@ -11,6 +11,7 @@ import (
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
gogotypes "github.com/gogo/protobuf/types" gogotypes "github.com/gogo/protobuf/types"
"github.com/google/orderedcode"
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
clist "github.com/tendermint/tendermint/libs/clist" clist "github.com/tendermint/tendermint/libs/clist"
@ -21,8 +22,9 @@ import (
)
const (
baseKeyCommitted = byte(0x00)
baseKeyPending = byte(0x01)
// prefixes are unique across all tm db's
prefixCommitted = int64(8)
prefixPending = int64(9)
)
// Pool maintains a pool of valid evidence to be broadcasted and committed
@ -67,7 +69,7 @@ func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool,
// if pending evidence already in db, in event of prior failure, then check for expiration,
// update the size and load it back to the evidenceList
pool.pruningHeight, pool.pruningTime = pool.removeExpiredPendingEvidence()
evList, _, err := pool.listEvidence(baseKeyPending, -1)
evList, _, err := pool.listEvidence(prefixPending, -1)
if err != nil {
return nil, err
}
@ -84,7 +86,7 @@ func (evpool *Pool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) {
if evpool.Size() == 0 {
return []types.Evidence{}, 0
}
evidence, size, err := evpool.listEvidence(baseKeyPending, maxBytes)
evidence, size, err := evpool.listEvidence(prefixPending, maxBytes)
if err != nil {
evpool.logger.Error("Unable to retrieve pending evidence", "err", err)
}
@ -402,7 +404,7 @@ func (evpool *Pool) markEvidenceAsCommitted(evidence types.EvidenceList) {
// listEvidence retrieves lists evidence from oldest to newest within maxBytes.
// If maxBytes is -1, there's no cap on the size of returned evidence.
func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Evidence, int64, error) {
func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evidence, int64, error) {
var (
evSize int64
totalSize int64
@ -410,11 +412,12 @@ func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Eviden
evList tmproto.EvidenceList // used for calculating the bytes size
)
iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{prefixKey})
iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixToBytes(prefixKey))
if err != nil {
return nil, totalSize, fmt.Errorf("database error: %v", err)
}
defer iter.Close()
for ; iter.Valid(); iter.Next() {
var evpb tmproto.Evidence
err := evpb.Unmarshal(iter.Value())
@ -446,7 +449,7 @@ func (evpool *Pool) listEvidence(prefixKey byte, maxBytes int64) ([]types.Eviden
}
func (evpool *Pool) removeExpiredPendingEvidence() (int64, time.Time) {
iter, err := dbm.IteratePrefix(evpool.evidenceStore, []byte{baseKeyPending})
iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixToBytes(prefixPending))
if err != nil {
evpool.logger.Error("Unable to iterate over pending evidence", "err", err)
return evpool.State().LastBlockHeight, evpool.State().LastBlockTime
@ -511,19 +514,28 @@ func evMapKey(ev types.Evidence) string {
return string(ev.Hash())
}
// big endian padded hex
func bE(h int64) string {
return fmt.Sprintf("%0.16X", h)
func prefixToBytes(prefix int64) []byte {
key, err := orderedcode.Append(nil, prefix)
if err != nil {
panic(err)
}
return key
}
func keyCommitted(evidence types.Evidence) []byte {
return append([]byte{baseKeyCommitted}, keySuffix(evidence)...)
var height int64 = evidence.Height()
key, err := orderedcode.Append(nil, prefixCommitted, height, string(evidence.Hash()))
if err != nil {
panic(err)
}
return key
}
func keyPending(evidence types.Evidence) []byte {
return append([]byte{baseKeyPending}, keySuffix(evidence)...)
}
func keySuffix(evidence types.Evidence) []byte {
return []byte(fmt.Sprintf("%s/%X", bE(evidence.Height()), evidence.Hash()))
var height int64 = evidence.Height()
key, err := orderedcode.Append(nil, prefixPending, height, string(evidence.Hash()))
if err != nil {
panic(err)
}
return key
}
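
keyPending and keyCommitted above now pack (prefix, height, hash) into a single orderedcode tuple, so iterating the pending prefix visits evidence in height order, which is what listEvidence's oldest-to-newest contract relies on. A hedged round-trip sketch of that key layout (a standalone example; the hash value is a made-up placeholder for evidence.Hash()):

```go
package main

import (
	"fmt"

	"github.com/google/orderedcode"
)

const prefixPending = int64(9) // same prefix constant as evidence/pool.go

func main() {
	// Compose a key the way keyPending does: prefix, then height, then hash.
	key, err := orderedcode.Append(nil, prefixPending, int64(42), "aabbcc")
	if err != nil {
		panic(err)
	}

	// Parse consumes the fields back in the same order.
	var prefix, height int64
	var hash string
	if _, err := orderedcode.Parse(string(key), &prefix, &height, &hash); err != nil {
		panic(err)
	}
	fmt.Println(prefix, height, hash) // 9 42 aabbcc
}
```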

go.mod  +1 -0

@ -15,6 +15,7 @@ require (
github.com/go-logfmt/logfmt v0.5.0
github.com/gogo/protobuf v1.3.1
github.com/golang/protobuf v1.4.3
github.com/google/orderedcode v0.0.1
github.com/gorilla/websocket v1.4.2
github.com/gtank/merlin v0.1.1
github.com/hdevalence/ed25519consensus v0.0.0-20201207055737-7fde80a9d5ff


go.sum  +2 -0

@ -221,6 +221,8 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=


light/store/db/db.go  +55 -54

@ -3,9 +3,8 @@ package db
import (
"encoding/binary"
"fmt"
"regexp"
"strconv"
"github.com/google/orderedcode"
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
tmsync "github.com/tendermint/tendermint/libs/sync" tmsync "github.com/tendermint/tendermint/libs/sync"
@ -14,8 +13,9 @@ import (
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
var (
sizeKey = []byte("size")
const (
prefixLightBlock = int64(0x0a)
prefixSize = int64(0x0b)
)
type dbs struct {
@ -30,13 +30,17 @@ type dbs struct {
// want to use one DB with many light clients).
func New(db dbm.DB, prefix string) store.Store {
lightStore := &dbs{db: db, prefix: prefix}
// retrieve the size of the db
size := uint16(0)
bz, err := db.Get(sizeKey)
bz, err := lightStore.db.Get(lightStore.sizeKey())
if err == nil && len(bz) > 0 {
size = unmarshalSize(bz)
}
lightStore.size = size
return &dbs{db: db, prefix: prefix, size: size}
return lightStore
}
// SaveLightBlock persists LightBlock to the db.
@ -65,7 +69,7 @@ func (s *dbs) SaveLightBlock(lb *types.LightBlock) error {
if err = b.Set(s.lbKey(lb.Height), lbBz); err != nil {
return err
}
if err = b.Set(sizeKey, marshalSize(s.size+1)); err != nil {
if err = b.Set(s.sizeKey(), marshalSize(s.size+1)); err != nil {
return err
}
if err = b.WriteSync(); err != nil {
@ -93,7 +97,7 @@ func (s *dbs) DeleteLightBlock(height int64) error {
if err := b.Delete(s.lbKey(height)); err != nil {
return err
}
if err := b.Set(sizeKey, marshalSize(s.size-1)); err != nil {
if err := b.Set(s.sizeKey(), marshalSize(s.size-1)); err != nil {
return err
}
if err := b.WriteSync(); err != nil {
@ -147,13 +151,8 @@ func (s *dbs) LastLightBlockHeight() (int64, error) {
}
defer itr.Close()
for itr.Valid() {
key := itr.Key()
_, height, ok := parseLbKey(key)
if ok {
return height, nil
}
itr.Next()
if itr.Valid() {
return s.decodeLbKey(itr.Key())
}
return -1, itr.Error()
@ -172,13 +171,8 @@ func (s *dbs) FirstLightBlockHeight() (int64, error) {
}
defer itr.Close()
for itr.Valid() {
key := itr.Key()
_, height, ok := parseLbKey(key)
if ok {
return height, nil
}
itr.Next()
if itr.Valid() {
return s.decodeLbKey(itr.Key())
}
return -1, itr.Error()
@ -202,13 +196,12 @@ func (s *dbs) LightBlockBefore(height int64) (*types.LightBlock, error) {
}
defer itr.Close()
for itr.Valid() {
key := itr.Key()
_, existingHeight, ok := parseLbKey(key)
if ok {
return s.LightBlock(existingHeight)
if itr.Valid() {
existingHeight, err := s.decodeLbKey(itr.Key())
if err != nil {
return nil, err
}
itr.Next()
return s.LightBlock(existingHeight)
}
if err = itr.Error(); err != nil {
return nil, err
@ -248,11 +241,12 @@ func (s *dbs) Prune(size uint16) error {
pruned := 0
for itr.Valid() && numToPrune > 0 {
key := itr.Key()
_, height, ok := parseLbKey(key)
if ok {
if err = b.Delete(s.lbKey(height)); err != nil {
return err
}
height, err := s.decodeLbKey(key)
if err != nil {
return err
}
if err = b.Delete(s.lbKey(height)); err != nil {
return err
}
itr.Next()
numToPrune--
@ -273,7 +267,7 @@ func (s *dbs) Prune(size uint16) error {
s.size -= uint16(pruned)
if wErr := s.db.SetSync(sizeKey, marshalSize(s.size)); wErr != nil {
if wErr := s.db.SetSync(s.sizeKey(), marshalSize(size)); wErr != nil {
return fmt.Errorf("failed to persist size: %w", wErr)
}
@ -289,32 +283,39 @@ func (s *dbs) Size() uint16 {
return s.size
}
func (s *dbs) lbKey(height int64) []byte {
return []byte(fmt.Sprintf("lb/%s/%020d", s.prefix, height))
func (s *dbs) sizeKey() []byte {
key, err := orderedcode.Append(nil, s.prefix, prefixSize)
if err != nil {
panic(err)
}
return key
}
var keyPattern = regexp.MustCompile(`^(lb)/([^/]*)/([0-9]+)$`)
func parseKey(key []byte) (part string, prefix string, height int64, ok bool) {
submatch := keyPattern.FindSubmatch(key)
if submatch == nil {
return "", "", 0, false
}
part = string(submatch[1])
prefix = string(submatch[2])
height, err := strconv.ParseInt(string(submatch[3]), 10, 64)
func (s *dbs) lbKey(height int64) []byte {
key, err := orderedcode.Append(nil, s.prefix, prefixLightBlock, height)
if err != nil {
return "", "", 0, false
panic(err)
}
ok = true // good!
return
return key
}
func parseLbKey(key []byte) (prefix string, height int64, ok bool) {
var part string
part, prefix, height, ok = parseKey(key)
if part != "lb" {
return "", 0, false
func (s *dbs) decodeLbKey(key []byte) (height int64, err error) {
var (
dbPrefix string
lightBlockPrefix int64
)
remaining, err := orderedcode.Parse(string(key), &dbPrefix, &lightBlockPrefix, &height)
if err != nil {
err = fmt.Errorf("failed to parse light block key: %w", err)
}
if len(remaining) != 0 {
err = fmt.Errorf("expected no remainder when parsing light block key but got: %s", remaining)
}
if lightBlockPrefix != prefixLightBlock {
err = fmt.Errorf("expected light block prefix but got: %d", lightBlockPrefix)
}
if dbPrefix != s.prefix {
err = fmt.Errorf("parsed key has a different prefix. Expected: %s, got: %s", s.prefix, dbPrefix)
}
return
}
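
In the light store, lbKey appends the per-client string prefix before anything else, so several light clients can share one DB without their iterator ranges overlapping, and decodeLbKey can reject keys that belong to a different client. A small sketch of that layout; the client names are illustrative only and not from the diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/orderedcode"
)

const prefixLightBlock = int64(0x0a) // mirrors light/store/db/db.go

// lbKey builds a key the same way the store does: client prefix,
// then the light-block prefix, then the height.
func lbKey(clientPrefix string, height int64) []byte {
	key, err := orderedcode.Append(nil, clientPrefix, prefixLightBlock, height)
	if err != nil {
		panic(err)
	}
	return key
}

func main() {
	a := lbKey("clientA", 100)
	b := lbKey("clientB", 100)

	// Keys for different clients never share the client prefix, so a prefix
	// scan over clientA's keys cannot return clientB's light blocks.
	prefixA, _ := orderedcode.Append(nil, "clientA")
	fmt.Println(bytes.HasPrefix(a, prefixA)) // true
	fmt.Println(bytes.HasPrefix(b, prefixA)) // false
}
```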


light/store/db/db_test.go  +3 -0

@ -89,6 +89,9 @@ func Test_LightBlockBefore(t *testing.T) {
if assert.NotNil(t, h) {
assert.EqualValues(t, 2, h.Height)
}
_, err = dbStore.LightBlockBefore(2)
require.Error(t, err)
}
func Test_Prune(t *testing.T) {


state/store.go  +34 -17

@ -5,6 +5,7 @@ import (
"fmt" "fmt"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/google/orderedcode"
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
@ -25,16 +26,31 @@ const (
//------------------------------------------------------------------------
func calcValidatorsKey(height int64) []byte {
return []byte(fmt.Sprintf("validatorsKey:%v", height))
const (
// prefixes are unique across all tm db's
prefixValidators = int64(5)
prefixConsensusParams = int64(6)
prefixABCIResponses = int64(7)
)
func encodeKey(prefix int64, height int64) []byte {
res, err := orderedcode.Append(nil, prefix, height)
if err != nil {
panic(err)
}
return res
}
func validatorsKey(height int64) []byte {
return encodeKey(prefixValidators, height)
}
func calcConsensusParamsKey(height int64) []byte {
return []byte(fmt.Sprintf("consensusParamsKey:%v", height))
func consensusParamsKey(height int64) []byte {
return encodeKey(prefixConsensusParams, height)
}
func calcABCIResponsesKey(height int64) []byte {
return []byte(fmt.Sprintf("abciResponsesKey:%v", height))
func abciResponsesKey(height int64) []byte {
return encodeKey(prefixABCIResponses, height)
}
//----------------------
@ -276,13 +292,13 @@ func (store dbStore) PruneStates(from int64, to int64) error {
if err != nil {
return err
}
err = batch.Set(calcValidatorsKey(h), bz)
err = batch.Set(validatorsKey(h), bz)
if err != nil {
return err
}
}
} else {
err = batch.Delete(calcValidatorsKey(h))
err = batch.Delete(validatorsKey(h))
if err != nil {
return err
}
@ -306,19 +322,19 @@ func (store dbStore) PruneStates(from int64, to int64) error {
return err
}
err = batch.Set(calcConsensusParamsKey(h), bz)
err = batch.Set(consensusParamsKey(h), bz)
if err != nil {
return err
}
}
} else {
err = batch.Delete(calcConsensusParamsKey(h))
err = batch.Delete(consensusParamsKey(h))
if err != nil {
return err
}
}
err = batch.Delete(calcABCIResponsesKey(h))
err = batch.Delete(abciResponsesKey(h))
if err != nil {
return err
}
@ -361,7 +377,7 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte {
// before we called s.Save(). It can also be used to produce Merkle proofs of
// the result of txs.
func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) {
buf, err := store.db.Get(calcABCIResponsesKey(height))
buf, err := store.db.Get(abciResponsesKey(height))
if err != nil {
return nil, err
}
@ -403,7 +419,7 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
return err
}
err = store.db.SetSync(calcABCIResponsesKey(height), bz)
err = store.db.SetSync(abciResponsesKey(height), bz)
if err != nil {
return err
}
@ -416,6 +432,7 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
// LoadValidators loads the ValidatorSet for a given height.
// Returns ErrNoValSetForHeight if the validator set can't be found for this height.
func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) {
valInfo, err := loadValidatorsInfo(store.db, height)
if err != nil {
return nil, ErrNoValSetForHeight{height}
@ -462,7 +479,7 @@ func lastStoredHeightFor(height, lastHeightChanged int64) int64 {
// CONTRACT: Returned ValidatorsInfo can be mutated.
func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error) {
buf, err := db.Get(calcValidatorsKey(height))
buf, err := db.Get(validatorsKey(height))
if err != nil {
return nil, err
}
@ -510,7 +527,7 @@ func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet
return err
}
err = store.db.Set(calcValidatorsKey(height), bz)
err = store.db.Set(validatorsKey(height), bz)
if err != nil {
return err
}
@ -549,7 +566,7 @@ func (store dbStore) LoadConsensusParams(height int64) (tmproto.ConsensusParams,
}
func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusParamsInfo, error) {
buf, err := store.db.Get(calcConsensusParamsKey(height))
buf, err := store.db.Get(consensusParamsKey(height))
if err != nil {
return nil, err
}
@ -585,7 +602,7 @@ func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, par
return err
}
err = store.db.Set(calcConsensusParamsKey(nextHeight), bz)
err = store.db.Set(consensusParamsKey(nextHeight), bz)
if err != nil {
return err
}


store/store.go  +58 -27

@ -5,6 +5,7 @@ import (
"strconv" "strconv"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
"github.com/google/orderedcode"
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
tmsync "github.com/tendermint/tendermint/libs/sync" tmsync "github.com/tendermint/tendermint/libs/sync"
@ -126,7 +127,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
// If no block is found for that hash, it returns nil.
// Panics if it fails to parse height associated with the given hash.
func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
bz, err := bs.db.Get(calcBlockHashKey(hash))
bz, err := bs.db.Get(blockHashKey(hash))
if err != nil {
panic(err)
}
@ -149,7 +150,7 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block {
func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
var pbpart = new(tmproto.Part)
bz, err := bs.db.Get(calcBlockPartKey(height, index))
bz, err := bs.db.Get(blockPartKey(height, index))
if err != nil {
panic(err)
}
@ -173,7 +174,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
// If no block is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
var pbbm = new(tmproto.BlockMeta)
bz, err := bs.db.Get(calcBlockMetaKey(height))
bz, err := bs.db.Get(blockMetaKey(height))
if err != nil {
panic(err)
@ -202,7 +203,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
// If no commit is found for the given height, it returns nil.
func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
var pbc = new(tmproto.Commit)
bz, err := bs.db.Get(calcBlockCommitKey(height))
bz, err := bs.db.Get(blockCommitKey(height))
if err != nil {
panic(err)
}
@ -225,7 +226,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
// a new block at `height + 1` that includes this commit in its block.LastCommit.
func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
var pbc = new(tmproto.Commit)
bz, err := bs.db.Get(calcSeenCommitKey(height))
bz, err := bs.db.Get(seenCommitKey(height))
if err != nil {
panic(err)
}
@ -285,20 +286,20 @@ func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) {
if meta == nil { // assume already deleted
continue
}
if err := batch.Delete(calcBlockMetaKey(h)); err != nil {
if err := batch.Delete(blockMetaKey(h)); err != nil {
return 0, err
}
if err := batch.Delete(calcBlockHashKey(meta.BlockID.Hash)); err != nil {
if err := batch.Delete(blockHashKey(meta.BlockID.Hash)); err != nil {
return 0, err
}
if err := batch.Delete(calcBlockCommitKey(h)); err != nil {
if err := batch.Delete(blockCommitKey(h)); err != nil {
return 0, err
}
if err := batch.Delete(calcSeenCommitKey(h)); err != nil {
if err := batch.Delete(seenCommitKey(h)); err != nil {
return 0, err
}
for p := 0; p < int(meta.BlockID.PartSetHeader.Total); p++ {
if err := batch.Delete(calcBlockPartKey(h, p)); err != nil {
if err := batch.Delete(blockPartKey(h, p)); err != nil {
return 0, err
}
}
@ -359,17 +360,17 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
panic("nil blockmeta") panic("nil blockmeta")
} }
metaBytes := mustEncode(pbm) metaBytes := mustEncode(pbm)
if err := bs.db.Set(calcBlockMetaKey(height), metaBytes); err != nil {
if err := bs.db.Set(blockMetaKey(height), metaBytes); err != nil {
panic(err)
}
if err := bs.db.Set(calcBlockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil {
if err := bs.db.Set(blockHashKey(hash), []byte(fmt.Sprintf("%d", height))); err != nil {
panic(err)
}
// Save block commit (duplicate and separate from the Block)
pbc := block.LastCommit.ToProto()
blockCommitBytes := mustEncode(pbc)
if err := bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes); err != nil {
if err := bs.db.Set(blockCommitKey(height-1), blockCommitBytes); err != nil {
panic(err)
}
@ -377,7 +378,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
// NOTE: we can delete this at a later height
pbsc := seenCommit.ToProto()
seenCommitBytes := mustEncode(pbsc)
if err := bs.db.Set(calcSeenCommitKey(height), seenCommitBytes); err != nil {
if err := bs.db.Set(seenCommitKey(height), seenCommitBytes); err != nil {
panic(err)
}
@ -399,7 +400,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
panic(fmt.Errorf("unable to make part into proto: %w", err))
}
partBytes := mustEncode(pbp)
if err := bs.db.Set(calcBlockPartKey(height, index), partBytes); err != nil {
if err := bs.db.Set(blockPartKey(height, index), partBytes); err != nil {
panic(err)
}
}
@ -421,29 +422,59 @@ func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) err
if err != nil {
return fmt.Errorf("unable to marshal commit: %w", err)
}
return bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
return bs.db.Set(seenCommitKey(height), seenCommitBytes)
}
//-----------------------------------------------------------------------------
//---------------------------------- KEY ENCODING -----------------------------------------
// key prefixes
const (
// prefixes are unique across all tm db's
prefixBlockMeta = int64(0)
prefixBlockPart = int64(1)
prefixBlockCommit = int64(2)
prefixSeenCommit = int64(3)
prefixBlockHash = int64(4)
)
func calcBlockMetaKey(height int64) []byte {
return []byte(fmt.Sprintf("H:%v", height))
func blockMetaKey(height int64) []byte {
key, err := orderedcode.Append(nil, prefixBlockMeta, height)
if err != nil {
panic(err)
}
return key
}
func calcBlockPartKey(height int64, partIndex int) []byte {
return []byte(fmt.Sprintf("P:%v:%v", height, partIndex))
func blockPartKey(height int64, partIndex int) []byte {
key, err := orderedcode.Append(nil, prefixBlockPart, height, int64(partIndex))
if err != nil {
panic(err)
}
return key
}
func calcBlockCommitKey(height int64) []byte {
return []byte(fmt.Sprintf("C:%v", height))
func blockCommitKey(height int64) []byte {
key, err := orderedcode.Append(nil, prefixBlockCommit, height)
if err != nil {
panic(err)
}
return key
}
func calcSeenCommitKey(height int64) []byte {
return []byte(fmt.Sprintf("SC:%v", height))
func seenCommitKey(height int64) []byte {
key, err := orderedcode.Append(nil, prefixSeenCommit, height)
if err != nil {
panic(err)
}
return key
}
func calcBlockHashKey(hash []byte) []byte {
return []byte(fmt.Sprintf("BH:%x", hash))
func blockHashKey(hash []byte) []byte {
key, err := orderedcode.Append(nil, prefixBlockHash, string(hash))
if err != nil {
panic(err)
}
return key
}
//-----------------------------------------------------------------------------
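
blockPartKey is the one key in the block store that encodes two integers, height then part index, so all parts of a block sit next to each other and in index order. A hedged sketch of that ordering (standalone, reusing only the constant and call shape shown above):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/google/orderedcode"
)

const prefixBlockPart = int64(1) // same constant as store/store.go

// blockPartKey mirrors the function above: prefix, height, part index.
func blockPartKey(height int64, partIndex int) []byte {
	key, err := orderedcode.Append(nil, prefixBlockPart, height, int64(partIndex))
	if err != nil {
		panic(err)
	}
	return key
}

func main() {
	// Within a height, parts sort by index; all parts of height 5 sort
	// before any part of height 6.
	fmt.Println(bytes.Compare(blockPartKey(5, 0), blockPartKey(5, 1)) < 0)  // true
	fmt.Println(bytes.Compare(blockPartKey(5, 99), blockPartKey(6, 0)) < 0) // true
}
```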


store/store_test.go  +9 -11

@ -156,7 +156,6 @@ func TestMain(m *testing.M) {
}
// TODO: This test should be simplified ...
func TestBlockStoreSaveLoadBlock(t *testing.T) {
state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
defer cleanup()
@ -193,7 +192,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
}
// End of setup, test data
commitAtH10 := makeTestCommit(10, tmtime.Now())
tuples := []struct {
block *types.Block
@ -302,29 +300,29 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
}
if tuple.corruptBlockInDB {
err := db.Set(calcBlockMetaKey(tuple.block.Height), []byte("block-bogus"))
err := db.Set(blockMetaKey(tuple.block.Height), []byte("block-bogus"))
require.NoError(t, err)
}
bBlock := bs.LoadBlock(tuple.block.Height)
bBlockMeta := bs.LoadBlockMeta(tuple.block.Height)
if tuple.eraseSeenCommitInDB {
err := db.Delete(calcSeenCommitKey(tuple.block.Height))
err := db.Delete(seenCommitKey(tuple.block.Height))
require.NoError(t, err)
}
if tuple.corruptSeenCommitInDB {
err := db.Set(calcSeenCommitKey(tuple.block.Height), []byte("bogus-seen-commit"))
err := db.Set(seenCommitKey(tuple.block.Height), []byte("bogus-seen-commit"))
require.NoError(t, err)
}
bSeenCommit := bs.LoadSeenCommit(tuple.block.Height)
commitHeight := tuple.block.Height - 1
if tuple.eraseCommitInDB {
err := db.Delete(calcBlockCommitKey(commitHeight))
err := db.Delete(blockCommitKey(commitHeight))
require.NoError(t, err)
}
if tuple.corruptCommitInDB {
err := db.Set(calcBlockCommitKey(commitHeight), []byte("foo-bogus"))
err := db.Set(blockCommitKey(commitHeight), []byte("foo-bogus"))
require.NoError(t, err)
}
bCommit := bs.LoadBlockCommit(commitHeight)
@ -404,7 +402,7 @@ func TestLoadBlockPart(t *testing.T) {
require.Nil(t, res, "a non-existent block part should return nil")
// 2. Next save a corrupted block then try to load it
err := db.Set(calcBlockPartKey(height, index), []byte("Tendermint"))
err := db.Set(blockPartKey(height, index), []byte("Tendermint"))
require.NoError(t, err)
res, _, panicErr = doFn(loadPart)
require.NotNil(t, panicErr, "expecting a non-nil panic")
@ -413,7 +411,7 @@ func TestLoadBlockPart(t *testing.T) {
// 3. A good block serialized and saved to the DB should be retrievable
pb1, err := part1.ToProto()
require.NoError(t, err)
err = db.Set(calcBlockPartKey(height, index), mustEncode(pb1))
err = db.Set(blockPartKey(height, index), mustEncode(pb1))
require.NoError(t, err)
gotPart, _, panicErr := doFn(loadPart)
require.Nil(t, panicErr, "an existent and proper block should not panic")
@ -524,7 +522,7 @@ func TestLoadBlockMeta(t *testing.T) {
require.Nil(t, res, "a non-existent blockMeta should return nil")
// 2. Next save a corrupted blockMeta then try to load it
err := db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta"))
err := db.Set(blockMetaKey(height), []byte("Tendermint-Meta"))
require.NoError(t, err)
res, _, panicErr = doFn(loadMeta)
require.NotNil(t, panicErr, "expecting a non-nil panic")
@ -535,7 +533,7 @@ func TestLoadBlockMeta(t *testing.T) {
Version: tmversion.Consensus{
Block: version.BlockProtocol, App: 0}, Height: 1, ProposerAddress: tmrand.Bytes(crypto.AddressSize)}}
pbm := meta.ToProto()
err = db.Set(calcBlockMetaKey(height), mustEncode(pbm))
err = db.Set(blockMetaKey(height), mustEncode(pbm))
require.NoError(t, err)
gotMeta, _, panicErr := doFn(loadMeta)
require.Nil(t, panicErr, "an existent and proper block should not panic")

