Browse Source

store: use db iterators for pruning and range-based queries (#5848)

pull/5881/head
Callum Waters 4 years ago
committed by GitHub
parent
commit
385ea1db7d
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 263 additions and 641 deletions
  1. +2
    -1
      CHANGELOG_PENDING.md
  2. +5
    -3
      cmd/tendermint/commands/light.go
  3. +3
    -3
      light/client_benchmark_test.go
  4. +21
    -21
      light/client_test.go
  5. +5
    -5
      light/detector_test.go
  6. +2
    -2
      light/example_test.go
  7. +29
    -37
      light/store/db/db.go
  8. +5
    -5
      light/store/db/db_test.go
  9. +0
    -337
      proto/tendermint/store/types.pb.go
  10. +0
    -9
      proto/tendermint/store/types.proto
  11. +1
    -1
      statesync/stateprovider.go
  12. +186
    -144
      store/store.go
  13. +4
    -73
      store/store_test.go

+ 2
- 1
CHANGELOG_PENDING.md View File

@ -18,7 +18,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hypen-case instead of snake_case for all cli comamnds and config parameters
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- Apps
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
@ -35,6 +35,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] `EnsureDir` now propagates IO errors and checks the file type (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- Blockchain Protocol


+ 5
- 3
cmd/tendermint/commands/light.go View File

@ -122,10 +122,12 @@ func runProxy(cmd *cobra.Command, args []string) error {
witnessesAddrs = strings.Split(witnessAddrsJoined, ",")
}
db, err := dbm.NewGoLevelDB("light-client-db", dir)
lightDB, err := dbm.NewGoLevelDB("light-client-db", dir)
if err != nil {
return fmt.Errorf("can't create a db: %w", err)
}
// create a prefixed db on the chainID
db := dbm.NewPrefixDB(lightDB, []byte(chainID))
if primaryAddr == "" { // check to see if we can start from an existing state
var err error
@ -187,7 +189,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
},
primaryAddr,
witnessesAddrs,
dbs.New(db, chainID),
dbs.New(db),
options...,
)
} else { // continue from latest state
@ -196,7 +198,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db, chainID),
dbs.New(db),
options...,
)
}


+ 3
- 3
light/client_benchmark_test.go View File

@ -37,7 +37,7 @@ func BenchmarkSequence(b *testing.B) {
},
benchmarkFullNode,
[]provider.Provider{benchmarkFullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.SequentialVerification(),
)
@ -65,7 +65,7 @@ func BenchmarkBisection(b *testing.B) {
},
benchmarkFullNode,
[]provider.Provider{benchmarkFullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
if err != nil {
@ -93,7 +93,7 @@ func BenchmarkBackwards(b *testing.B) {
},
benchmarkFullNode,
[]provider.Provider{benchmarkFullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
if err != nil {


+ 21
- 21
light/client_test.go View File

@ -231,7 +231,7 @@ func TestClient_SequentialVerification(t *testing.T) {
tc.otherHeaders,
tc.vals,
)},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.SequentialVerification(),
light.Logger(log.TestingLogger()),
)
@ -356,7 +356,7 @@ func TestClient_SkippingVerification(t *testing.T) {
tc.otherHeaders,
tc.vals,
)},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
light.Logger(log.TestingLogger()),
)
@ -394,7 +394,7 @@ func TestClientLargeBisectionVerification(t *testing.T) {
},
veryLargeFullNode,
[]provider.Provider{veryLargeFullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
require.NoError(t, err)
@ -416,7 +416,7 @@ func TestClientBisectionBetweenTrustedHeaders(t *testing.T) {
},
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.SkippingVerification(light.DefaultTrustLevel),
)
require.NoError(t, err)
@ -440,7 +440,7 @@ func TestClient_Cleanup(t *testing.T) {
trustOptions,
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@ -460,7 +460,7 @@ func TestClient_Cleanup(t *testing.T) {
func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) {
// 1. options.Hash == trustedHeader.Hash
{
trustedStore := dbs.New(dbm.NewMemDB(), chainID)
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@ -484,7 +484,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) {
// 2. options.Hash != trustedHeader.Hash
{
trustedStore := dbs.New(dbm.NewMemDB(), chainID)
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@ -529,7 +529,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) {
func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) {
// 1. options.Hash == trustedHeader.Hash
{
trustedStore := dbs.New(dbm.NewMemDB(), chainID)
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@ -559,7 +559,7 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) {
// 2. options.Hash != trustedHeader.Hash
// This could happen if previous provider was lying to us.
{
trustedStore := dbs.New(dbm.NewMemDB(), chainID)
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@ -606,7 +606,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) {
// 1. options.Hash == trustedHeader.Hash
{
// load the first three headers into the trusted store
trustedStore := dbs.New(dbm.NewMemDB(), chainID)
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@ -644,7 +644,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) {
// 2. options.Hash != trustedHeader.Hash
// This could happen if previous provider was lying to us.
{
trustedStore := dbs.New(dbm.NewMemDB(), chainID)
trustedStore := dbs.New(dbm.NewMemDB())
err := trustedStore.SaveLightBlock(l1)
require.NoError(t, err)
@ -704,7 +704,7 @@ func TestClient_Update(t *testing.T) {
trustOptions,
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@ -725,7 +725,7 @@ func TestClient_Concurrency(t *testing.T) {
trustOptions,
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@ -766,7 +766,7 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) {
trustOptions,
deadNode,
[]provider.Provider{fullNode, fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
)
@ -792,7 +792,7 @@ func TestClient_BackwardsVerification(t *testing.T) {
},
largeFullNode,
[]provider.Provider{largeFullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@ -874,7 +874,7 @@ func TestClient_BackwardsVerification(t *testing.T) {
},
tc.provider,
[]provider.Provider{tc.provider},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err, idx)
@ -887,7 +887,7 @@ func TestClient_BackwardsVerification(t *testing.T) {
func TestClient_NewClientFromTrustedStore(t *testing.T) {
// 1) Initiate DB and fill with a "trusted" header
db := dbs.New(dbm.NewMemDB(), chainID)
db := dbs.New(dbm.NewMemDB())
err := db.SaveLightBlock(l1)
require.NoError(t, err)
@ -944,7 +944,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) {
trustOptions,
fullNode,
[]provider.Provider{badProvider1, badProvider2},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
)
@ -994,7 +994,7 @@ func TestClient_TrustedValidatorSet(t *testing.T) {
trustOptions,
fullNode,
[]provider.Provider{badValSetNode, fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
)
require.NoError(t, err)
@ -1012,7 +1012,7 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) {
trustOptions,
fullNode,
[]provider.Provider{fullNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.PruningSize(1),
)
@ -1085,7 +1085,7 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) {
trustOptions,
badNode,
[]provider.Provider{badNode, badNode},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.MaxRetryAttempts(1),
)
require.NoError(t, err)


+ 5
- 5
light/detector_test.go View File

@ -54,7 +54,7 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) {
},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
)
@ -136,7 +136,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) {
},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
verificationOption,
@ -191,7 +191,7 @@ func TestClientDivergentTraces1(t *testing.T) {
},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
)
@ -215,7 +215,7 @@ func TestClientDivergentTraces2(t *testing.T) {
},
primary,
[]provider.Provider{deadNode, deadNode, primary},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
)
@ -250,7 +250,7 @@ func TestClientDivergentTraces3(t *testing.T) {
},
primary,
[]provider.Provider{witness},
dbs.New(dbm.NewMemDB(), chainID),
dbs.New(dbm.NewMemDB()),
light.Logger(log.TestingLogger()),
light.MaxRetryAttempts(1),
)


+ 2
- 2
light/example_test.go View File

@ -61,7 +61,7 @@ func ExampleClient_Update() {
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db, chainID),
dbs.New(db),
light.Logger(log.TestingLogger()),
)
if err != nil {
@ -129,7 +129,7 @@ func ExampleClient_VerifyLightBlockAtHeight() {
},
primary,
[]provider.Provider{primary}, // NOTE: primary should not be used here
dbs.New(db, chainID),
dbs.New(db),
light.Logger(log.TestingLogger()),
)
if err != nil {


+ 29
- 37
light/store/db/db.go View File

@ -19,18 +19,17 @@ const (
)
type dbs struct {
db dbm.DB
prefix string
db dbm.DB
mtx tmsync.RWMutex
size uint16
}
// New returns a Store that wraps any DB (with an optional prefix in case you
// want to use one DB with many light clients).
func New(db dbm.DB, prefix string) store.Store {
// New returns a Store that wraps any DB
// If you want to share one DB across many light clients consider using PrefixDB
func New(db dbm.DB) store.Store {
lightStore := &dbs{db: db, prefix: prefix}
lightStore := &dbs{db: db}
// retrieve the size of the db
size := uint16(0)
@ -197,11 +196,17 @@ func (s *dbs) LightBlockBefore(height int64) (*types.LightBlock, error) {
defer itr.Close()
if itr.Valid() {
existingHeight, err := s.decodeLbKey(itr.Key())
var lbpb tmproto.LightBlock
err = lbpb.Unmarshal(itr.Value())
if err != nil {
return nil, err
return nil, fmt.Errorf("unmarshal error: %w", err)
}
return s.LightBlock(existingHeight)
lightBlock, err := types.LightBlockFromProto(&lbpb)
if err != nil {
return nil, fmt.Errorf("proto conversion error: %w", err)
}
return lightBlock, nil
}
if err = itr.Error(); err != nil {
return nil, err
@ -238,39 +243,32 @@ func (s *dbs) Prune(size uint16) error {
b := s.db.NewBatch()
defer b.Close()
pruned := 0
for itr.Valid() && numToPrune > 0 {
key := itr.Key()
height, err := s.decodeLbKey(key)
if err != nil {
return err
}
if err = b.Delete(s.lbKey(height)); err != nil {
if err = b.Delete(itr.Key()); err != nil {
return err
}
itr.Next()
numToPrune--
pruned++
}
if err = itr.Error(); err != nil {
return err
}
err = b.WriteSync()
if err != nil {
return err
}
// 3) Update size.
// 3) update size
s.mtx.Lock()
defer s.mtx.Unlock()
s.size -= uint16(pruned)
s.size = size
s.mtx.Unlock()
if wErr := s.db.SetSync(s.sizeKey(), marshalSize(size)); wErr != nil {
if wErr := b.Set(s.sizeKey(), marshalSize(size)); wErr != nil {
return fmt.Errorf("failed to persist size: %w", wErr)
}
// 4) write batch deletion to disk
err = b.WriteSync()
if err != nil {
return err
}
return nil
}
@ -284,7 +282,7 @@ func (s *dbs) Size() uint16 {
}
func (s *dbs) sizeKey() []byte {
key, err := orderedcode.Append(nil, s.prefix, prefixSize)
key, err := orderedcode.Append(nil, prefixSize)
if err != nil {
panic(err)
}
@ -292,7 +290,7 @@ func (s *dbs) sizeKey() []byte {
}
func (s *dbs) lbKey(height int64) []byte {
key, err := orderedcode.Append(nil, s.prefix, prefixLightBlock, height)
key, err := orderedcode.Append(nil, prefixLightBlock, height)
if err != nil {
panic(err)
}
@ -300,11 +298,8 @@ func (s *dbs) lbKey(height int64) []byte {
}
func (s *dbs) decodeLbKey(key []byte) (height int64, err error) {
var (
dbPrefix string
lightBlockPrefix int64
)
remaining, err := orderedcode.Parse(string(key), &dbPrefix, &lightBlockPrefix, &height)
var lightBlockPrefix int64
remaining, err := orderedcode.Parse(string(key), &lightBlockPrefix, &height)
if err != nil {
err = fmt.Errorf("failed to parse light block key: %w", err)
}
@ -314,9 +309,6 @@ func (s *dbs) decodeLbKey(key []byte) (height int64, err error) {
if lightBlockPrefix != prefixLightBlock {
err = fmt.Errorf("expected light block prefix but got: %d", lightBlockPrefix)
}
if dbPrefix != s.prefix {
err = fmt.Errorf("parsed key has a different prefix. Expected: %s, got: %s", s.prefix, dbPrefix)
}
return
}


+ 5
- 5
light/store/db/db_test.go View File

@ -19,7 +19,7 @@ import (
)
func TestLast_FirstLightBlockHeight(t *testing.T) {
dbStore := New(dbm.NewMemDB(), "TestLast_FirstLightBlockHeight")
dbStore := New(dbm.NewMemDB())
// Empty store
height, err := dbStore.LastLightBlockHeight()
@ -44,7 +44,7 @@ func TestLast_FirstLightBlockHeight(t *testing.T) {
}
func Test_SaveLightBlock(t *testing.T) {
dbStore := New(dbm.NewMemDB(), "Test_SaveLightBlockAndValidatorSet")
dbStore := New(dbm.NewMemDB())
// Empty store
h, err := dbStore.LightBlock(1)
@ -74,7 +74,7 @@ func Test_SaveLightBlock(t *testing.T) {
}
func Test_LightBlockBefore(t *testing.T) {
dbStore := New(dbm.NewMemDB(), "Test_LightBlockBefore")
dbStore := New(dbm.NewMemDB())
assert.Panics(t, func() {
_, _ = dbStore.LightBlockBefore(0)
@ -95,7 +95,7 @@ func Test_LightBlockBefore(t *testing.T) {
}
func Test_Prune(t *testing.T) {
dbStore := New(dbm.NewMemDB(), "Test_Prune")
dbStore := New(dbm.NewMemDB())
// Empty store
assert.EqualValues(t, 0, dbStore.Size())
@ -132,7 +132,7 @@ func Test_Prune(t *testing.T) {
}
func Test_Concurrency(t *testing.T) {
dbStore := New(dbm.NewMemDB(), "Test_Prune")
dbStore := New(dbm.NewMemDB())
var wg sync.WaitGroup
for i := 1; i <= 100; i++ {


+ 0
- 337
proto/tendermint/store/types.pb.go View File

@ -1,337 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: tendermint/store/types.proto
package store
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type BlockStoreState struct {
Base int64 `protobuf:"varint,1,opt,name=base,proto3" json:"base,omitempty"`
Height int64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"`
}
func (m *BlockStoreState) Reset() { *m = BlockStoreState{} }
func (m *BlockStoreState) String() string { return proto.CompactTextString(m) }
func (*BlockStoreState) ProtoMessage() {}
func (*BlockStoreState) Descriptor() ([]byte, []int) {
return fileDescriptor_ff9e53a0a74267f7, []int{0}
}
func (m *BlockStoreState) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *BlockStoreState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_BlockStoreState.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *BlockStoreState) XXX_Merge(src proto.Message) {
xxx_messageInfo_BlockStoreState.Merge(m, src)
}
func (m *BlockStoreState) XXX_Size() int {
return m.Size()
}
func (m *BlockStoreState) XXX_DiscardUnknown() {
xxx_messageInfo_BlockStoreState.DiscardUnknown(m)
}
var xxx_messageInfo_BlockStoreState proto.InternalMessageInfo
func (m *BlockStoreState) GetBase() int64 {
if m != nil {
return m.Base
}
return 0
}
func (m *BlockStoreState) GetHeight() int64 {
if m != nil {
return m.Height
}
return 0
}
func init() {
proto.RegisterType((*BlockStoreState)(nil), "tendermint.store.BlockStoreState")
}
func init() { proto.RegisterFile("tendermint/store/types.proto", fileDescriptor_ff9e53a0a74267f7) }
var fileDescriptor_ff9e53a0a74267f7 = []byte{
// 165 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x29, 0x49, 0xcd, 0x4b,
0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0xd5, 0x2f, 0xa9, 0x2c,
0x48, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0xc8, 0xea, 0x81, 0x65, 0x95,
0x6c, 0xb9, 0xf8, 0x9d, 0x72, 0xf2, 0x93, 0xb3, 0x83, 0x41, 0xbc, 0xe0, 0x92, 0xc4, 0x92, 0x54,
0x21, 0x21, 0x2e, 0x96, 0xa4, 0xc4, 0xe2, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xe6, 0x20, 0x30,
0x5b, 0x48, 0x8c, 0x8b, 0x2d, 0x23, 0x35, 0x33, 0x3d, 0xa3, 0x44, 0x82, 0x09, 0x2c, 0x0a, 0xe5,
0x39, 0x05, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13,
0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x79, 0x7a, 0x66,
0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x92, 0x9b, 0x90, 0x98, 0x60, 0x27, 0xe9,
0xa3, 0xbb, 0x37, 0x89, 0x0d, 0x2c, 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xef, 0xa6, 0x30,
0x63, 0xca, 0x00, 0x00, 0x00,
}
func (m *BlockStoreState) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BlockStoreState) MarshalTo(dAtA []byte) (int, error) {
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *BlockStoreState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Height != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Height))
i--
dAtA[i] = 0x10
}
if m.Base != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Base))
i--
dAtA[i] = 0x8
}
return len(dAtA) - i, nil
}
func encodeVarintTypes(dAtA []byte, offset int, v uint64) int {
offset -= sovTypes(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return base
}
func (m *BlockStoreState) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Base != 0 {
n += 1 + sovTypes(uint64(m.Base))
}
if m.Height != 0 {
n += 1 + sovTypes(uint64(m.Height))
}
return n
}
func sovTypes(x uint64) (n int) {
return (math_bits.Len64(x|1) + 6) / 7
}
func sozTypes(x uint64) (n int) {
return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *BlockStoreState) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BlockStoreState: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BlockStoreState: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Base", wireType)
}
m.Base = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Base |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
}
m.Height = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Height |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthTypes
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTypes(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
case 1:
iNdEx += 8
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTypes
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if length < 0 {
return 0, ErrInvalidLengthTypes
}
iNdEx += length
case 3:
depth++
case 4:
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupTypes
}
depth--
case 5:
iNdEx += 4
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthTypes
}
if depth == 0 {
return iNdEx, nil
}
}
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group")
)

+ 0
- 9
proto/tendermint/store/types.proto View File

@ -1,9 +0,0 @@
syntax = "proto3";
package tendermint.store;
option go_package = "github.com/tendermint/tendermint/proto/tendermint/store";
message BlockStoreState {
int64 base = 1;
int64 height = 2;
}

+ 1
- 1
statesync/stateprovider.go View File

@ -72,7 +72,7 @@ func NewLightClientStateProvider(
}
lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:],
lightdb.New(dbm.NewMemDB(), ""), light.Logger(logger), light.MaxRetryAttempts(5))
lightdb.New(dbm.NewMemDB()), light.Logger(logger), light.MaxRetryAttempts(5))
if err != nil {
return nil, err
}


+ 186
- 144
store/store.go View File

@ -8,8 +8,6 @@ import (
"github.com/google/orderedcode"
dbm "github.com/tendermint/tm-db"
tmsync "github.com/tendermint/tendermint/libs/sync"
tmstore "github.com/tendermint/tendermint/proto/tendermint/store"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
@ -33,60 +31,98 @@ The store can be assumed to contain all contiguous blocks between base and heigh
*/
type BlockStore struct {
db dbm.DB
// mtx guards access to the struct fields listed below it. We rely on the database to enforce
// fine-grained concurrency control for its data, and thus this mutex does not apply to
// database contents. The only reason for keeping these fields in the struct is that the data
// can't efficiently be queried from the database since the key encoding we use is not
// lexicographically ordered (see https://github.com/tendermint/tendermint/issues/4567).
mtx tmsync.RWMutex
base int64
height int64
}
// NewBlockStore returns a new BlockStore with the given DB,
// initialized to the last height that was committed to the DB.
func NewBlockStore(db dbm.DB) *BlockStore {
bs := LoadBlockStoreState(db)
return &BlockStore{
base: bs.Base,
height: bs.Height,
db: db,
}
return &BlockStore{db}
}
// Base returns the first known contiguous block height, or 0 for empty block stores.
func (bs *BlockStore) Base() int64 {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
return bs.base
iter, err := bs.db.Iterator(
blockMetaKey(1),
blockMetaKey(1<<63-1),
)
if err != nil {
panic(err)
}
defer iter.Close()
if iter.Valid() {
height, err := decodeBlockMetaKey(iter.Key())
if err == nil {
return height
}
}
if err := iter.Error(); err != nil {
panic(err)
}
return 0
}
// Height returns the last known contiguous block height, or 0 for empty block stores.
func (bs *BlockStore) Height() int64 {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
return bs.height
iter, err := bs.db.ReverseIterator(
blockMetaKey(1),
blockMetaKey(1<<63-1),
)
if err != nil {
panic(err)
}
defer iter.Close()
if iter.Valid() {
height, err := decodeBlockMetaKey(iter.Key())
if err == nil {
return height
}
}
if err := iter.Error(); err != nil {
panic(err)
}
return 0
}
// Size returns the number of blocks in the block store.
func (bs *BlockStore) Size() int64 {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
if bs.height == 0 {
height := bs.Height()
if height == 0 {
return 0
}
return bs.height - bs.base + 1
return height + 1 - bs.Base()
}
// LoadBase atomically loads the base block meta, or returns nil if no base is found.
func (bs *BlockStore) LoadBaseMeta() *types.BlockMeta {
bs.mtx.RLock()
defer bs.mtx.RUnlock()
if bs.base == 0 {
iter, err := bs.db.Iterator(
blockMetaKey(1),
blockMetaKey(1<<63-1),
)
if err != nil {
return nil
}
return bs.LoadBlockMeta(bs.base)
defer iter.Close()
if iter.Valid() {
var pbbm = new(tmproto.BlockMeta)
err = proto.Unmarshal(iter.Value(), pbbm)
if err != nil {
panic(fmt.Errorf("unmarshal to tmproto.BlockMeta: %w", err))
}
blockMeta, err := types.BlockMetaFromProto(pbbm)
if err != nil {
panic(fmt.Errorf("error from proto blockMeta: %w", err))
}
return blockMeta
}
return nil
}
// LoadBlock returns the block with the given height.
@ -245,82 +281,133 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
return commit
}
// PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned.
// PruneBlocks removes block up to (but not including) a height. It returns the number of blocks pruned.
func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) {
if height <= 0 {
return 0, fmt.Errorf("height must be greater than 0")
}
bs.mtx.RLock()
if height > bs.height {
bs.mtx.RUnlock()
return 0, fmt.Errorf("cannot prune beyond the latest height %v", bs.height)
}
base := bs.base
bs.mtx.RUnlock()
if height < base {
return 0, fmt.Errorf("cannot prune to height %v, it is lower than base height %v",
height, base)
if height > bs.Height() {
return 0, fmt.Errorf("height must be equal to or less than the latest height %d", bs.Height())
}
pruned := uint64(0)
batch := bs.db.NewBatch()
defer batch.Close()
flush := func(batch dbm.Batch, base int64) error {
// We can't trust batches to be atomic, so update base first to make sure noone
// tries to access missing blocks.
bs.mtx.Lock()
bs.base = base
bs.mtx.Unlock()
bs.saveState()
err := batch.WriteSync()
// when removing the block meta, use the hash to remove the hash key at the same time
removeBlockHash := func(key, value []byte, batch dbm.Batch) error {
// unmarshal block meta
var pbbm = new(tmproto.BlockMeta)
err := proto.Unmarshal(value, pbbm)
if err != nil {
return fmt.Errorf("failed to prune up to height %v: %w", base, err)
return fmt.Errorf("unmarshal to tmproto.BlockMeta: %w", err)
}
batch.Close()
return nil
}
for h := base; h < height; h++ {
meta := bs.LoadBlockMeta(h)
if meta == nil { // assume already deleted
continue
}
if err := batch.Delete(blockMetaKey(h)); err != nil {
return 0, err
}
if err := batch.Delete(blockHashKey(meta.BlockID.Hash)); err != nil {
return 0, err
}
if err := batch.Delete(blockCommitKey(h)); err != nil {
return 0, err
blockMeta, err := types.BlockMetaFromProto(pbbm)
if err != nil {
return fmt.Errorf("error from proto blockMeta: %w", err)
}
if err := batch.Delete(seenCommitKey(h)); err != nil {
return 0, err
// delete the hash key corresponding to the block meta's hash
if err := batch.Delete(blockHashKey(blockMeta.BlockID.Hash)); err != nil {
return fmt.Errorf("failed to delete hash key: %X: %w", blockHashKey(blockMeta.BlockID.Hash), err)
}
for p := 0; p < int(meta.BlockID.PartSetHeader.Total); p++ {
if err := batch.Delete(blockPartKey(h, p)); err != nil {
return 0, err
return nil
}
// remove block meta first as this is used to indicate whether the block exists.
// For this reason, we also use only block meta as a measure of the number of blocks pruned
pruned, err := bs.batchDelete(blockMetaKey(0), blockMetaKey(height), removeBlockHash)
if err != nil {
return pruned, err
}
if _, err := bs.batchDelete(blockPartKey(0, 0), blockPartKey(height, 0), nil); err != nil {
return pruned, err
}
if _, err := bs.batchDelete(blockCommitKey(0), blockCommitKey(height), nil); err != nil {
return pruned, err
}
if _, err := bs.batchDelete(seenCommitKey(0), seenCommitKey(height), nil); err != nil {
return pruned, err
}
return pruned, nil
}
// batchDelete is a generic function for deleting a range of keys from start (inclusive)
// up to but excluding end. For each key/value pair, an optional hook can be
// executed before the deletion itself is made
func (bs *BlockStore) batchDelete(
start []byte,
end []byte,
preDeletionHook func(key, value []byte, batch dbm.Batch) error,
) (uint64, error) {
iter, err := bs.db.Iterator(start, end)
if err != nil {
panic(err)
}
defer iter.Close()
batch := bs.db.NewBatch()
defer batch.Close()
pruned := uint64(0)
flushed := pruned
for iter.Valid() {
key := iter.Key()
if preDeletionHook != nil {
if err := preDeletionHook(key, iter.Value(), batch); err != nil {
return flushed, err
}
}
if err := batch.Delete(key); err != nil {
return flushed, fmt.Errorf("pruning error at key %X: %w", iter.Key(), err)
}
pruned++
// avoid batches growing too large by flushing to database regularly
if pruned%1000 == 0 {
if err := iter.Error(); err != nil {
return flushed, err
}
if err := iter.Close(); err != nil {
return flushed, err
}
// flush every 1000 blocks to avoid batches becoming too large
if pruned%1000 == 0 && pruned > 0 {
err := flush(batch, h)
err := batch.Write()
if err != nil {
return 0, err
return flushed, fmt.Errorf("pruning error at key %X: %w", iter.Key(), err)
}
if err := batch.Close(); err != nil {
return flushed, err
}
flushed = pruned
iter, err = bs.db.Iterator(start, end)
if err != nil {
panic(err)
}
defer iter.Close()
batch = bs.db.NewBatch()
defer batch.Close()
} else {
iter.Next()
}
}
flushed = pruned
if err := iter.Error(); err != nil {
return flushed, err
}
err := flush(batch, height)
err = batch.WriteSync()
if err != nil {
return 0, err
return flushed, fmt.Errorf("pruning error at key %X: %w", iter.Key(), err)
}
return pruned, nil
return flushed, nil
}
// SaveBlock persists the given block, blockParts, and seenCommit to the underlying db.
@ -378,20 +465,10 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s
// NOTE: we can delete this at a later height
pbsc := seenCommit.ToProto()
seenCommitBytes := mustEncode(pbsc)
if err := bs.db.Set(seenCommitKey(height), seenCommitBytes); err != nil {
if err := bs.db.SetSync(seenCommitKey(height), seenCommitBytes); err != nil {
panic(err)
}
// Done!
bs.mtx.Lock()
bs.height = height
if bs.base == 0 {
bs.base = height
}
bs.mtx.Unlock()
// Save new BlockStoreState descriptor. This also flushes the database.
bs.saveState()
}
func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
@ -405,16 +482,6 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
}
}
// saveState persists the store's current base/height watermark to the
// database as a BlockStoreState record.
func (bs *BlockStore) saveState() {
	bs.mtx.RLock()
	base, height := bs.base, bs.height
	bs.mtx.RUnlock()

	state := tmstore.BlockStoreState{Base: base, Height: height}
	SaveBlockStoreState(&state, bs.db)
}
// SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node.
func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error {
pbc := seenCommit.ToProto()
@ -445,6 +512,21 @@ func blockMetaKey(height int64) []byte {
return key
}
// decodeBlockMetaKey extracts the height from a block-meta key, verifying
// that the key carries the block-meta prefix and no trailing bytes.
func decodeBlockMetaKey(key []byte) (height int64, err error) {
	var pfx int64
	rest, parseErr := orderedcode.Parse(string(key), &pfx, &height)
	switch {
	case parseErr != nil:
		return height, parseErr
	case len(rest) != 0:
		return -1, fmt.Errorf("expected complete key but got remainder: %s", rest)
	case pfx != prefixBlockMeta:
		return -1, fmt.Errorf("incorrect prefix. Expected %v, got %v", prefixBlockMeta, pfx)
	}
	return height, nil
}
func blockPartKey(height int64, partIndex int) []byte {
key, err := orderedcode.Append(nil, prefixBlockPart, height, int64(partIndex))
if err != nil {
@ -479,46 +561,6 @@ func blockHashKey(hash []byte) []byte {
//-----------------------------------------------------------------------------
var blockStoreKey = []byte("blockStore")
// SaveBlockStoreState persists the blockStore state to the database.
func SaveBlockStoreState(bsj *tmstore.BlockStoreState, db dbm.DB) {
	bz, err := proto.Marshal(bsj)
	if err != nil {
		panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
	}
	// SetSync flushes to disk so the watermark survives a crash.
	err = db.SetSync(blockStoreKey, bz)
	if err != nil {
		panic(err)
	}
}
// LoadBlockStoreState returns the BlockStoreState as loaded from disk.
// If no BlockStoreState was previously persisted, it returns the zero value.
func LoadBlockStoreState(db dbm.DB) tmstore.BlockStoreState {
	bz, err := db.Get(blockStoreKey)
	if err != nil {
		panic(err)
	}

	// Nothing stored yet: report an empty store.
	if len(bz) == 0 {
		return tmstore.BlockStoreState{Base: 0, Height: 0}
	}

	var state tmstore.BlockStoreState
	if err := proto.Unmarshal(bz, &state); err != nil {
		panic(fmt.Sprintf("Could not unmarshal bytes: %X", bz))
	}

	// Backwards compatibility with persisted data from before Base existed.
	if state.Height > 0 && state.Base == 0 {
		state.Base = 1
	}
	return state
}
// mustEncode proto encodes a proto.message and panics if fails
func mustEncode(pb proto.Message) []byte {
bz, err := proto.Marshal(pb)


+ 4
- 73
store/store_test.go View File

@ -9,7 +9,6 @@ import (
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
@ -18,7 +17,6 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmstore "github.com/tendermint/tendermint/proto/tendermint/store"
tmversion "github.com/tendermint/tendermint/proto/tendermint/version"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
@ -68,66 +66,6 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu
return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
}
// TestLoadBlockStoreState round-trips BlockStoreState values through a fresh
// in-memory DB, including the backwards-compatibility case where Base is
// inferred as 1 for pre-Base data.
func TestLoadBlockStoreState(t *testing.T) {
	cases := []struct {
		testName string
		bss      *tmstore.BlockStoreState
		want     tmstore.BlockStoreState
	}{
		{"success", &tmstore.BlockStoreState{Base: 100, Height: 1000},
			tmstore.BlockStoreState{Base: 100, Height: 1000}},
		{"empty", &tmstore.BlockStoreState{}, tmstore.BlockStoreState{}},
		{"no base", &tmstore.BlockStoreState{Height: 1000}, tmstore.BlockStoreState{Base: 1, Height: 1000}},
	}

	for _, tc := range cases {
		db := dbm.NewMemDB()
		SaveBlockStoreState(tc.bss, db)
		got := LoadBlockStoreState(db)
		assert.Equal(t, tc.want, got, "expected the retrieved DBs to match: %s", tc.testName)
	}
}
// TestNewBlockStore checks that NewBlockStore parses persisted state, panics
// on corrupt state bytes, and treats empty bytes as an empty store.
func TestNewBlockStore(t *testing.T) {
	db := dbm.NewMemDB()
	state := tmstore.BlockStoreState{Base: 100, Height: 10000}
	bz, _ := proto.Marshal(&state)
	require.NoError(t, db.Set(blockStoreKey, bz))

	store := NewBlockStore(db)
	require.Equal(t, int64(100), store.Base(), "failed to properly parse blockstore")
	require.Equal(t, int64(10000), store.Height(), "failed to properly parse blockstore")

	badInputs := []struct {
		data    []byte
		wantErr string
	}{
		{[]byte("artful-doger"), "not unmarshal bytes"},
		{[]byte(" "), "unmarshal bytes"},
	}
	for i, tc := range badInputs {
		tc := tc
		// Expecting a panic here on trying to parse an invalid blockStore
		_, _, panicErr := doFn(func() (interface{}, error) {
			require.NoError(t, db.Set(blockStoreKey, tc.data))
			_ = NewBlockStore(db)
			return nil, nil
		})
		require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tc.data)
		assert.Contains(t, fmt.Sprintf("%#v", panicErr), tc.wantErr, "#%d data: %q", i, tc.data)
	}

	require.NoError(t, db.Set(blockStoreKey, []byte{}))
	store = NewBlockStore(db)
	assert.Equal(t, store.Height(), int64(0), "expecting empty bytes to be unmarshaled alright")
}
// freshBlockStore returns an empty BlockStore backed by a new in-memory DB,
// along with the DB itself so tests can inspect or mutate storage directly.
func freshBlockStore() (*BlockStore, dbm.DB) {
	db := dbm.NewMemDB()
	return NewBlockStore(db), db
@ -379,8 +317,9 @@ func TestLoadBaseMeta(t *testing.T) {
bs.SaveBlock(block, partSet, seenCommit)
}
_, err = bs.PruneBlocks(4)
pruned, err := bs.PruneBlocks(4)
require.NoError(t, err)
assert.EqualValues(t, 3, pruned)
baseBlock := bs.LoadBaseMeta()
assert.EqualValues(t, 4, baseBlock.Header.Height)
@ -432,10 +371,6 @@ func TestPruneBlocks(t *testing.T) {
assert.EqualValues(t, 0, bs.Height())
assert.EqualValues(t, 0, bs.Size())
// pruning an empty store should error, even when pruning to 0
_, err = bs.PruneBlocks(1)
require.Error(t, err)
_, err = bs.PruneBlocks(0)
require.Error(t, err)
@ -460,10 +395,6 @@ func TestPruneBlocks(t *testing.T) {
assert.EqualValues(t, 1200, bs.Base())
assert.EqualValues(t, 1500, bs.Height())
assert.EqualValues(t, 301, bs.Size())
assert.EqualValues(t, tmstore.BlockStoreState{
Base: 1200,
Height: 1500,
}, LoadBlockStoreState(db))
require.NotNil(t, bs.LoadBlock(1200))
require.Nil(t, bs.LoadBlock(1199))
@ -479,9 +410,9 @@ func TestPruneBlocks(t *testing.T) {
require.NotNil(t, bs.LoadBlock(i))
}
// Pruning below the current base should error
// Pruning below the current base should not error
_, err = bs.PruneBlocks(1199)
require.Error(t, err)
require.NoError(t, err)
// Pruning to the current base should work
pruned, err = bs.PruneBlocks(1200)


Loading…
Cancel
Save