diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index f3be19468..5c933ce89 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -22,6 +22,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - [evidence] \#5319 Remove Amnesia & potentialAmnesia evidence types and removed POLC. (@marbar3778) - [params] \#5319 Remove `ProofofTrialPeriod` from evidence params (@marbar3778) - [crypto/secp256k1] \#5280 `secp256k1` has been removed from the Tendermint repo. (@marbar3778) + - [state] \#5348 Define an Interface for the state store. (@marbar3778) - Blockchain Protocol diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index 9dd18189b..7eea42abc 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -70,9 +70,10 @@ func newBlockchainReactor( blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } @@ -82,9 +83,12 @@ func newBlockchainReactor( // pool.height is determined from the store. fastSync := true db := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + stateStore = sm.NewStore(db) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + if err = stateStore.Save(state); err != nil { + panic(err) + } // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index 620b6ad2a..f8edeef46 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -102,9 +102,10 @@ func newBlockchainReactor( blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(blockDB) - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } @@ -114,9 +115,12 @@ func newBlockchainReactor( // pool.height is determined from the store. 
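Review note: every test file touched above migrates the same way — wrap the state DB in sm.NewStore and call methods on the returned store instead of the old package-level helpers, handling the error each method now returns. A minimal sketch of the pattern, condensed from the hunks above (identifiers such as genDoc and proxyApp come from the surrounding tests; this is a fragment, not a compilable file):

    stateDB := dbm.NewMemDB()
    stateStore := sm.NewStore(stateDB)

    // was: state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    if err != nil {
        panic(fmt.Errorf("error constructing state from genesis file: %w", err))
    }

    // was: sm.NewBlockExecutor(stateDB, ...) followed by sm.SaveState(stateDB, state)
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
        mock.Mempool{}, sm.MockEvidencePool{})
    if err := stateStore.Save(state); err != nil { // Save now reports failures
        panic(err)
    }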
fastSync := true db := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + stateStore = sm.NewStore(db) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + if err = stateStore.Save(state); err != nil { + panic(err) + } // let's add some blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 756af6eea..9984b2cac 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -156,8 +156,11 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { panic(fmt.Errorf("error start app: %w", err)) } db := dbm.NewMemDB() - appl = sm.NewBlockExecutor(db, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + stateStore := sm.NewStore(db) + appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) + if err = stateStore.Save(state); err != nil { + panic(err) + } } r := newReactor(state, store, reporter, appl, true) @@ -498,16 +501,19 @@ func newReactorStore( stateDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } db := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(), + stateStore = sm.NewStore(db) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) - sm.SaveState(db, state) + if err = stateStore.Save(state); err != nil { + panic(err) + } // add blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 0b087e4ec..7d0bd0a7a 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -45,7 +45,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for i := 0; i < nValidators; i++ { logger := consensusLogger().With("test", "byzantine", "validator", i) stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal @@ -70,12 +71,12 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // Make a full instance of the evidence pool evidenceDB := dbm.NewMemDB() - evpool, err := evidence.NewPool(evidenceDB, evidence.NewEvidenceStateStore(stateDB), blockStore) + evpool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) require.NoError(t, err) evpool.SetLogger(logger.With("module", "evidence")) // Make State - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(cs.Logger) // set private validator @@ -111,7 +112,8 @@ func 
TestByzantinePrevoteEquivocation(t *testing.T) { blocksSubs = append(blocksSubs, blocksSub) if css[i].state.LastBlockHeight == 0 { //simulate handle initChain in handshake - sm.SaveState(css[i].blockExec.DB(), css[i].state) + err = css[i].blockExec.Store().Save(css[i].state) + require.NoError(t, err) } } // make connected switches and start all reactors @@ -249,7 +251,8 @@ func TestByzantineConflictingProposalsWithPartition(t *testing.T) { } reactors[i] = conRI - sm.SaveState(css[i].blockExec.DB(), css[i].state) //for save height 1's validators info + err = css[i].blockExec.Store().Save(css[i].state) //for save height 1's validators info + require.NoError(t, err) } defer func() { diff --git a/consensus/common_test.go b/consensus/common_test.go index 8242dcc57..2a194beaf 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -390,8 +390,12 @@ func newStateWithConfigAndBlockStore( // Make State stateDB := blockDB - sm.SaveState(stateDB, state) //for save height 1's validators info - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + stateStore := sm.NewStore(stateDB) + if err := stateStore.Save(state); err != nil { //for save height 1's validators info + panic(err) + } + + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) @@ -676,7 +680,8 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou configRootDirs := make([]string, 0, nValidators) for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) for _, opt := range configOpts { @@ -713,7 +718,8 @@ func randConsensusNetWithPeers( configRootDirs := make([]string, 0, nPeers) for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) configRootDirs = append(configRootDirs, thisConfig.RootDir) ensureDir(filepath.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index fcba73f13..db9662acb 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" @@ -112,8 +113,10 @@ func deliverTxsRange(cs *State, start, end int) { func TestMempoolTxConcurrentWithCommit(t *testing.T) { state, privVals := randGenesisState(1, false, 10) blockDB := dbm.NewMemDB() + stateStore := sm.NewStore(blockDB) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) - sm.SaveState(blockDB, state) + err := stateStore.Save(state) + require.NoError(t, err) newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) const numTxs int64 = 3000 @@ -135,8 +138,10 @@ func TestMempoolRmBadTx(t *testing.T) { state, privVals := randGenesisState(1, 
false, 10) app := NewCounterApplication() blockDB := dbm.NewMemDB() + stateStore := sm.NewStore(blockDB) cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) - sm.SaveState(blockDB, state) + err := stateStore.Save(state) + require.NoError(t, err) // increment the counter by 1 txBytes := make([]byte, 8) diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index d6729cc3d..2056ddd87 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -64,7 +64,10 @@ func startConsensusNet(t *testing.T, css []*State, n int) ( blocksSubs = append(blocksSubs, blocksSub) if css[i].state.LastBlockHeight == 0 { //simulate handle initChain in handshake - sm.SaveState(css[i].blockExec.DB(), css[i].state) + if err := css[i].blockExec.Store().Save(css[i].state); err != nil { + t.Error(err) + } + } } // make connected switches and start all reactors @@ -131,7 +134,8 @@ func TestReactorWithEvidence(t *testing.T) { logger := consensusLogger() for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc) thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i)) defer os.RemoveAll(thisConfig.RootDir) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal @@ -164,7 +168,7 @@ func TestReactorWithEvidence(t *testing.T) { evpool := newMockEvidencePool(privVals[vIdx]) // Make State - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) diff --git a/consensus/replay.go b/consensus/replay.go index cfb68d0a9..007e3deac 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -8,8 +8,6 @@ import ( "reflect" "time" - dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/log" @@ -200,7 +198,7 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc { //--------------------------------------------------- type Handshaker struct { - stateDB dbm.DB + stateStore sm.Store initialState sm.State store sm.BlockStore eventBus types.BlockEventPublisher @@ -210,11 +208,11 @@ type Handshaker struct { nBlocks int // number of blocks applied to the state } -func NewHandshaker(stateDB dbm.DB, state sm.State, +func NewHandshaker(stateStore sm.Store, state sm.State, store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { return &Handshaker{ - stateDB: stateDB, + stateStore: stateStore, initialState: state, store: store, eventBus: types.NopEventBus{}, @@ -351,7 +349,9 @@ func (h *Handshaker) ReplayBlocks( } // We update the last results hash with the empty hash, to conform with RFC-6962. state.LastResultsHash = merkle.HashFromByteSlices(nil) - sm.SaveState(h.stateDB, state) + if err := h.stateStore.Save(state); err != nil { + return nil, err + } } } @@ -418,7 +418,7 @@ func (h *Handshaker) ReplayBlocks( case appBlockHeight == storeBlockHeight: // We ran Commit, but didn't save the state, so replayBlock with mock app. 
- abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) + abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight) if err != nil { return nil, err } @@ -468,7 +468,7 @@ func (h *Handshaker) replayBlocks( assertAppHashEqualsOneFromBlock(appHash, block) } - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB, h.genDoc.InitialHeight) + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight) if err != nil { return nil, err } @@ -496,7 +496,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap // Use stubs for both mempool and evidence pool since no transactions nor // evidence are needed here - block already exists. - blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, emptyMempool{}, emptyEvidencePool{}) + blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, emptyEvidencePool{}) blockExec.SetEventBus(h.eventBus) var err error diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 6181bac24..8cfe49c16 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -297,6 +297,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo if err != nil { tmos.Exit(err.Error()) } + stateStore := sm.NewStore(stateDB) gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) if err != nil { tmos.Exit(err.Error()) @@ -319,7 +320,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) } - handshaker := NewHandshaker(stateDB, state, blockStore, gdoc) + handshaker := NewHandshaker(stateStore, state, blockStore, gdoc) handshaker.SetEventBus(eventBus) err = handshaker.Handshake(proxyApp) if err != nil { @@ -327,7 +328,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo } mempool, evpool := emptyMempool{}, emptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index ce759baec..4e9898936 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -65,9 +65,9 @@ func TestMain(m *testing.M) { // wal writer when we need to, instead of with every message. 
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config, - lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { + lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) { logger := log.TestingLogger() - state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile()) + state, _ := stateStore.LoadFromDBOrGenesisFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) cs := newStateWithConfigAndBlockStore( consensusReplayConfig, @@ -159,6 +159,7 @@ LOOP: logger := log.NewNopLogger() blockDB := dbm.NewMemDB() stateDB := blockDB + stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) require.NoError(t, err) privValidator := loadPrivValidator(consensusReplayConfig) @@ -199,7 +200,7 @@ LOOP: t.Logf("WAL panicked: %v", err) // make sure we can make blocks after a crash - startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateDB) + startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore) // stop consensus state and transactions sender (initFn) cs.Stop() //nolint:errcheck // Logging this error causes failure @@ -669,6 +670,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin testConfig := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode)) defer os.RemoveAll(testConfig.RootDir) stateDB = dbm.NewMemDB() + genisisState = sim.GenesisState config = sim.Config chain = append([]*types.Block{}, sim.Chain...) // copy chain @@ -701,12 +703,13 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin stateDB, genisisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) } + stateStore := sm.NewStore(stateDB) store.chain = chain store.commits = commits state := genisisState.Copy() // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(config, stateDB, state, chain, nBlocks, mode) + state = buildTMStateFromChain(config, stateStore, state, chain, nBlocks, mode) latestAppHash := state.AppHash // make a new client creator @@ -719,8 +722,10 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2) stateDB1 := dbm.NewMemDB() - sm.SaveState(stateDB1, genisisState) - buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode) + stateStore := sm.NewStore(stateDB1) + err := stateStore.Save(genisisState) + require.NoError(t, err) + buildAppStateFromChain(proxyApp, stateStore, genisisState, chain, nBlocks, mode) } // Prune block store if requested @@ -734,7 +739,7 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateDB, state, store, genDoc) + handshaker := NewHandshaker(stateStore, state, store, genDoc) proxyApp := proxy.NewAppConns(clientCreator2) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) @@ -780,9 +785,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin } } -func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { +func applyBlock(stateStore sm.Store, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { testPartSize := 
types.BlockPartSizeBytes - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} newState, _, err := blockExec.ApplyBlock(st, blkID, blk) @@ -792,7 +797,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap return newState } -func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, +func buildAppStateFromChain(proxyApp proxy.AppConns, stateStore sm.Store, state sm.State, chain []*types.Block, nBlocks int, mode uint) { // start a new app without handshake, play nBlocks blocks if err := proxyApp.Start(); err != nil { @@ -807,24 +812,25 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, }); err != nil { panic(err) } - sm.SaveState(stateDB, state) //save height 1's validatorsInfo - + if err := stateStore.Save(state); err != nil { //save height 1's validatorsInfo + panic(err) + } switch mode { case 0: for i := 0; i < nBlocks; i++ { block := chain[i] - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not - state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) + state = applyBlock(stateStore, state, chain[nBlocks-1], proxyApp) } default: panic(fmt.Sprintf("unknown mode %v", mode)) @@ -834,7 +840,7 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, func buildTMStateFromChain( config *cfg.Config, - stateDB dbm.DB, + stateStore sm.Store, state sm.State, chain []*types.Block, nBlocks int, @@ -856,25 +862,26 @@ func buildTMStateFromChain( }); err != nil { panic(err) } - sm.SaveState(stateDB, state) //save height 1's validatorsInfo - + if err := stateStore.Save(state); err != nil { //save height 1's validatorsInfo + panic(err) + } switch mode { case 0: // sync right up for _, block := range chain { - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } case 1, 2, 3: // sync up to the penultimate as if we stored the block. 
// whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { - state = applyBlock(stateDB, state, block, proxyApp) + state = applyBlock(stateStore, state, block, proxyApp) } // apply the final block to a state copy so we can // get the right next appHash but keep the state back - applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) + applyBlock(stateStore, state, chain[len(chain)-1], proxyApp) default: panic(fmt.Sprintf("unknown mode %v", mode)) } @@ -894,6 +901,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, appVersion) + stateStore := sm.NewStore(stateDB) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) state.LastValidators = state.Validators.Copy() // mode = 0 for committing all the blocks @@ -917,7 +925,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { }) assert.Panics(t, func() { - h := NewHandshaker(stateDB, state, store, genDoc) + h := NewHandshaker(stateStore, state, store, genDoc) if err = h.Handshake(proxyApp); err != nil { t.Log(err) } @@ -941,7 +949,7 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { }) assert.Panics(t, func() { - h := NewHandshaker(stateDB, state, store, genDoc) + h := NewHandshaker(stateStore, state, store, genDoc) if err = h.Handshake(proxyApp); err != nil { t.Log(err) } @@ -1150,10 +1158,13 @@ func stateAndStore( pubKey crypto.PubKey, appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) state.Version.Consensus.App = appVersion store := newMockBlockStore(config, state.ConsensusParams) - sm.SaveState(stateDB, state) + if err := stateStore.Save(state); err != nil { + panic(err) + } return stateDB, state, store } @@ -1223,12 +1234,13 @@ func TestHandshakeUpdatesValidators(t *testing.T) { pubKey, err := privVal.GetPubKey() require.NoError(t, err) stateDB, state, store := stateAndStore(config, pubKey, 0x0) + stateStore := sm.NewStore(stateDB) oldValAddr := state.Validators.Validators[0].Address // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateDB, state, store, genDoc) + handshaker := NewHandshaker(stateStore, state, store, genDoc) proxyApp := proxy.NewAppConns(clientCreator) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) @@ -1242,7 +1254,8 @@ func TestHandshakeUpdatesValidators(t *testing.T) { t.Fatalf("Error on abci handshake: %v", err) } // reload the state, check the validator set was updated - state = sm.LoadState(stateDB) + state, err = stateStore.Load() + require.NoError(t, err) newValAddr := state.Validators.Validators[0].Address expectValAddr := val.Address diff --git a/consensus/state.go b/consensus/state.go index 2d4b5a4db..236006ed8 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1617,7 +1617,7 @@ func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) { if err != nil { return 0, fmt.Errorf("failed to prune block store: %w", err) } - err = sm.PruneStates(cs.blockExec.DB(), base, retainHeight) + err = cs.blockExec.Store().PruneStates(base, retainHeight) if err != nil { return 0, fmt.Errorf("failed to prune state database: %w", err) } diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 903967e13..80b123964 
100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -48,12 +48,16 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } blockStoreDB := db.NewMemDB() stateDB := blockStoreDB + stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) if err != nil { return fmt.Errorf("failed to make genesis state: %w", err) } state.Version.Consensus.App = kvstore.ProtocolVersion - sm.SaveState(stateDB, state) + if err = stateStore.Save(state); err != nil { + t.Error(err) + } + blockStore := store.NewBlockStore(blockStoreDB) proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) @@ -79,7 +83,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { }) mempool := emptyMempool{} evpool := emptyEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) diff --git a/evidence/pool.go b/evidence/pool.go index 50e1ca208..47495b2e6 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -30,7 +30,7 @@ type Pool struct { evidenceList *clist.CList // concurrent linked-list of evidence // needed to load validators to verify evidence - stateDB StateStore + stateDB sm.Store // needed to load headers to verify evidence blockStore BlockStore @@ -41,10 +41,12 @@ type Pool struct { // NewPool creates an evidence pool. If using an existing evidence store, // it will add all pending evidence to the concurrent list. -func NewPool(evidenceDB dbm.DB, stateDB StateStore, blockStore BlockStore) (*Pool, error) { - var ( - state = stateDB.LoadState() - ) +func NewPool(evidenceDB dbm.DB, stateDB sm.Store, blockStore BlockStore) (*Pool, error) { + + state, err := stateDB.Load() + if err != nil { + return nil, fmt.Errorf("cannot load state: %w", err) + } pool := &Pool{ stateDB: stateDB, diff --git a/evidence/pool_test.go b/evidence/pool_test.go index c8fc23443..cf7696166 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -199,15 +199,16 @@ func TestRecoverPendingEvidence(t *testing.T) { stateStore = initializeValidatorState(val, height) evidenceDB = dbm.NewMemDB() blockStoreDB = dbm.NewMemDB() - state = stateStore.LoadState() - blockStore = initializeBlockStore(blockStoreDB, state, valAddr) expiredEvidenceTime = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) goodEvidence = types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, val, evidenceChainID) expiredEvidence = types.NewMockDuplicateVoteEvidenceWithValidator(int64(1), expiredEvidenceTime, val, evidenceChainID) ) + state, err := stateStore.Load() + require.NoError(t, err) + blockStore := initializeBlockStore(blockStoreDB, state, valAddr) // load good evidence goodKey := keyPending(goodEvidence) evi, err := types.EvidenceToProto(goodEvidence) @@ -232,8 +233,9 @@ func TestRecoverPendingEvidence(t *testing.T) { assert.False(t, pool.Has(expiredEvidence)) } -func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) StateStore { +func initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) sm.Store { stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) state := sm.State{ ChainID: evidenceChainID, InitialHeight: 1, @@ -259,13 +261,15 @@ func 
initializeStateFromValidatorSet(valSet *types.ValidatorSet, height int64) S // save all states up to height for i := int64(0); i <= height; i++ { state.LastBlockHeight = i - sm.SaveState(stateDB, state) + if err := stateStore.Save(state); err != nil { + panic(err) + } } - return &stateStore{db: stateDB} + return stateStore } -func initializeValidatorState(privVal types.PrivValidator, height int64) StateStore { +func initializeValidatorState(privVal types.PrivValidator, height int64) sm.Store { pubKey, _ := privVal.GetPubKey() validator := &types.Validator{Address: pubKey.Address(), VotingPower: 0, PubKey: pubKey} @@ -314,7 +318,8 @@ func defaultTestPool(height int64) (*Pool, types.MockPV) { valAddress := val.PrivKey.PubKey().Address() evidenceDB := dbm.NewMemDB() stateStore := initializeValidatorState(val, height) - blockStore := initializeBlockStore(dbm.NewMemDB(), stateStore.LoadState(), valAddress) + state, _ := stateStore.Load() + blockStore := initializeBlockStore(dbm.NewMemDB(), state, valAddress) pool, err := NewPool(evidenceDB, stateStore, blockStore) if err != nil { panic("test evidence pool could not be created") diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 77119e57b..9f27a4307 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -22,6 +22,7 @@ import ( "github.com/tendermint/tendermint/p2p" ep "github.com/tendermint/tendermint/proto/tendermint/evidence" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -39,7 +40,7 @@ func evidenceLogger() log.Logger { } // connect N evidence reactors through N switches -func makeAndConnectReactors(config *cfg.Config, stateStores []StateStore) []*Reactor { +func makeAndConnectReactors(config *cfg.Config, stateStores []sm.Store) []*Reactor { N := len(stateStores) reactors := make([]*Reactor, N) @@ -143,7 +144,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { N := 7 // create statedb for everyone - stateDBs := make([]StateStore, N) + stateDBs := make([]sm.Store, N) val := types.NewMockPV() // we need validators saved for heights at least as high as we have evidence for height := int64(numEvidence) + 10 @@ -188,7 +189,7 @@ func TestReactorSelectiveBroadcast(t *testing.T) { stateDB2 := initializeValidatorState(val, height2) // make reactors from statedb - reactors := makeAndConnectReactors(config, []StateStore{stateDB1, stateDB2}) + reactors := makeAndConnectReactors(config, []sm.Store{stateDB1, stateDB2}) // set the peer height on each reactor for _, r := range reactors { diff --git a/evidence/services.go b/evidence/services.go index 1fd34285e..e86960653 100644 --- a/evidence/services.go +++ b/evidence/services.go @@ -1,9 +1,6 @@ package evidence import ( - dbm "github.com/tendermint/tm-db" - - "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -12,28 +9,3 @@ import ( type BlockStore interface { LoadBlockMeta(height int64) *types.BlockMeta } - -type StateStore interface { - LoadValidators(height int64) (*types.ValidatorSet, error) - LoadState() state.State -} - -type stateStore struct { - db dbm.DB -} - -var _ StateStore = &stateStore{} - -// This is a temporary measure until stateDB becomes a store -// TODO: deprecate once state has a store -func NewEvidenceStateStore(db dbm.DB) StateStore { - return &stateStore{db} -} - -func (s *stateStore) LoadValidators(height int64) (*types.ValidatorSet, error) { - return state.LoadValidators(s.db, height) -} - -func (s 
*stateStore) LoadState() state.State { - return state.LoadState(s.db) -} diff --git a/evidence/verify.go b/evidence/verify.go index a5cf4643d..c6afd5b95 100644 --- a/evidence/verify.go +++ b/evidence/verify.go @@ -12,7 +12,7 @@ import ( // - it is from a key who was a validator at the given height // - it is internally consistent // - it was properly signed by the alleged equivocator -func VerifyEvidence(evidence types.Evidence, state sm.State, stateDB StateStore, blockStore BlockStore) error { +func VerifyEvidence(evidence types.Evidence, state sm.State, stateDB sm.Store, blockStore BlockStore) error { var ( height = state.LastBlockHeight evidenceParams = state.ConsensusParams.Evidence diff --git a/evidence/verify_test.go b/evidence/verify_test.go index 9216487d9..72cd3c6ad 100644 --- a/evidence/verify_test.go +++ b/evidence/verify_test.go @@ -16,13 +16,16 @@ func TestVerifyEvidenceWrongAddress(t *testing.T) { var height int64 = 4 val := types.NewMockPV() stateStore := initializeValidatorState(val, height) - state := stateStore.LoadState() + state, err := stateStore.Load() + if err != nil { + t.Error(err) + } blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, ) evidence := types.NewMockDuplicateVoteEvidence(1, defaultEvidenceTime, evidenceChainID) - err := VerifyEvidence(evidence, state, stateStore, blockStore) + err = VerifyEvidence(evidence, state, stateStore, blockStore) errMsg := fmt.Sprintf("address %X was not a validator at height 1", evidence.Address()) if assert.Error(t, err) { assert.Equal(t, err.Error(), errMsg) @@ -33,7 +36,10 @@ func TestVerifyEvidenceExpiredEvidence(t *testing.T) { var height int64 = 4 val := types.NewMockPV() stateStore := initializeValidatorState(val, height) - state := stateStore.LoadState() + state, err := stateStore.Load() + if err != nil { + t.Error(err) + } state.ConsensusParams.Evidence.MaxAgeNumBlocks = 1 expiredEvidenceTime := time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) blockStore := &mocks.BlockStore{} @@ -42,7 +48,7 @@ func TestVerifyEvidenceExpiredEvidence(t *testing.T) { ) expiredEv := types.NewMockDuplicateVoteEvidenceWithValidator(1, expiredEvidenceTime, val, evidenceChainID) - err := VerifyEvidence(expiredEv, state, stateStore, blockStore) + err = VerifyEvidence(expiredEv, state, stateStore, blockStore) errMsg := "evidence from height 1 (created at: 2018-01-01 00:00:00 +0000 UTC) is too old" if assert.Error(t, err) { assert.Equal(t, err.Error()[:len(errMsg)], errMsg) @@ -53,7 +59,10 @@ func TestVerifyEvidenceInvalidTime(t *testing.T) { height := int64(4) val := types.NewMockPV() stateStore := initializeValidatorState(val, height) - state := stateStore.LoadState() + state, err := stateStore.Load() + if err != nil { + t.Error(err) + } blockStore := &mocks.BlockStore{} blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, @@ -61,7 +70,7 @@ func TestVerifyEvidenceInvalidTime(t *testing.T) { differentTime := time.Date(2019, 2, 1, 0, 0, 0, 0, time.UTC) ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, differentTime, val, evidenceChainID) - err := VerifyEvidence(ev, state, stateStore, blockStore) + err = VerifyEvidence(ev, state, stateStore, blockStore) errMsg := "evidence time (2019-02-01 00:00:00 +0000 UTC) is different to the time" + " of the header we have for the same height (2019-01-01 00:00:00 +0000 UTC)" if assert.Error(t, err) { 
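Review note: the evidence.StateStore shim deleted above (LoadValidators plus LoadState over a raw dbm.DB) is superseded by the sm.Store used everywhere else in this patch. Reconstructed purely from the call sites in this diff, the interface looks roughly as sketched below; the authoritative definition lives in state/store.go and may differ in parameter names, return types, and exact method set:

    // Approximate shape of sm.Store, inferred from call sites in this diff.
    type Store interface {
        Load() (State, error)
        LoadFromDBOrGenesisDoc(doc *types.GenesisDoc) (State, error)
        LoadFromDBOrGenesisFile(file string) (State, error)
        LoadValidators(height int64) (*types.ValidatorSet, error)
        LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error)
        LoadConsensusParams(height int64) (tmproto.ConsensusParams, error) // return type assumed
        Save(state State) error
        SaveABCIResponses(height int64, resps *tmstate.ABCIResponses) error
        Bootstrap(state State) error
        PruneStates(base, retainHeight int64) error
    }

    // NewStore wraps a dbm.DB in the interface; dbStore (referenced in the
    // state/export_test.go hunk further down) is the db-backed implementation.
    func NewStore(db dbm.DB) Store { return dbStore{db} }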
diff --git a/node/node.go b/node/node.go index 6b792a4d2..455ad5285 100644 --- a/node/node.go +++ b/node/node.go @@ -185,7 +185,7 @@ type Node struct { // services eventBus *types.EventBus // pub/sub for services - stateDB dbm.DB + stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk bcReactor p2p.Reactor // for fast-syncing mempoolReactor *mempl.Reactor // for gossipping transactions @@ -263,7 +263,7 @@ func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider, } func doHandshake( - stateDB dbm.DB, + stateStore sm.Store, state sm.State, blockStore sm.BlockStore, genDoc *types.GenesisDoc, @@ -271,7 +271,7 @@ func doHandshake( proxyApp proxy.AppConns, consensusLogger log.Logger) error { - handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) + handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc) handshaker.SetLogger(consensusLogger) handshaker.SetEventBus(eventBus) if err := handshaker.Handshake(proxyApp); err != nil { @@ -342,7 +342,7 @@ func createEvidenceReactor(config *cfg.Config, dbProvider DBProvider, return nil, nil, err } evidenceLogger := logger.With("module", "evidence") - evidencePool, err := evidence.NewPool(evidenceDB, evidence.NewEvidenceStateStore(stateDB), blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, sm.NewStore(stateDB), blockStore) if err != nil { return nil, nil, err } @@ -559,7 +559,7 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, // startStateSync starts an asynchronous state sync process, then switches to fast sync mode. func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reactor, stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, - stateDB dbm.DB, blockStore *store.BlockStore, state sm.State) error { + stateStore sm.Store, blockStore *store.BlockStore, state sm.State) error { ssR.Logger.Info("Starting state sync") if stateProvider == nil { @@ -582,7 +582,7 @@ func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *cs.Reacto ssR.Logger.Error("State sync failed", "err", err) return } - err = sm.BootstrapState(stateDB, state) + err = stateStore.Bootstrap(state) if err != nil { ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) return @@ -625,6 +625,8 @@ func NewNode(config *cfg.Config, return nil, err } + stateStore := sm.NewStore(stateDB) + state, genDoc, err := LoadStateFromDBOrGenesisDocProvider(stateDB, genesisDocProvider) if err != nil { return nil, err @@ -677,14 +679,17 @@ func NewNode(config *cfg.Config, // and replays any blocks as necessary to sync tendermint with the app. consensusLogger := logger.With("module", "consensus") if !stateSync { - if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { return nil, err } // Reload the state. It will have the Version.Consensus.App set by the // Handshake, and may have other modifications as well (ie. depending on // what happened during block replay). - state = sm.LoadState(stateDB) + state, err = stateStore.Load() + if err != nil { + return nil, fmt.Errorf("cannot load state: %w", err) + } } // Determine whether we should do fast sync. 
This must happen after the handshake, since the @@ -706,7 +711,7 @@ func NewNode(config *cfg.Config, // make block executor for consensus and blockchain reactors to execute blocks blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, logger.With("module", "state"), proxyApp.Consensus(), mempool, @@ -805,7 +810,7 @@ func NewNode(config *cfg.Config, nodeInfo: nodeInfo, nodeKey: nodeKey, - stateDB: stateDB, + stateStore: stateStore, blockStore: blockStore, bcReactor: bcReactor, mempoolReactor: mempoolReactor, @@ -895,7 +900,7 @@ func (n *Node) OnStart() error { return fmt.Errorf("this blockchain reactor does not support switching from state sync") } err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, - n.config.StateSync, n.config.FastSyncMode, n.stateDB, n.blockStore, n.stateSyncGenesis) + n.config.StateSync, n.config.FastSyncMode, n.stateStore, n.blockStore, n.stateSyncGenesis) if err != nil { return fmt.Errorf("failed to start state sync: %w", err) } @@ -966,7 +971,7 @@ func (n *Node) ConfigureRPC() error { ProxyAppQuery: n.proxyApp.Query(), ProxyAppMempool: n.proxyApp.Mempool(), - StateDB: n.stateDB, + StateStore: n.stateStore, BlockStore: n.blockStore, EvidencePool: n.evidencePool, ConsensusState: n.consensusState, @@ -1298,7 +1303,8 @@ func LoadStateFromDBOrGenesisDocProvider( return sm.State{}, nil, err } } - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) if err != nil { return sm.State{}, nil, err } diff --git a/node/node_test.go b/node/node_test.go index 3abd8271d..b399fe608 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -123,7 +123,8 @@ func TestNodeSetAppVersion(t *testing.T) { var appVersion uint64 = kvstore.ProtocolVersion // check version is set in state - state := sm.LoadState(n.stateDB) + state, err := n.stateStore.Load() + require.NoError(t, err) assert.Equal(t, state.Version.Consensus.App, appVersion) // check version is set in node info @@ -231,6 +232,7 @@ func TestCreateProposalBlock(t *testing.T) { var height int64 = 1 state, stateDB, privVals := state(1, height) + stateStore := sm.NewStore(stateDB) maxBytes := 16384 maxEvidence := 10 state.ConsensusParams.Block.MaxBytes = int64(maxBytes) @@ -252,7 +254,7 @@ func TestCreateProposalBlock(t *testing.T) { // Make EvidencePool evidenceDB := dbm.NewMemDB() blockStore := store.NewBlockStore(dbm.NewMemDB()) - evidencePool, err := evidence.NewPool(evidenceDB, evidence.NewEvidenceStateStore(stateDB), blockStore) + evidencePool, err := evidence.NewPool(evidenceDB, stateStore, blockStore) require.NoError(t, err) evidencePool.SetLogger(logger) @@ -274,7 +276,7 @@ func TestCreateProposalBlock(t *testing.T) { } blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, logger, proxyApp.Consensus(), mempool, @@ -346,12 +348,17 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { // save validators to db for 2 heights stateDB := dbm.NewMemDB() - sm.SaveState(stateDB, s) + stateStore := sm.NewStore(stateDB) + if err := stateStore.Save(s); err != nil { + panic(err) + } for i := 1; i < int(height); i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - sm.SaveState(stateDB, s) + if err := stateStore.Save(s); err != nil { + panic(err) + } } return s, stateDB, privVals } diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index a8e33176a..e5a3f568f 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -6,7 +6,6 @@ import ( tmmath 
"github.com/tendermint/tendermint/libs/math" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -141,7 +140,7 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR return nil, err } - results, err := sm.LoadABCIResponses(env.StateDB, height) + results, err := env.StateStore.LoadABCIResponses(height) if err != nil { return nil, err } diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 8cc752ffc..49f405872 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -81,8 +81,9 @@ func TestBlockResults(t *testing.T) { } env = &Environment{} - env.StateDB = dbm.NewMemDB() - sm.SaveABCIResponses(env.StateDB, 100, results) + env.StateStore = sm.NewStore(dbm.NewMemDB()) + err := env.StateStore.SaveABCIResponses(100, results) + require.NoError(t, err) env.BlockStore = mockBlockStore{height: 100} testCases := []struct { diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 9da667b79..89b5d6a45 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -5,7 +5,6 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -23,7 +22,7 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, pagePtr, perPagePtr *in return nil, err } - validators, err := sm.LoadValidators(env.StateDB, height) + validators, err := env.StateStore.LoadValidators(height) if err != nil { return nil, err } @@ -99,7 +98,7 @@ func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCon return nil, err } - consensusParams, err := sm.LoadConsensusParams(env.StateDB, height) + consensusParams, err := env.StateStore.LoadConsensusParams(height) if err != nil { return nil, err } diff --git a/rpc/core/env.go b/rpc/core/env.go index bcd79eb28..ccc46d5a3 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -4,8 +4,6 @@ import ( "fmt" "time" - dbm "github.com/tendermint/tm-db" - cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" @@ -73,7 +71,7 @@ type Environment struct { ProxyAppMempool proxy.AppConnMempool // interfaces defined in types and above - StateDB dbm.DB + StateStore sm.Store BlockStore sm.BlockStore EvidencePool sm.EvidencePool ConsensusState Consensus diff --git a/rpc/core/status.go b/rpc/core/status.go index e3c3b345f..7d70996d5 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -7,7 +7,6 @@ import ( "github.com/tendermint/tendermint/p2p" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -77,7 +76,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { } func validatorAtHeight(h int64) *types.Validator { - vals, err := sm.LoadValidators(env.StateDB, h) + vals, err := env.StateStore.LoadValidators(h) if err != nil { return nil } diff --git a/state/execution.go b/state/execution.go index 375178542..e584277a6 100644 --- a/state/execution.go +++ b/state/execution.go @@ -5,8 +5,6 @@ import ( "fmt" "time" - dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" cryptoenc 
"github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/fail" @@ -26,7 +24,7 @@ import ( // BlockExecutor provides the context and accessories for properly executing a block. type BlockExecutor struct { // save state, validators, consensus params, abci responses here - db dbm.DB + store Store // execute the app against this proxyApp proxy.AppConnConsensus @@ -55,7 +53,7 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption { // NewBlockExecutor returns a new BlockExecutor with a NopEventBus. // Call SetEventBus to provide one. func NewBlockExecutor( - db dbm.DB, + stateStore Store, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool mempl.Mempool, @@ -63,7 +61,7 @@ func NewBlockExecutor( options ...BlockExecutorOption, ) *BlockExecutor { res := &BlockExecutor{ - db: db, + store: stateStore, proxyApp: proxyApp, eventBus: types.NopEventBus{}, mempool: mempool, @@ -79,8 +77,8 @@ func NewBlockExecutor( return res } -func (blockExec *BlockExecutor) DB() dbm.DB { - return blockExec.db +func (blockExec *BlockExecutor) Store() Store { + return blockExec.store } // SetEventBus - sets the event bus for publishing block related events. @@ -116,7 +114,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { - return validateBlock(blockExec.evpool, blockExec.db, state, block) + return validateBlock(blockExec.evpool, state, block) } // ApplyBlock validates the block against the state, executes it against the app, @@ -135,7 +133,7 @@ func (blockExec *BlockExecutor) ApplyBlock( startTime := time.Now().UnixNano() abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, - blockExec.db, state.InitialHeight) + blockExec.store, state.InitialHeight) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { @@ -145,7 +143,9 @@ func (blockExec *BlockExecutor) ApplyBlock( fail.Fail() // XXX // Save the results before we commit. - SaveABCIResponses(blockExec.db, block.Height, abciResponses) + if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { + return state, 0, err + } fail.Fail() // XXX @@ -182,7 +182,9 @@ func (blockExec *BlockExecutor) ApplyBlock( // Update the app hash and save the state. 
state.AppHash = appHash - SaveState(blockExec.db, state) + if err := blockExec.store.Save(state); err != nil { + return state, 0, err + } fail.Fail() // XXX @@ -254,7 +256,7 @@ func execBlockOnProxyApp( logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block, - stateDB dbm.DB, + store Store, initialHeight int64, ) (*tmstate.ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 @@ -283,7 +285,7 @@ func execBlockOnProxyApp( } proxyAppConn.SetResponseCallback(proxyCb) - commitInfo, byzVals := getBeginBlockValidatorInfo(block, stateDB, initialHeight) + commitInfo, byzVals := getBeginBlockValidatorInfo(block, store, initialHeight) // Begin block var err error @@ -322,14 +324,14 @@ func execBlockOnProxyApp( return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB, +func getBeginBlockValidatorInfo(block *types.Block, store Store, initialHeight int64) (abci.LastCommitInfo, []abci.Evidence) { voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) // Initial block -> LastCommitInfo.Votes are empty. // Remember that the first LastCommit is intentionally empty, so it makes // sense for LastCommitInfo.Votes to also be empty. if block.Height > initialHeight { - lastValSet, err := LoadValidators(stateDB, block.Height-1) + lastValSet, err := store.LoadValidators(block.Height - 1) if err != nil { panic(err) } @@ -359,7 +361,7 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB, // We need the validator set. We already did this in validateBlock. // TODO: Should we instead cache the valset in the evidence itself and add // `SetValidatorSet()` and `ToABCI` methods ? - valset, err := LoadValidators(stateDB, ev.Height()) + valset, err := store.LoadValidators(ev.Height()) if err != nil { panic(err) } @@ -528,10 +530,10 @@ func ExecCommitBlock( appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger, - stateDB dbm.DB, + store Store, initialHeight int64, ) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, stateDB, initialHeight) + _, err := execBlockOnProxyApp(logger, appConnConsensus, block, store, initialHeight) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err diff --git a/state/execution_test.go b/state/execution_test.go index 679e9526d..2bcb21547 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -35,8 +35,9 @@ func TestApplyBlock(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) + stateStore := sm.NewStore(stateDB) - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), + blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{}) block := makeBlock(state, 1) @@ -60,6 +61,7 @@ func TestBeginBlockValidators(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // no need to check error again state, stateDB, _ := makeState(2, 2) + stateStore := sm.NewStore(stateDB) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} @@ -94,7 +96,7 @@ func TestBeginBlockValidators(t *testing.T) { // block for height 2 block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB, 1) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1) require.Nil(t, err, tc.desc) 
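Review note: the state/execution.go hunks above swap the raw dbm.DB inside BlockExecutor for the new Store, and ApplyBlock now returns the errors from SaveABCIResponses and Save instead of calling the old no-error helpers. A condensed sketch of the resulting surface (everything elided is unchanged in this diff):

    type BlockExecutor struct {
        store Store // previously: db dbm.DB
        // proxyApp, eventBus, mempool, evpool, logger, metrics ...
    }

    // Store replaces the old DB() accessor; callers such as the pruning path in
    // consensus/state.go now use blockExec.Store().PruneStates(base, retainHeight).
    func (blockExec *BlockExecutor) Store() Store { return blockExec.store }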
// -> app receives a list of validators with a bool indicating if they signed @@ -122,6 +124,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(2, 12) + stateStore := sm.NewStore(stateDB) prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} @@ -163,7 +166,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, state.Validators.GetProposer().Address) block.Time = now block.Evidence.Evidence = tc.evidence - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB, 1) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateStore, 1) require.Nil(t, err, tc.desc) // -> app must receive an index of the byzantine validator @@ -311,9 +314,10 @@ func TestEndBlockValidatorUpdates(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, @@ -381,8 +385,9 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, _ := makeState(1, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, diff --git a/state/export_test.go b/state/export_test.go index 25f41695c..c7de6c5c2 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,12 +1,11 @@ package state import ( - dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tm-db" ) // @@ -40,14 +39,9 @@ func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params tmproto return validateValidatorUpdates(abciUpdates, params) } -// SaveConsensusParamsInfo is an alias for the private saveConsensusParamsInfo -// method in store.go, exported exclusively and explicitly for testing. -func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params tmproto.ConsensusParams) { - saveConsensusParamsInfo(db, nextHeight, changeHeight, params) -} - // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. 
-func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { - saveValidatorsInfo(db, height, lastHeightChanged, valSet) +func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) error { + stateStore := dbStore{db} + return stateStore.saveValidatorsInfo(height, lastHeightChanged, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index ffd2a8ac5..19549f160 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -115,12 +115,17 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida }) stateDB := dbm.NewMemDB() - sm.SaveState(stateDB, s) + stateStore := sm.NewStore(stateDB) + if err := stateStore.Save(s); err != nil { + panic(err) + } for i := 1; i < height; i++ { s.LastBlockHeight++ s.LastValidators = s.Validators.Copy() - sm.SaveState(stateDB, s) + if err := stateStore.Save(s); err != nil { + panic(err) + } } return s, stateDB, privVals diff --git a/state/state_test.go b/state/state_test.go index b27020d4c..7b2e18529 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -29,10 +29,12 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { config := cfg.ResetTestRoot("state_") dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + stateStore := sm.NewStore(stateDB) require.NoError(t, err) - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") - sm.SaveState(stateDB, state) + err = stateStore.Save(state) + require.NoError(t, err) tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } @@ -74,13 +76,16 @@ func TestMakeGenesisStateNilValidators(t *testing.T) { func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) assert := assert.New(t) state.LastBlockHeight++ state.LastValidators = state.Validators - sm.SaveState(stateDB, state) + err := stateStore.Save(state) + require.NoError(t, err) - loadedState := sm.LoadState(stateDB) + loadedState, err := stateStore.Load() + require.NoError(t, err) assert.True(state.Equals(loadedState), fmt.Sprintf("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", loadedState, state)) @@ -90,6 +95,7 @@ func TestStateSaveLoad(t *testing.T) { func TestABCIResponsesSaveLoad1(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) assert := assert.New(t) state.LastBlockHeight++ @@ -107,8 +113,9 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { types.TM2PB.NewValidatorUpdate(ed25519.GenPrivKey().PubKey(), 10), }} - sm.SaveABCIResponses(stateDB, block.Height, abciResponses) - loadedABCIResponses, err := sm.LoadABCIResponses(stateDB, block.Height) + err := stateStore.SaveABCIResponses(block.Height, abciResponses) + require.NoError(t, err) + loadedABCIResponses, err := stateStore.LoadABCIResponses(block.Height) assert.Nil(err) assert.Equal(abciResponses, loadedABCIResponses, fmt.Sprintf("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", @@ -121,6 +128,8 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { defer tearDown(t) assert := assert.New(t) + stateStore := sm.NewStore(stateDB) + cases := [...]struct { // Height is implied to equal index+2, // as block 1 is created from genesis. 
@@ -169,7 +178,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { // Query all before, this should return error. for i := range cases { h := int64(i + 1) - res, err := sm.LoadABCIResponses(stateDB, h) + res, err := stateStore.LoadABCIResponses(h) assert.Error(err, "%d: %#v", i, res) } @@ -181,13 +190,14 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { DeliverTxs: tc.added, EndBlock: &abci.ResponseEndBlock{}, } - sm.SaveABCIResponses(stateDB, h, responses) + err := stateStore.SaveABCIResponses(h, responses) + require.NoError(t, err) } // Query all before, should return expected value. for i, tc := range cases { h := int64(i + 1) - res, err := sm.LoadABCIResponses(stateDB, h) + res, err := stateStore.LoadABCIResponses(h) if assert.NoError(err, "%d", i) { t.Log(res) responses := &tmstate.ABCIResponses{ @@ -206,27 +216,30 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { defer tearDown(t) assert := assert.New(t) + statestore := sm.NewStore(stateDB) + // Can't load anything for height 0. - _, err := sm.LoadValidators(stateDB, 0) + _, err := statestore.LoadValidators(0) assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. - v, err := sm.LoadValidators(stateDB, 1) + v, err := statestore.LoadValidators(1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. - v, err = sm.LoadValidators(stateDB, 2) + v, err = statestore.LoadValidators(2) assert.Nil(err, "expected no err at height 2") assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - vp0, err := sm.LoadValidators(stateDB, nextHeight+0) + err = statestore.Save(state) + require.NoError(t, err) + vp0, err := statestore.LoadValidators(nextHeight + 0) assert.Nil(err, "expected no err") - vp1, err := sm.LoadValidators(stateDB, nextHeight+1) + vp1, err := statestore.LoadValidators(nextHeight + 1) assert.Nil(err, "expected no err") assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") @@ -236,6 +249,7 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { func TestOneValidatorChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) // Change vals at these heights. changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} @@ -260,8 +274,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) - nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + err := stateStore.Save(state) + require.NoError(t, err) } // On each height change, increment the power by one. @@ -279,7 +293,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. + v, err := stateStore.LoadValidators(int64(i + 1 + 1)) // +1 because vset changes delayed by 1 block. 
assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -886,18 +900,20 @@ func TestLargeGenesisValidator(t *testing.T) { func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { const valSetSize = 2 tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) + t.Cleanup(func() { tearDown(t) }) + stateStore := sm.NewStore(stateDB) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - sm.SaveState(stateDB, state) + err := stateStore.Save(state) + require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 - v0, err := sm.LoadValidators(stateDB, nextHeight) + v0, err := stateStore.LoadValidators(nextHeight) assert.Nil(t, err) acc0 := v0.Validators[0].ProposerPriority - v1, err := sm.LoadValidators(stateDB, nextHeight+1) + v1, err := stateStore.LoadValidators(nextHeight + 1) assert.Nil(t, err) acc1 := v1.Validators[0].ProposerPriority @@ -910,10 +926,12 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - sm.SaveState(stateDB, state) + err := stateStore.Save(state) + require.NoError(t, err) _, valOld := state.Validators.GetByIndex(0) var pubkeyOld = valOld.PubKey @@ -923,17 +941,17 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) // Save state etc. - var err error var validatorUpdates []*types.Validator validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + err = stateStore.Save(state) + require.NoError(t, err) // Load nextheight, it should be the oldpubkey. - v0, err := sm.LoadValidators(stateDB, nextHeight) + v0, err := stateStore.LoadValidators(nextHeight) assert.Nil(t, err) assert.Equal(t, valSetSize, v0.Size()) index, val := v0.GetByAddress(pubkeyOld.Address()) @@ -943,7 +961,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { } // Load nextheight+1, it should be the new pubkey. - v1, err := sm.LoadValidators(stateDB, nextHeight+1) + v1, err := stateStore.LoadValidators(nextHeight + 1) assert.Nil(t, err) assert.Equal(t, valSetSize, v1.Size()) index, val = v1.GetByAddress(pubkey.Address()) @@ -972,6 +990,8 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + stateStore := sm.NewStore(stateDB) + // Change vals at these heights. 
changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) @@ -1004,8 +1024,8 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) - nextHeight := state.LastBlockHeight + 1 - sm.SaveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) + err := stateStore.Save(state) + require.NoError(t, err) } // Make all the test cases by using the same params until after the change. @@ -1023,7 +1043,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { } for _, testCase := range testCases { - p, err := sm.LoadConsensusParams(stateDB, testCase.height) + p, err := stateStore.LoadConsensusParams(testCase.height) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) assert.EqualValues(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) diff --git a/state/store.go b/state/store.go index 882562943..f6d65c4d7 100644 --- a/state/store.go +++ b/state/store.go @@ -1,6 +1,7 @@ package state import ( + "errors" "fmt" "github.com/gogo/protobuf/proto" @@ -36,10 +37,52 @@ func calcABCIResponsesKey(height int64) []byte { return []byte(fmt.Sprintf("abciResponsesKey:%v", height)) } +//---------------------- + +type Store interface { + // LoadFromDBOrGenesisFile loads the most recent state. + // If the chain is new, it will use the genesis file at the provided path as the current state. + LoadFromDBOrGenesisFile(string) (State, error) + // LoadFromDBOrGenesisDoc loads the most recent state. + // If the chain is new, it will use the genesis doc as the current state. + LoadFromDBOrGenesisDoc(*types.GenesisDoc) (State, error) + // Load loads the current state of the blockchain + Load() (State, error) + // LoadValidators loads the validator set at a given height + LoadValidators(int64) (*types.ValidatorSet, error) + // LoadABCIResponses loads the ABCIResponses for a given height + LoadABCIResponses(int64) (*tmstate.ABCIResponses, error) + // LoadConsensusParams loads the consensus params for a given height + LoadConsensusParams(int64) (tmproto.ConsensusParams, error) + // Save overwrites the previous state with the updated one + Save(State) error + // SaveABCIResponses saves ABCIResponses for a given height + SaveABCIResponses(int64, *tmstate.ABCIResponses) error + // Bootstrap is used for bootstrapping state when not starting from an initial height. + Bootstrap(State) error + // PruneStates takes the height from which to start pruning and the height at which to stop + PruneStates(int64, int64) error +} + +// dbStore wraps a db (github.com/tendermint/tm-db) +type dbStore struct { + db dbm.DB +} + +var _ Store = (*dbStore)(nil) + +// NewStore creates the dbStore of the state pkg. +func NewStore(db dbm.DB) Store { + return dbStore{db} +} + // LoadStateFromDBOrGenesisFile loads the most recent state from the database, // or creates a new one from the given genesisFilePath.
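// The hunks above introduce the state.Store interface and its dbStore implementation.
// The sketch below is illustrative only and is not part of the diff: it shows how a
// caller migrates from the removed package-level helpers to the new Store methods,
// which return errors instead of panicking. The package name and the
// exampleStoreUsage helper are hypothetical; only methods shown in this diff are used.
package example

import (
	sm "github.com/tendermint/tendermint/state"
	dbm "github.com/tendermint/tm-db"
)

func exampleStoreUsage(genesisFilePath string) error {
	stateDB := dbm.NewMemDB()
	stateStore := sm.NewStore(stateDB)

	// Previously: sm.LoadStateFromDBOrGenesisFile(stateDB, genesisFilePath)
	state, err := stateStore.LoadFromDBOrGenesisFile(genesisFilePath)
	if err != nil {
		return err
	}

	// Previously: sm.SaveState(stateDB, state); the error is now returned.
	if err := stateStore.Save(state); err != nil {
		return err
	}

	// Previously: sm.LoadState(stateDB)
	_, err = stateStore.Load()
	return err
}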
-func LoadStateFromDBOrGenesisFile(stateDB dbm.DB, genesisFilePath string) (State, error) { - state := LoadState(stateDB) +func (store dbStore) LoadFromDBOrGenesisFile(genesisFilePath string) (State, error) { + state, err := store.Load() + if err != nil { + return State{}, err + } if state.IsEmpty() { var err error state, err = MakeGenesisStateFromFile(genesisFilePath) @@ -53,8 +96,11 @@ func LoadStateFromDBOrGenesisFile(stateDB dbm.DB, genesisFilePath string) (State // LoadStateFromDBOrGenesisDoc loads the most recent state from the database, // or creates a new one from the given genesisDoc. -func LoadStateFromDBOrGenesisDoc(stateDB dbm.DB, genesisDoc *types.GenesisDoc) (State, error) { - state := LoadState(stateDB) +func (store dbStore) LoadFromDBOrGenesisDoc(genesisDoc *types.GenesisDoc) (State, error) { + state, err := store.Load() + if err != nil { + return State{}, err + } if state.IsEmpty() { var err error @@ -68,17 +114,17 @@ func LoadStateFromDBOrGenesisDoc(stateDB dbm.DB, genesisDoc *types.GenesisDoc) ( } // LoadState loads the State from the database. -func LoadState(db dbm.DB) State { - return loadState(db, stateKey) +func (store dbStore) Load() (State, error) { + return store.loadState(stateKey) } -func loadState(db dbm.DB, key []byte) (state State) { - buf, err := db.Get(key) +func (store dbStore) loadState(key []byte) (state State, err error) { + buf, err := store.db.Get(key) if err != nil { - panic(err) + return state, err } if len(buf) == 0 { - return state + return state, nil } sp := new(tmstate.State) @@ -92,51 +138,72 @@ func loadState(db dbm.DB, key []byte) (state State) { sm, err := StateFromProto(sp) if err != nil { - panic(err) + return state, err } - return *sm + return *sm, nil } -// SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. +// Save persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. // This flushes the writes (e.g. calls SetSync). -func SaveState(db dbm.DB, state State) { - saveState(db, state, stateKey) +func (store dbStore) Save(state State) error { + return store.save(state, stateKey) } -func saveState(db dbm.DB, state State, key []byte) { +func (store dbStore) save(state State, key []byte) error { nextHeight := state.LastBlockHeight + 1 // If first block, save validators for the block. if nextHeight == 1 { nextHeight = state.InitialHeight // This extra logic due to Tendermint validator set changes being delayed 1 block. // It may get overwritten due to InitChain validator updates. - saveValidatorsInfo(db, nextHeight, nextHeight, state.Validators) + if err := store.saveValidatorsInfo(nextHeight, nextHeight, state.Validators); err != nil { + return err + } } // Save next validators. - saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + if err := store.saveValidatorsInfo(nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + return err + } // Save next consensus params. - saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) - err := db.SetSync(key, state.Bytes()) + if err := store.saveConsensusParamsInfo(nextHeight, + state.LastHeightConsensusParamsChanged, state.ConsensusParams); err != nil { + return err + } + err := store.db.SetSync(key, state.Bytes()) if err != nil { - panic(err) + return err } + return nil } // BootstrapState saves a new state, used e.g. by state sync when starting from non-zero height. 
-func BootstrapState(db dbm.DB, state State) error { +func (store dbStore) Bootstrap(state State) error { height := state.LastBlockHeight + 1 if height == 1 { height = state.InitialHeight } + if height > 1 && !state.LastValidators.IsNilOrEmpty() { - saveValidatorsInfo(db, height-1, height-1, state.LastValidators) + if err := store.saveValidatorsInfo(height-1, height-1, state.LastValidators); err != nil { + return err + } + } + + if err := store.saveValidatorsInfo(height, height, state.Validators); err != nil { + return err + } + + if err := store.saveValidatorsInfo(height+1, height+1, state.NextValidators); err != nil { + return err + } + + if err := store.saveConsensusParamsInfo(height, height, state.ConsensusParams); err != nil { + return err } - saveValidatorsInfo(db, height, height, state.Validators) - saveValidatorsInfo(db, height+1, height+1, state.NextValidators) - saveConsensusParamsInfo(db, height, height, state.ConsensusParams) - return db.SetSync(stateKey, state.Bytes()) + + return store.db.SetSync(stateKey, state.Bytes()) } // PruneStates deletes states between the given heights (including from, excluding to). It is not @@ -147,20 +214,20 @@ func BootstrapState(db dbm.DB, state State) error { // encoding not preserving ordering: https://github.com/tendermint/tendermint/issues/4567 // This will cause some old states to be left behind when doing incremental partial prunes, // specifically older checkpoints and LastHeightChanged targets. -func PruneStates(db dbm.DB, from int64, to int64) error { +func (store dbStore) PruneStates(from int64, to int64) error { if from <= 0 || to <= 0 { return fmt.Errorf("from height %v and to height %v must be greater than 0", from, to) } if from >= to { return fmt.Errorf("from height %v must be lower than to height %v", from, to) } - valInfo := loadValidatorsInfo(db, to) - if valInfo == nil { - return fmt.Errorf("validators at height %v not found", to) + valInfo, err := loadValidatorsInfo(store.db, to) + if err != nil { + return fmt.Errorf("validators at height %v not found: %w", to, err) } - paramsInfo := loadConsensusParamsInfo(db, to) - if paramsInfo == nil { - return fmt.Errorf("consensus params at height %v not found", to) + paramsInfo, err := store.loadConsensusParamsInfo(to) + if err != nil { + return fmt.Errorf("consensus params at height %v not found: %w", to, err) } keepVals := make(map[int64]bool) @@ -173,10 +240,9 @@ func PruneStates(db dbm.DB, from int64, to int64) error { keepParams[paramsInfo.LastHeightChanged] = true } - batch := db.NewBatch() + batch := store.db.NewBatch() defer batch.Close() pruned := uint64(0) - var err error // We have to delete in reverse order, to avoid deleting previous heights that have validator // sets and consensus params that we may need to retrieve. @@ -185,10 +251,9 @@ func PruneStates(db dbm.DB, from int64, to int64) error { // params, otherwise they will panic if they're retrieved directly (instead of // indirectly via a LastHeightChanged pointer). 
if keepVals[h] { - v := loadValidatorsInfo(db, h) - if v.ValidatorSet == nil { - - vip, err := LoadValidators(db, h) + v, err := loadValidatorsInfo(store.db, h) + if err != nil || v.ValidatorSet == nil { + vip, err := store.LoadValidators(h) if err != nil { return err } @@ -218,17 +283,23 @@ func PruneStates(db dbm.DB, from int64, to int64) error { } if keepParams[h] { - p := loadConsensusParamsInfo(db, h) + p, err := store.loadConsensusParamsInfo(h) + if err != nil { + return err + } + if p.ConsensusParams.Equal(&tmproto.ConsensusParams{}) { - p.ConsensusParams, err = LoadConsensusParams(db, h) + p.ConsensusParams, err = store.LoadConsensusParams(h) if err != nil { return err } + p.LastHeightChanged = h bz, err := p.Marshal() if err != nil { return err } + err = batch.Set(calcConsensusParamsKey(h), bz) if err != nil { return err @@ -254,7 +325,7 @@ func PruneStates(db dbm.DB, from int64, to int64) error { return err } batch.Close() - batch = db.NewBatch() + batch = store.db.NewBatch() defer batch.Close() } } @@ -283,8 +354,8 @@ func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { // This is useful for recovering from crashes where we called app.Commit and // before we called s.Save(). It can also be used to produce Merkle proofs of // the result of txs. -func LoadABCIResponses(db dbm.DB, height int64) (*tmstate.ABCIResponses, error) { - buf, err := db.Get(calcABCIResponsesKey(height)) +func (store dbStore) LoadABCIResponses(height int64) (*tmstate.ABCIResponses, error) { + buf, err := store.db.Get(calcABCIResponsesKey(height)) if err != nil { return nil, err } @@ -311,7 +382,7 @@ func LoadABCIResponses(db dbm.DB, height int64) (*tmstate.ABCIResponses, error) // Merkle proofs. // // Exposed for testing. -func SaveABCIResponses(db dbm.DB, height int64, abciResponses *tmstate.ABCIResponses) { +func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error { var dtxs []*abci.ResponseDeliverTx //strip nil values, for _, tx := range abciResponses.DeliverTxs { @@ -323,33 +394,36 @@ func SaveABCIResponses(db dbm.DB, height int64, abciResponses *tmstate.ABCIRespo bz, err := abciResponses.Marshal() if err != nil { - panic(err) + return err } - err = db.SetSync(calcABCIResponsesKey(height), bz) + + err = store.db.SetSync(calcABCIResponsesKey(height), bz) if err != nil { - panic(err) + return err } + + return nil } //----------------------------------------------------------------------------- // LoadValidators loads the ValidatorSet for a given height. // Returns ErrNoValSetForHeight if the validator set can't be found for this height. 
-func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { - valInfo := loadValidatorsInfo(db, height) - if valInfo == nil { +func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { + valInfo, err := loadValidatorsInfo(store.db, height) + if err != nil { return nil, ErrNoValSetForHeight{height} } if valInfo.ValidatorSet == nil { lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) - valInfo2 := loadValidatorsInfo(db, lastStoredHeight) - if valInfo2 == nil || valInfo2.ValidatorSet == nil { - panic( - fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", + valInfo2, err := loadValidatorsInfo(store.db, lastStoredHeight) + if err != nil || valInfo2.ValidatorSet == nil { + return nil, + fmt.Errorf("couldn't find validators at height %d (height %d was originally requested): %w", lastStoredHeight, height, - ), - ) + err, + ) } vs, err := types.ValidatorSetFromProto(valInfo2.ValidatorSet) @@ -381,14 +455,14 @@ func lastStoredHeightFor(height, lastHeightChanged int64) int64 { } // CONTRACT: Returned ValidatorsInfo can be mutated. -func loadValidatorsInfo(db dbm.DB, height int64) *tmstate.ValidatorsInfo { +func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error) { buf, err := db.Get(calcValidatorsKey(height)) if err != nil { - panic(err) + return nil, err } if len(buf) == 0 { - return nil + return nil, errors.New("value retrieved from db is empty") } v := new(tmstate.ValidatorsInfo) @@ -400,7 +474,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) *tmstate.ValidatorsInfo { } // TODO: ensure that buf is completely read. - return v + return v, nil } // saveValidatorsInfo persists the validator set. @@ -408,9 +482,9 @@ func loadValidatorsInfo(db dbm.DB, height int64) *tmstate.ValidatorsInfo { // `height` is the effective height for which the validator is responsible for // signing. It should be called from s.Save(), right before the state itself is // persisted. -func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { +func (store dbStore) saveValidatorsInfo(height, lastHeightChanged int64, valSet *types.ValidatorSet) error { if lastHeightChanged > height { - panic("LastHeightChanged cannot be greater than ValidatorsInfo height") + return errors.New("lastHeightChanged cannot be greater than ValidatorsInfo height") } valInfo := &tmstate.ValidatorsInfo{ LastHeightChanged: lastHeightChanged, @@ -420,20 +494,22 @@ func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *type if height == lastHeightChanged || height%valSetCheckpointInterval == 0 { pv, err := valSet.ToProto() if err != nil { - panic(err) + return err } valInfo.ValidatorSet = pv } bz, err := valInfo.Marshal() if err != nil { - panic(err) + return err } - err = db.Set(calcValidatorsKey(height), bz) + err = store.db.Set(calcValidatorsKey(height), bz) if err != nil { - panic(err) + return err } + + return nil } //----------------------------------------------------------------------------- @@ -441,23 +517,22 @@ func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *type // ConsensusParamsInfo represents the latest consensus params, or the last height it changed // LoadConsensusParams loads the ConsensusParams for a given height. 
-func LoadConsensusParams(db dbm.DB, height int64) (tmproto.ConsensusParams, error) { +func (store dbStore) LoadConsensusParams(height int64) (tmproto.ConsensusParams, error) { empty := tmproto.ConsensusParams{} - paramsInfo := loadConsensusParamsInfo(db, height) - if paramsInfo == nil { - return empty, ErrNoConsensusParamsForHeight{height} + paramsInfo, err := store.loadConsensusParamsInfo(height) + if err != nil { + return empty, fmt.Errorf("could not find consensus params for height #%d: %w", height, err) } if paramsInfo.ConsensusParams.Equal(&empty) { - paramsInfo2 := loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) - if paramsInfo2 == nil { - panic( - fmt.Sprintf( - "Couldn't find consensus params at height %d as last changed from height %d", - paramsInfo.LastHeightChanged, - height, - ), + paramsInfo2, err := store.loadConsensusParamsInfo(paramsInfo.LastHeightChanged) + if err != nil { + return empty, fmt.Errorf( + "couldn't find consensus params at height %d as last changed from height %d: %w", + paramsInfo.LastHeightChanged, + height, + err, ) } @@ -467,13 +542,13 @@ func LoadConsensusParams(db dbm.DB, height int64) (tmproto.ConsensusParams, erro return paramsInfo.ConsensusParams, nil } -func loadConsensusParamsInfo(db dbm.DB, height int64) *tmstate.ConsensusParamsInfo { - buf, err := db.Get(calcConsensusParamsKey(height)) +func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusParamsInfo, error) { + buf, err := store.db.Get(calcConsensusParamsKey(height)) if err != nil { - panic(err) + return nil, err } if len(buf) == 0 { - return nil + return nil, errors.New("value retrieved from db is empty") } paramsInfo := new(tmstate.ConsensusParamsInfo) @@ -484,14 +559,14 @@ func loadConsensusParamsInfo(db dbm.DB, height int64) *tmstate.ConsensusParamsIn } // TODO: ensure that buf is completely read. - return paramsInfo + return paramsInfo, nil } // saveConsensusParamsInfo persists the consensus params for the next block to disk. // It should be called from s.Save(), right before the state itself is persisted. // If the consensus params did not change after processing the latest block, // only the last height for which they changed is persisted. 
-func saveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params tmproto.ConsensusParams) { +func (store dbStore) saveConsensusParamsInfo(nextHeight, changeHeight int64, params tmproto.ConsensusParams) error { paramsInfo := &tmstate.ConsensusParamsInfo{ LastHeightChanged: changeHeight, } @@ -501,11 +576,13 @@ func saveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params t } bz, err := paramsInfo.Marshal() if err != nil { - panic(err) + return err } - err = db.Set(calcConsensusParamsKey(nextHeight), bz) + err = store.db.Set(calcConsensusParamsKey(nextHeight), bz) if err != nil { - panic(err) + return err } + + return nil } diff --git a/state/store_test.go b/state/store_test.go index ca01afb9a..e43921519 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -23,21 +23,25 @@ import ( func TestStoreLoadValidators(t *testing.T) { stateDB := dbm.NewMemDB() + stateStore := sm.NewStore(stateDB) val, _ := types.RandValidator(true, 10) vals := types.NewValidatorSet([]*types.Validator{val}) // 1) LoadValidators loads validators using a height where they were last changed - sm.SaveValidatorsInfo(stateDB, 1, 1, vals) - sm.SaveValidatorsInfo(stateDB, 2, 1, vals) - loadedVals, err := sm.LoadValidators(stateDB, 2) + err := sm.SaveValidatorsInfo(stateDB, 1, 1, vals) + require.NoError(t, err) + err = sm.SaveValidatorsInfo(stateDB, 2, 1, vals) + require.NoError(t, err) + loadedVals, err := stateStore.LoadValidators(2) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) // 2) LoadValidators loads validators using a checkpoint height - sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + err = sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + require.NoError(t, err) - loadedVals, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval) + loadedVals, err = stateStore.LoadValidators(sm.ValSetCheckpointInterval) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) } @@ -50,22 +54,27 @@ func BenchmarkLoadValidators(b *testing.B) { dbType := dbm.BackendType(config.DBBackend) stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) require.NoError(b, err) - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { b.Fatal(err) } state.Validators = genValSet(valSetSize) state.NextValidators = state.Validators.CopyIncrementProposerPriority(1) - sm.SaveState(stateDB, state) + err = stateStore.Save(state) + require.NoError(b, err) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... i := i - sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) + if err := sm.SaveValidatorsInfo(stateDB, + int64(i), state.LastHeightValidatorsChanged, state.NextValidators); err != nil { + b.Fatal(err) + } b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { for n := 0; n < b.N; n++ { - _, err := sm.LoadValidators(stateDB, int64(i)) + _, err := stateStore.LoadValidators(int64(i)) if err != nil { b.Fatal(err) } @@ -98,6 +107,7 @@ func TestPruneStates(t *testing.T) { tc := tc t.Run(name, func(t *testing.T) { db := dbm.NewMemDB() + stateStore := sm.NewStore(db) pk := ed25519.GenPrivKey().PubKey() // Generate a bunch of state data. 
Validators change for heights ending with 3, and @@ -134,19 +144,21 @@ func TestPruneStates(t *testing.T) { state.LastValidators = state.Validators } - sm.SaveState(db, state) + err := stateStore.Save(state) + require.NoError(t, err) - sm.SaveABCIResponses(db, h, &tmstate.ABCIResponses{ + err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{ DeliverTxs: []*abci.ResponseDeliverTx{ {Data: []byte{1}}, {Data: []byte{2}}, {Data: []byte{3}}, }, }) + require.NoError(t, err) } // Test assertions - err := sm.PruneStates(db, tc.pruneFrom, tc.pruneTo) + err := stateStore.PruneStates(tc.pruneFrom, tc.pruneTo) if tc.expectErr { require.Error(t, err) return @@ -158,7 +170,7 @@ func TestPruneStates(t *testing.T) { expectABCI := sliceToMap(tc.expectABCI) for h := int64(1); h <= tc.makeHeights; h++ { - vals, err := sm.LoadValidators(db, h) + vals, err := stateStore.LoadValidators(h) if expectVals[h] { require.NoError(t, err, "validators height %v", h) require.NotNil(t, vals) @@ -167,16 +179,15 @@ func TestPruneStates(t *testing.T) { require.Equal(t, sm.ErrNoValSetForHeight{Height: h}, err) } - params, err := sm.LoadConsensusParams(db, h) + params, err := stateStore.LoadConsensusParams(h) if expectParams[h] { require.NoError(t, err, "params height %v", h) require.False(t, params.Equal(&tmproto.ConsensusParams{})) } else { require.Error(t, err, "params height %v", h) - require.Equal(t, sm.ErrNoConsensusParamsForHeight{Height: h}, err) } - abci, err := sm.LoadABCIResponses(db, h) + abci, err := stateStore.LoadABCIResponses(h) if expectABCI[h] { require.NoError(t, err, "abci height %v", h) require.NotNil(t, abci) diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 2c336809e..fa5392337 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -33,7 +33,8 @@ func TestTxFilter(t *testing.T) { for i, tc := range testCases { stateDB, err := dbm.NewDB("state", "memdb", os.TempDir()) require.NoError(t, err) - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) require.NoError(t, err) f := sm.TxPreCheck(state) // current max size of a tx 1850 diff --git a/state/validation.go b/state/validation.go index 04cc5ddd1..e63da35ec 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,8 +5,6 @@ import ( "errors" "fmt" - dbm "github.com/tendermint/tm-db" - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/types" ) @@ -14,7 +12,7 @@ import ( //----------------------------------------------------- // Validate block -func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, block *types.Block) error { +func validateBlock(evidencePool EvidencePool, state State, block *types.Block) error { // Validate internal consistency. 
if err := block.ValidateBasic(); err != nil { return err diff --git a/state/validation_test.go b/state/validation_test.go index bfb1d7dc0..99000d4cc 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -29,8 +29,9 @@ func TestValidateBlockHeader(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(3, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), memmock.Mempool{}, @@ -99,8 +100,9 @@ func TestValidateBlockCommit(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(1, 1) + stateStore := sm.NewStore(stateDB) blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), memmock.Mempool{}, @@ -212,6 +214,7 @@ func TestValidateBlockEvidence(t *testing.T) { defer proxyApp.Stop() //nolint:errcheck // ignore for tests state, stateDB, privVals := makeState(4, 1) + stateStore := sm.NewStore(stateDB) defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) evpool := &mocks.EvidencePool{} @@ -220,7 +223,7 @@ func TestValidateBlockEvidence(t *testing.T) { state.ConsensusParams.Evidence.MaxNum = 3 blockExec := sm.NewBlockExecutor( - stateDB, + stateStore, log.TestingLogger(), proxyApp.Consensus(), memmock.Mempool{}, @@ -280,6 +283,7 @@ func TestValidateBlockEvidence(t *testing.T) { func TestValidateDuplicateEvidenceShouldFail(t *testing.T) { var height int64 = 1 state, stateDB, privVals := makeState(2, int(height)) + stateStore := sm.NewStore(stateDB) _, val := state.Validators.GetByIndex(0) _, val2 := state.Validators.GetByIndex(1) ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultTestTime, @@ -288,7 +292,7 @@ func TestValidateDuplicateEvidenceShouldFail(t *testing.T) { privVals[val2.Address.String()], chainID) blockExec := sm.NewBlockExecutor( - stateDB, log.TestingLogger(), + stateStore, log.TestingLogger(), nil, nil, sm.MockEvidencePool{}) diff --git a/store/store_test.go b/store/store_test.go index f03c6c896..48eb399c6 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -60,7 +60,8 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) blockDB := dbm.NewMemDB() stateDB := dbm.NewMemDB() - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + stateStore := sm.NewStore(stateDB) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } @@ -401,7 +402,8 @@ func TestLoadBlockPart(t *testing.T) { func TestPruneBlocks(t *testing.T) { config := cfg.ResetTestRoot("blockchain_reactor_test") defer os.RemoveAll(config.RootDir) - state, err := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) + stateStore := sm.NewStore(dbm.NewMemDB()) + state, err := stateStore.LoadFromDBOrGenesisFile(config.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() bs := NewBlockStore(db)
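For completeness, a minimal sketch of how the error-returning Store methods above compose in a pruning flow; the pruneOldStates helper and its package are hypothetical, and only methods defined in this diff are used:

package example

import (
	sm "github.com/tendermint/tendermint/state"
)

// pruneOldStates removes stored state between two heights and checks that the
// retained height is still loadable. PruneStates deletes states in [from, to),
// and, like the other Store methods, returns an error instead of panicking.
func pruneOldStates(stateStore sm.Store, from, to int64) error {
	if err := stateStore.PruneStates(from, to); err != nil {
		return err
	}
	// The validator set and consensus params at `to` must remain available.
	if _, err := stateStore.LoadValidators(to); err != nil {
		return err
	}
	_, err := stateStore.LoadConsensusParams(to)
	return err
}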