You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

511 lines
16 KiB

7 years ago
7 years ago
7 years ago
7 years ago
lint: Enable Golint (#4212) * Fix many golint errors * Fix golint errors in the 'lite' package * Don't export Pool.store * Fix typo * Revert unwanted changes * Fix errors in counter package * Fix linter errors in kvstore package * Fix linter error in example package * Fix error in tests package * Fix linter errors in v2 package * Fix linter errors in consensus package * Fix linter errors in evidence package * Fix linter error in fail package * Fix linter errors in query package * Fix linter errors in core package * Fix linter errors in node package * Fix linter errors in mempool package * Fix linter error in conn package * Fix linter errors in pex package * Rename PEXReactor export to Reactor * Fix linter errors in trust package * Fix linter errors in upnp package * Fix linter errors in p2p package * Fix linter errors in proxy package * Fix linter errors in mock_test package * Fix linter error in client_test package * Fix linter errors in coretypes package * Fix linter errors in coregrpc package * Fix linter errors in rpcserver package * Fix linter errors in rpctypes package * Fix linter errors in rpctest package * Fix linter error in json2wal script * Fix linter error in wal2json script * Fix linter errors in kv package * Fix linter error in state package * Fix linter error in grpc_client * Fix linter errors in types package * Fix linter error in version package * Fix remaining errors * Address review comments * Fix broken tests * Reconcile package coregrpc * Fix golangci bot error * Fix new golint errors * Fix broken reference * Enable golint linter * minor changes to bring golint into line * fix failing test * fix pex reactor naming * address PR comments
5 years ago
7 years ago
8 years ago
pubsub 2.0 (#3227) * green pubsub tests :OK: * get rid of clientToQueryMap * Subscribe and SubscribeUnbuffered * start adapting other pkgs to new pubsub * nope * rename MsgAndTags to Message * remove TagMap it does not bring any additional benefits * bring back EventSubscriber * fix test * fix data race in TestStartNextHeightCorrectly ``` Write at 0x00c0001c7418 by goroutine 796: github.com/tendermint/tendermint/consensus.TestStartNextHeightCorrectly() /go/src/github.com/tendermint/tendermint/consensus/state_test.go:1296 +0xad testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 Previous read at 0x00c0001c7418 by goroutine 858: github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote() /go/src/github.com/tendermint/tendermint/consensus/state.go:1631 +0x1366 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote() /go/src/github.com/tendermint/tendermint/consensus/state.go:1476 +0x8f github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg() /go/src/github.com/tendermint/tendermint/consensus/state.go:667 +0xa1e github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine() /go/src/github.com/tendermint/tendermint/consensus/state.go:628 +0x794 Goroutine 796 (running) created at: testing.(*T).Run() /usr/local/go/src/testing/testing.go:878 +0x659 testing.runTests.func1() /usr/local/go/src/testing/testing.go:1119 +0xa8 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 testing.runTests() /usr/local/go/src/testing/testing.go:1117 +0x4ee testing.(*M).Run() /usr/local/go/src/testing/testing.go:1034 +0x2ee main.main() _testmain.go:214 +0x332 Goroutine 858 (running) created at: github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines() /go/src/github.com/tendermint/tendermint/consensus/state.go:334 +0x221 github.com/tendermint/tendermint/consensus.startTestRound() /go/src/github.com/tendermint/tendermint/consensus/common_test.go:122 +0x63 
github.com/tendermint/tendermint/consensus.TestStateFullRound1() /go/src/github.com/tendermint/tendermint/consensus/state_test.go:255 +0x397 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 ``` * fixes after my own review * fix formatting * wait 100ms before kicking a subscriber out + a test for indexer_service * fixes after my second review * no timeout * add changelog entries * fix merge conflicts * fix typos after Thane's review Co-Authored-By: melekes <anton.kalyaev@gmail.com> * reformat code * rewrite indexer service in the attempt to fix failing test https://github.com/tendermint/tendermint/pull/3227/#issuecomment-462316527 * Revert "rewrite indexer service in the attempt to fix failing test" This reverts commit 0d9107a098230de7138abb1c201877c246e89ed1. * another attempt to fix indexer * fixes after Ethan's review * use unbuffered channel when indexing transactions Refs https://github.com/tendermint/tendermint/pull/3227#discussion_r258786716 * add a comment for EventBus#SubscribeUnbuffered * format code
6 years ago
pubsub 2.0 (#3227) * green pubsub tests :OK: * get rid of clientToQueryMap * Subscribe and SubscribeUnbuffered * start adapting other pkgs to new pubsub * nope * rename MsgAndTags to Message * remove TagMap it does not bring any additional benefits * bring back EventSubscriber * fix test * fix data race in TestStartNextHeightCorrectly ``` Write at 0x00c0001c7418 by goroutine 796: github.com/tendermint/tendermint/consensus.TestStartNextHeightCorrectly() /go/src/github.com/tendermint/tendermint/consensus/state_test.go:1296 +0xad testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 Previous read at 0x00c0001c7418 by goroutine 858: github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote() /go/src/github.com/tendermint/tendermint/consensus/state.go:1631 +0x1366 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote() /go/src/github.com/tendermint/tendermint/consensus/state.go:1476 +0x8f github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg() /go/src/github.com/tendermint/tendermint/consensus/state.go:667 +0xa1e github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine() /go/src/github.com/tendermint/tendermint/consensus/state.go:628 +0x794 Goroutine 796 (running) created at: testing.(*T).Run() /usr/local/go/src/testing/testing.go:878 +0x659 testing.runTests.func1() /usr/local/go/src/testing/testing.go:1119 +0xa8 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 testing.runTests() /usr/local/go/src/testing/testing.go:1117 +0x4ee testing.(*M).Run() /usr/local/go/src/testing/testing.go:1034 +0x2ee main.main() _testmain.go:214 +0x332 Goroutine 858 (running) created at: github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines() /go/src/github.com/tendermint/tendermint/consensus/state.go:334 +0x221 github.com/tendermint/tendermint/consensus.startTestRound() /go/src/github.com/tendermint/tendermint/consensus/common_test.go:122 +0x63 
github.com/tendermint/tendermint/consensus.TestStateFullRound1() /go/src/github.com/tendermint/tendermint/consensus/state_test.go:255 +0x397 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 ``` * fixes after my own review * fix formatting * wait 100ms before kicking a subscriber out + a test for indexer_service * fixes after my second review * no timeout * add changelog entries * fix merge conflicts * fix typos after Thane's review Co-Authored-By: melekes <anton.kalyaev@gmail.com> * reformat code * rewrite indexer service in the attempt to fix failing test https://github.com/tendermint/tendermint/pull/3227/#issuecomment-462316527 * Revert "rewrite indexer service in the attempt to fix failing test" This reverts commit 0d9107a098230de7138abb1c201877c246e89ed1. * another attempt to fix indexer * fixes after Ethan's review * use unbuffered channel when indexing transactions Refs https://github.com/tendermint/tendermint/pull/3227#discussion_r258786716 * add a comment for EventBus#SubscribeUnbuffered * format code
6 years ago
pubsub 2.0 (#3227) * green pubsub tests :OK: * get rid of clientToQueryMap * Subscribe and SubscribeUnbuffered * start adapting other pkgs to new pubsub * nope * rename MsgAndTags to Message * remove TagMap it does not bring any additional benefits * bring back EventSubscriber * fix test * fix data race in TestStartNextHeightCorrectly ``` Write at 0x00c0001c7418 by goroutine 796: github.com/tendermint/tendermint/consensus.TestStartNextHeightCorrectly() /go/src/github.com/tendermint/tendermint/consensus/state_test.go:1296 +0xad testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 Previous read at 0x00c0001c7418 by goroutine 858: github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote() /go/src/github.com/tendermint/tendermint/consensus/state.go:1631 +0x1366 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote() /go/src/github.com/tendermint/tendermint/consensus/state.go:1476 +0x8f github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg() /go/src/github.com/tendermint/tendermint/consensus/state.go:667 +0xa1e github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine() /go/src/github.com/tendermint/tendermint/consensus/state.go:628 +0x794 Goroutine 796 (running) created at: testing.(*T).Run() /usr/local/go/src/testing/testing.go:878 +0x659 testing.runTests.func1() /usr/local/go/src/testing/testing.go:1119 +0xa8 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 testing.runTests() /usr/local/go/src/testing/testing.go:1117 +0x4ee testing.(*M).Run() /usr/local/go/src/testing/testing.go:1034 +0x2ee main.main() _testmain.go:214 +0x332 Goroutine 858 (running) created at: github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines() /go/src/github.com/tendermint/tendermint/consensus/state.go:334 +0x221 github.com/tendermint/tendermint/consensus.startTestRound() /go/src/github.com/tendermint/tendermint/consensus/common_test.go:122 +0x63 
github.com/tendermint/tendermint/consensus.TestStateFullRound1() /go/src/github.com/tendermint/tendermint/consensus/state_test.go:255 +0x397 testing.tRunner() /usr/local/go/src/testing/testing.go:827 +0x162 ``` * fixes after my own review * fix formatting * wait 100ms before kicking a subscriber out + a test for indexer_service * fixes after my second review * no timeout * add changelog entries * fix merge conflicts * fix typos after Thane's review Co-Authored-By: melekes <anton.kalyaev@gmail.com> * reformat code * rewrite indexer service in the attempt to fix failing test https://github.com/tendermint/tendermint/pull/3227/#issuecomment-462316527 * Revert "rewrite indexer service in the attempt to fix failing test" This reverts commit 0d9107a098230de7138abb1c201877c246e89ed1. * another attempt to fix indexer * fixes after Ethan's review * use unbuffered channel when indexing transactions Refs https://github.com/tendermint/tendermint/pull/3227#discussion_r258786716 * add a comment for EventBus#SubscribeUnbuffered * format code
6 years ago
8 years ago
8 years ago
8 years ago
7 years ago
lint: Enable Golint (#4212) * Fix many golint errors * Fix golint errors in the 'lite' package * Don't export Pool.store * Fix typo * Revert unwanted changes * Fix errors in counter package * Fix linter errors in kvstore package * Fix linter error in example package * Fix error in tests package * Fix linter errors in v2 package * Fix linter errors in consensus package * Fix linter errors in evidence package * Fix linter error in fail package * Fix linter errors in query package * Fix linter errors in core package * Fix linter errors in node package * Fix linter errors in mempool package * Fix linter error in conn package * Fix linter errors in pex package * Rename PEXReactor export to Reactor * Fix linter errors in trust package * Fix linter errors in upnp package * Fix linter errors in p2p package * Fix linter errors in proxy package * Fix linter errors in mock_test package * Fix linter error in client_test package * Fix linter errors in coretypes package * Fix linter errors in coregrpc package * Fix linter errors in rpcserver package * Fix linter errors in rpctypes package * Fix linter errors in rpctest package * Fix linter error in json2wal script * Fix linter error in wal2json script * Fix linter errors in kv package * Fix linter error in state package * Fix linter error in grpc_client * Fix linter errors in types package * Fix linter error in version package * Fix remaining errors * Address review comments * Fix broken tests * Reconcile package coregrpc * Fix golangci bot error * Fix new golint errors * Fix broken reference * Enable golint linter * minor changes to bring golint into line * fix failing test * fix pex reactor naming * address PR comments
5 years ago
7 years ago
7 years ago
lint: Enable Golint (#4212) * Fix many golint errors * Fix golint errors in the 'lite' package * Don't export Pool.store * Fix typo * Revert unwanted changes * Fix errors in counter package * Fix linter errors in kvstore package * Fix linter error in example package * Fix error in tests package * Fix linter errors in v2 package * Fix linter errors in consensus package * Fix linter errors in evidence package * Fix linter error in fail package * Fix linter errors in query package * Fix linter errors in core package * Fix linter errors in node package * Fix linter errors in mempool package * Fix linter error in conn package * Fix linter errors in pex package * Rename PEXReactor export to Reactor * Fix linter errors in trust package * Fix linter errors in upnp package * Fix linter errors in p2p package * Fix linter errors in proxy package * Fix linter errors in mock_test package * Fix linter error in client_test package * Fix linter errors in coretypes package * Fix linter errors in coregrpc package * Fix linter errors in rpcserver package * Fix linter errors in rpctypes package * Fix linter errors in rpctest package * Fix linter error in json2wal script * Fix linter error in wal2json script * Fix linter errors in kv package * Fix linter error in state package * Fix linter error in grpc_client * Fix linter errors in types package * Fix linter error in version package * Fix remaining errors * Address review comments * Fix broken tests * Reconcile package coregrpc * Fix golangci bot error * Fix new golint errors * Fix broken reference * Enable golint linter * minor changes to bring golint into line * fix failing test * fix pex reactor naming * address PR comments
5 years ago
7 years ago
8 years ago
8 years ago
7 years ago
8 years ago
8 years ago
WAL: better errors and new fail point (#3246) * privval: more info in errors * wal: change Debug logs to Info * wal: log and return error on corrupted wal instead of panicing * fail: Exit right away instead of sending interupt * consensus: FAIL before handling our own vote allows to replicate #3089: - run using `FAIL_TEST_INDEX=0` - delete some bytes from the end of the WAL - start normally Results in logs like: ``` I[2019-02-03|18:12:58.225] Searching for height module=consensus wal=/Users/ethanbuchman/.tendermint/data/cs.wal/wal height=1 min=0 max=0 E[2019-02-03|18:12:58.225] Error on catchup replay. Proceeding to start ConsensusState anyway module=consensus err="failed to read data: EOF" I[2019-02-03|18:12:58.225] Started node module=main nodeInfo="{ProtocolVersion:{P2P:6 Block:9 App:1} ID_:35e87e93f2e31f305b65a5517fd2102331b56002 ListenAddr:tcp://0.0.0.0:26656 Network:test-chain-J8JvJH Version:0.29.1 Channels:4020212223303800 Moniker:Ethans-MacBook-Pro.local Other:{TxIndex:on RPCAddress:tcp://0.0.0.0:26657}}" E[2019-02-03|18:12:58.226] Couldn't connect to any seeds module=p2p I[2019-02-03|18:12:59.229] Timed out module=consensus dur=998.568ms height=1 round=0 step=RoundStepNewHeight I[2019-02-03|18:12:59.230] enterNewRound(1/0). Current: 1/0/RoundStepNewHeight module=consensus height=1 round=0 I[2019-02-03|18:12:59.230] enterPropose(1/0). Current: 1/0/RoundStepNewRound module=consensus height=1 round=0 I[2019-02-03|18:12:59.230] enterPropose: Our turn to propose module=consensus height=1 round=0 proposer=AD278B7767B05D7FBEB76207024C650988FA77D5 privValidator="PrivValidator{AD278B7767B05D7FBEB76207024C650988FA77D5 LH:1, LR:0, LS:2}" E[2019-02-03|18:12:59.230] enterPropose: Error signing proposal module=consensus height=1 round=0 err="Error signing proposal: Step regression at height 1 round 0. Got 1, last step 2" I[2019-02-03|18:13:02.233] Timed out module=consensus dur=3s height=1 round=0 step=RoundStepPropose I[2019-02-03|18:13:02.233] enterPrevote(1/0). 
Current: 1/0/RoundStepPropose module=consensus I[2019-02-03|18:13:02.233] enterPrevote: ProposalBlock is nil module=consensus height=1 round=0 E[2019-02-03|18:13:02.234] Error signing vote module=consensus height=1 round=0 vote="Vote{0:AD278B7767B0 1/00/1(Prevote) 000000000000 000000000000 @ 2019-02-04T02:13:02.233897Z}" err="Error signing vote: Conflicting data" ``` Notice the EOF, the step regression, and the conflicting data. * wal: change errors to be DataCorruptionError * exit on corrupt WAL * fix log * fix new line
6 years ago
8 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
7 years ago
8 years ago
7 years ago
cs/replay: execCommitBlock should not read from state.lastValidators (#3067) * execCommitBlock should not read from state.lastValidators * fix height 1 * fix blockchain/reactor_test * fix consensus/mempool_test * fix consensus/reactor_test * fix consensus/replay_test * add CHANGELOG * fix consensus/reactor_test * fix consensus/replay_test * add a test for replay validators change * fix mem_pool test * fix byzantine test * remove a redundant code * reduce validator change blocks to 6 * fix * return peer0 config * seperate testName * seperate testName 1 * seperate testName 2 * seperate app db path * seperate app db path 1 * add a lock before startNet * move the lock to reactor_test * simulate just once * try to find problem * handshake only saveState when app version changed * update gometalinter to 3.0.0 (#3233) in the attempt to fix https://circleci.com/gh/tendermint/tendermint/43165 also code is simplified by running gofmt -s . remove unused vars enable linters we're currently passing remove deprecated linters (cherry picked from commit d47094550315c094512a242445e0dde24b5a03f5) * gofmt code * goimport code * change the bool name to testValidatorsChange * adjust receive kvstore.ProtocolVersion * adjust receive kvstore.ProtocolVersion 1 * adjust receive kvstore.ProtocolVersion 3 * fix merge execution.go * fix merge develop * fix merge develop 1 * fix run cleanupFunc * adjust code according to reviewers' opinion * modify the func name match the convention * simplify simulate a chain containing some validator change txs 1 * test CI error * Merge remote-tracking branch 'upstream/develop' into fixReplay 1 * fix pubsub_test * subscribeUnbuffered vote channel
6 years ago
7 years ago
7 years ago
7 years ago
add support for block pruning via ABCI Commit response (#4588) * Added BlockStore.DeleteBlock() * Added initial block pruner prototype * wip * Added BlockStore.PruneBlocks() * Added consensus setting for block pruning * Added BlockStore base * Error on replay if base does not have blocks * Handle missing blocks when sending VoteSetMaj23Message * Error message tweak * Properly update blockstore state * Error message fix again * blockchain: ignore peer missing blocks * Added FIXME * Added test for block replay with truncated history * Handle peer base in blockchain reactor * Improved replay error handling * Added tests for Store.PruneBlocks() * Fix non-RPC handling of truncated block history * Panic on missing block meta in needProofBlock() * Updated changelog * Handle truncated block history in RPC layer * Added info about earliest block in /status RPC * Reorder height and base in blockchain reactor messages * Updated changelog * Fix tests * Appease linter * Minor review fixes * Non-empty BlockStores should always have base > 0 * Update code to assume base > 0 invariant * Added blockstore tests for pruning to 0 * Make sure we don't prune below the current base * Added BlockStore.Size() * config: added retain_blocks recommendations * Update v1 blockchain reactor to handle blockstore base * Added state database pruning * Propagate errors on missing validator sets * Comment tweaks * Improved error message Co-Authored-By: Anton Kaliaev <anton.kalyaev@gmail.com> * use ABCI field ResponseCommit.retain_height instead of retain-blocks config option * remove State.RetainHeight, return value instead * fix minor issues * rename pruneHeights() to pruneBlocks() * noop to fix GitHub borkage Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
5 years ago
7 years ago
add support for block pruning via ABCI Commit response (#4588) * Added BlockStore.DeleteBlock() * Added initial block pruner prototype * wip * Added BlockStore.PruneBlocks() * Added consensus setting for block pruning * Added BlockStore base * Error on replay if base does not have blocks * Handle missing blocks when sending VoteSetMaj23Message * Error message tweak * Properly update blockstore state * Error message fix again * blockchain: ignore peer missing blocks * Added FIXME * Added test for block replay with truncated history * Handle peer base in blockchain reactor * Improved replay error handling * Added tests for Store.PruneBlocks() * Fix non-RPC handling of truncated block history * Panic on missing block meta in needProofBlock() * Updated changelog * Handle truncated block history in RPC layer * Added info about earliest block in /status RPC * Reorder height and base in blockchain reactor messages * Updated changelog * Fix tests * Appease linter * Minor review fixes * Non-empty BlockStores should always have base > 0 * Update code to assume base > 0 invariant * Added blockstore tests for pruning to 0 * Make sure we don't prune below the current base * Added BlockStore.Size() * config: added retain_blocks recommendations * Update v1 blockchain reactor to handle blockstore base * Added state database pruning * Propagate errors on missing validator sets * Comment tweaks * Improved error message Co-Authored-By: Anton Kaliaev <anton.kalyaev@gmail.com> * use ABCI field ResponseCommit.retain_height instead of retain-blocks config option * remove State.RetainHeight, return value instead * fix minor issues * rename pruneHeights() to pruneBlocks() * noop to fix GitHub borkage Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
5 years ago
add support for block pruning via ABCI Commit response (#4588) * Added BlockStore.DeleteBlock() * Added initial block pruner prototype * wip * Added BlockStore.PruneBlocks() * Added consensus setting for block pruning * Added BlockStore base * Error on replay if base does not have blocks * Handle missing blocks when sending VoteSetMaj23Message * Error message tweak * Properly update blockstore state * Error message fix again * blockchain: ignore peer missing blocks * Added FIXME * Added test for block replay with truncated history * Handle peer base in blockchain reactor * Improved replay error handling * Added tests for Store.PruneBlocks() * Fix non-RPC handling of truncated block history * Panic on missing block meta in needProofBlock() * Updated changelog * Handle truncated block history in RPC layer * Added info about earliest block in /status RPC * Reorder height and base in blockchain reactor messages * Updated changelog * Fix tests * Appease linter * Minor review fixes * Non-empty BlockStores should always have base > 0 * Update code to assume base > 0 invariant * Added blockstore tests for pruning to 0 * Make sure we don't prune below the current base * Added BlockStore.Size() * config: added retain_blocks recommendations * Update v1 blockchain reactor to handle blockstore base * Added state database pruning * Propagate errors on missing validator sets * Comment tweaks * Improved error message Co-Authored-By: Anton Kaliaev <anton.kalyaev@gmail.com> * use ABCI field ResponseCommit.retain_height instead of retain-blocks config option * remove State.RetainHeight, return value instead * fix minor issues * rename pruneHeights() to pruneBlocks() * noop to fix GitHub borkage Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
5 years ago
7 years ago
8 years ago
7 years ago
8 years ago
8 years ago
8 years ago
cs/replay: execCommitBlock should not read from state.lastValidators (#3067) * execCommitBlock should not read from state.lastValidators * fix height 1 * fix blockchain/reactor_test * fix consensus/mempool_test * fix consensus/reactor_test * fix consensus/replay_test * add CHANGELOG * fix consensus/reactor_test * fix consensus/replay_test * add a test for replay validators change * fix mem_pool test * fix byzantine test * remove a redundant code * reduce validator change blocks to 6 * fix * return peer0 config * seperate testName * seperate testName 1 * seperate testName 2 * seperate app db path * seperate app db path 1 * add a lock before startNet * move the lock to reactor_test * simulate just once * try to find problem * handshake only saveState when app version changed * update gometalinter to 3.0.0 (#3233) in the attempt to fix https://circleci.com/gh/tendermint/tendermint/43165 also code is simplified by running gofmt -s . remove unused vars enable linters we're currently passing remove deprecated linters (cherry picked from commit d47094550315c094512a242445e0dde24b5a03f5) * gofmt code * goimport code * change the bool name to testValidatorsChange * adjust receive kvstore.ProtocolVersion * adjust receive kvstore.ProtocolVersion 1 * adjust receive kvstore.ProtocolVersion 3 * fix merge execution.go * fix merge develop * fix merge develop 1 * fix run cleanupFunc * adjust code according to reviewers' opinion * modify the func name match the convention * simplify simulate a chain containing some validator change txs 1 * test CI error * Merge remote-tracking branch 'upstream/develop' into fixReplay 1 * fix pubsub_test * subscribeUnbuffered vote channel
6 years ago
add support for block pruning via ABCI Commit response (#4588) * Added BlockStore.DeleteBlock() * Added initial block pruner prototype * wip * Added BlockStore.PruneBlocks() * Added consensus setting for block pruning * Added BlockStore base * Error on replay if base does not have blocks * Handle missing blocks when sending VoteSetMaj23Message * Error message tweak * Properly update blockstore state * Error message fix again * blockchain: ignore peer missing blocks * Added FIXME * Added test for block replay with truncated history * Handle peer base in blockchain reactor * Improved replay error handling * Added tests for Store.PruneBlocks() * Fix non-RPC handling of truncated block history * Panic on missing block meta in needProofBlock() * Updated changelog * Handle truncated block history in RPC layer * Added info about earliest block in /status RPC * Reorder height and base in blockchain reactor messages * Updated changelog * Fix tests * Appease linter * Minor review fixes * Non-empty BlockStores should always have base > 0 * Update code to assume base > 0 invariant * Added blockstore tests for pruning to 0 * Make sure we don't prune below the current base * Added BlockStore.Size() * config: added retain_blocks recommendations * Update v1 blockchain reactor to handle blockstore base * Added state database pruning * Propagate errors on missing validator sets * Comment tweaks * Improved error message Co-Authored-By: Anton Kaliaev <anton.kalyaev@gmail.com> * use ABCI field ResponseCommit.retain_height instead of retain-blocks config option * remove State.RetainHeight, return value instead * fix minor issues * rename pruneHeights() to pruneBlocks() * noop to fix GitHub borkage Co-authored-by: Anton Kaliaev <anton.kalyaev@gmail.com>
5 years ago
  1. package consensus
  2. import (
  3. "bytes"
  4. "fmt"
  5. "hash/crc32"
  6. "io"
  7. "reflect"
  8. //"strconv"
  9. //"strings"
  10. "time"
  11. abci "github.com/tendermint/tendermint/abci/types"
  12. //auto "github.com/tendermint/tendermint/libs/autofile"
  13. dbm "github.com/tendermint/tm-db"
  14. "github.com/tendermint/tendermint/libs/log"
  15. "github.com/tendermint/tendermint/proxy"
  16. sm "github.com/tendermint/tendermint/state"
  17. "github.com/tendermint/tendermint/types"
  18. "github.com/tendermint/tendermint/version"
  19. )
  20. var crc32c = crc32.MakeTable(crc32.Castagnoli)
  21. // Functionality to replay blocks and messages on recovery from a crash.
  22. // There are two general failure scenarios:
  23. //
  24. // 1. failure during consensus
  25. // 2. failure while applying the block
  26. //
  27. // The former is handled by the WAL, the latter by the proxyApp Handshake on
  28. // restart, which ultimately hands off the work to the WAL.
  29. //-----------------------------------------
  30. // 1. Recover from failure during consensus
  31. // (by replaying messages from the WAL)
  32. //-----------------------------------------
  33. // Unmarshal and apply a single message to the consensus state as if it were
  34. // received in receiveRoutine. Lines that start with "#" are ignored.
  35. // NOTE: receiveRoutine should not be running.
  36. func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
  37. // Skip meta messages which exist for demarcating boundaries.
  38. if _, ok := msg.Msg.(EndHeightMessage); ok {
  39. return nil
  40. }
  41. // for logging
  42. switch m := msg.Msg.(type) {
  43. case types.EventDataRoundState:
  44. cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
  45. // these are playback checks
  46. ticker := time.After(time.Second * 2)
  47. if newStepSub != nil {
  48. select {
  49. case stepMsg := <-newStepSub.Out():
  50. m2 := stepMsg.Data().(types.EventDataRoundState)
  51. if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
  52. return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
  53. }
  54. case <-newStepSub.Cancelled():
  55. return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was cancelled")
  56. case <-ticker:
  57. return fmt.Errorf("failed to read off newStepSub.Out()")
  58. }
  59. }
  60. case msgInfo:
  61. peerID := m.PeerID
  62. if peerID == "" {
  63. peerID = "local"
  64. }
  65. switch msg := m.Msg.(type) {
  66. case *ProposalMessage:
  67. p := msg.Proposal
  68. cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
  69. p.BlockID.PartsHeader, "pol", p.POLRound, "peer", peerID)
  70. case *BlockPartMessage:
  71. cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID)
  72. case *VoteMessage:
  73. v := msg.Vote
  74. cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
  75. "blockID", v.BlockID, "peer", peerID)
  76. }
  77. cs.handleMsg(m)
  78. case timeoutInfo:
  79. cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
  80. cs.handleTimeout(m, cs.RoundState)
  81. default:
  82. return fmt.Errorf("replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg))
  83. }
  84. return nil
  85. }
  86. // Replay only those messages since the last block. `timeoutRoutine` should
  87. // run concurrently to read off tickChan.
  88. func (cs *State) catchupReplay(csHeight int64) error {
  89. // Set replayMode to true so we don't log signing errors.
  90. cs.replayMode = true
  91. defer func() { cs.replayMode = false }()
  92. // Ensure that #ENDHEIGHT for this height doesn't exist.
  93. // NOTE: This is just a sanity check. As far as we know things work fine
  94. // without it, and Handshake could reuse State if it weren't for
  95. // this check (since we can crash after writing #ENDHEIGHT).
  96. //
  97. // Ignore data corruption errors since this is a sanity check.
  98. gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
  99. if err != nil {
  100. return err
  101. }
  102. if gr != nil {
  103. if err := gr.Close(); err != nil {
  104. return err
  105. }
  106. }
  107. if found {
  108. return fmt.Errorf("wal should not contain #ENDHEIGHT %d", csHeight)
  109. }
  110. // Search for last height marker.
  111. //
  112. // Ignore data corruption errors in previous heights because we only care about last height
  113. gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
  114. if err == io.EOF {
  115. cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1)
  116. } else if err != nil {
  117. return err
  118. }
  119. if !found {
  120. return fmt.Errorf("cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, csHeight-1)
  121. }
  122. defer gr.Close() // nolint: errcheck
  123. cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
  124. var msg *TimedWALMessage
  125. dec := WALDecoder{gr}
  126. LOOP:
  127. for {
  128. msg, err = dec.Decode()
  129. switch {
  130. case err == io.EOF:
  131. break LOOP
  132. case IsDataCorruptionError(err):
  133. cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
  134. return err
  135. case err != nil:
  136. return err
  137. }
  138. // NOTE: since the priv key is set when the msgs are received
  139. // it will attempt to eg double sign but we can just ignore it
  140. // since the votes will be replayed and we'll get to the next step
  141. if err := cs.readReplayMessage(msg, nil); err != nil {
  142. return err
  143. }
  144. }
  145. cs.Logger.Info("Replay: Done")
  146. return nil
  147. }
  148. //--------------------------------------------------------------------------------
  149. // Parses marker lines of the form:
  150. // #ENDHEIGHT: 12345
  151. /*
  152. func makeHeightSearchFunc(height int64) auto.SearchFunc {
  153. return func(line string) (int, error) {
  154. line = strings.TrimRight(line, "\n")
  155. parts := strings.Split(line, " ")
  156. if len(parts) != 2 {
  157. return -1, errors.New("line did not have 2 parts")
  158. }
  159. i, err := strconv.Atoi(parts[1])
  160. if err != nil {
  161. return -1, errors.New("failed to parse INFO: " + err.Error())
  162. }
  163. if height < i {
  164. return 1, nil
  165. } else if height == i {
  166. return 0, nil
  167. } else {
  168. return -1, nil
  169. }
  170. }
  171. }*/
  172. //---------------------------------------------------
  173. // 2. Recover from failure while applying the block.
  174. // (by handshaking with the app to figure out where
  175. // we were last, and using the WAL to recover there.)
  176. //---------------------------------------------------
// Handshaker recovers the node after a restart by handshaking with the
// application over ABCI and replaying any blocks the app is missing.
type Handshaker struct {
	stateDB      dbm.DB                    // consensus state store; written via sm.SaveState during replay
	initialState sm.State                  // state loaded at startup; starting point for ReplayBlocks
	store        sm.BlockStore             // source of blocks (LoadBlock/LoadBlockMeta) and Base/Height bounds
	eventBus     types.BlockEventPublisher // receives block events during replayBlock; NopEventBus unless SetEventBus is called
	genDoc       *types.GenesisDoc         // genesis doc used to build the InitChain request at height 0
	logger       log.Logger                // no-op unless SetLogger is called

	nBlocks int // number of blocks applied to the state
}
  186. func NewHandshaker(stateDB dbm.DB, state sm.State,
  187. store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker {
  188. return &Handshaker{
  189. stateDB: stateDB,
  190. initialState: state,
  191. store: store,
  192. eventBus: types.NopEventBus{},
  193. genDoc: genDoc,
  194. logger: log.NewNopLogger(),
  195. nBlocks: 0,
  196. }
  197. }
// SetLogger sets the logger used during the handshake and replay.
// If not called, it defaults to a no-op logger (see NewHandshaker).
func (h *Handshaker) SetLogger(l log.Logger) {
	h.logger = l
}
// SetEventBus - sets the event bus for publishing block related events.
// If not called, it defaults to types.NopEventBus. The bus is wired into the
// block executor when a block is replayed via replayBlock.
func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
	h.eventBus = eventBus
}
// NBlocks returns the number of blocks applied to the state.
// The counter is incremented once per block replayed by replayBlocks/replayBlock.
func (h *Handshaker) NBlocks() int {
	return h.nBlocks
}
  210. // TODO: retry the handshake/replay if it fails ?
  211. func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
  212. // Handshake is done via ABCI Info on the query conn.
  213. res, err := proxyApp.Query().InfoSync(proxy.RequestInfo)
  214. if err != nil {
  215. return fmt.Errorf("error calling Info: %v", err)
  216. }
  217. blockHeight := res.LastBlockHeight
  218. if blockHeight < 0 {
  219. return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight)
  220. }
  221. appHash := res.LastBlockAppHash
  222. h.logger.Info("ABCI Handshake App Info",
  223. "height", blockHeight,
  224. "hash", fmt.Sprintf("%X", appHash),
  225. "software-version", res.Version,
  226. "protocol-version", res.AppVersion,
  227. )
  228. // Set AppVersion on the state.
  229. if h.initialState.Version.Consensus.App != version.Protocol(res.AppVersion) {
  230. h.initialState.Version.Consensus.App = version.Protocol(res.AppVersion)
  231. sm.SaveState(h.stateDB, h.initialState)
  232. }
  233. // Replay blocks up to the latest in the blockstore.
  234. _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
  235. if err != nil {
  236. return fmt.Errorf("error on replay: %v", err)
  237. }
  238. h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
  239. "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
  240. // TODO: (on restart) replay mempool
  241. return nil
  242. }
// ReplayBlocks replays all blocks since appBlockHeight and ensures the result
// matches the current state.
// Returns the final AppHash or an error.
//
// The method first handles genesis (InitChain), then classifies the relative
// positions of the app height, the block store, and the state, and dispatches
// to replayBlocks/replayBlock accordingly. Impossible combinations panic,
// since they indicate corruption under Tendermint's own control.
func (h *Handshaker) ReplayBlocks(
	state sm.State,
	appHash []byte,
	appBlockHeight int64,
	proxyApp proxy.AppConns,
) ([]byte, error) {
	storeBlockBase := h.store.Base()
	storeBlockHeight := h.store.Height()
	stateBlockHeight := state.LastBlockHeight
	h.logger.Info(
		"ABCI Replay Blocks",
		"appHeight",
		appBlockHeight,
		"storeHeight",
		storeBlockHeight,
		"stateHeight",
		stateBlockHeight)

	// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
	if appBlockHeight == 0 {
		// Build the genesis validator set from the genesis doc.
		validators := make([]*types.Validator, len(h.genDoc.Validators))
		for i, val := range h.genDoc.Validators {
			validators[i] = types.NewValidator(val.PubKey, val.Power)
		}
		validatorSet := types.NewValidatorSet(validators)
		nextVals := types.TM2PB.ValidatorUpdates(validatorSet)
		csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
		req := abci.RequestInitChain{
			Time:            h.genDoc.GenesisTime,
			ChainId:         h.genDoc.ChainID,
			ConsensusParams: csParams,
			Validators:      nextVals,
			AppStateBytes:   h.genDoc.AppState,
		}
		res, err := proxyApp.Consensus().InitChainSync(req)
		if err != nil {
			return nil, err
		}

		if stateBlockHeight == 0 { //we only update state when we are in initial state
			// If the app returned validators or consensus params, update the state.
			if len(res.Validators) > 0 {
				vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
				if err != nil {
					return nil, err
				}
				state.Validators = types.NewValidatorSet(vals)
				state.NextValidators = types.NewValidatorSet(vals)
			} else if len(h.genDoc.Validators) == 0 {
				// If validator set is not set in genesis and still empty after InitChain, exit.
				return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
			}

			if res.ConsensusParams != nil {
				state.ConsensusParams = state.ConsensusParams.Update(res.ConsensusParams)
			}
			// Persist the post-InitChain state before any replay happens.
			sm.SaveState(h.stateDB, state)
		}
	}

	// First handle edge cases and constraints on the storeBlockHeight and storeBlockBase.
	switch {
	case storeBlockHeight == 0:
		// Empty store: nothing to replay; just check the app hash matches.
		assertAppHashEqualsOneFromState(appHash, state)
		return appHash, nil

	case appBlockHeight < storeBlockBase-1:
		// the app is too far behind truncated store (can be 1 behind since we replay the next)
		return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase}

	case storeBlockHeight < appBlockHeight:
		// the app should never be ahead of the store (but this is under app's control)
		return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}

	case storeBlockHeight < stateBlockHeight:
		// the state should never be ahead of the store (this is under tendermint's control)
		panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))

	case storeBlockHeight > stateBlockHeight+1:
		// store should be at most one ahead of the state (this is under tendermint's control)
		panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
	}

	var err error
	// Now either store is equal to state, or one ahead.
	// For each, consider all cases of where the app could be, given app <= store
	if storeBlockHeight == stateBlockHeight {
		// Tendermint ran Commit and saved the state.
		// Either the app is asking for replay, or we're all synced up.
		if appBlockHeight < storeBlockHeight {
			// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
			return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false)

		} else if appBlockHeight == storeBlockHeight {
			// We're good!
			assertAppHashEqualsOneFromState(appHash, state)
			return appHash, nil
		}

	} else if storeBlockHeight == stateBlockHeight+1 {
		// We saved the block in the store but haven't updated the state,
		// so we'll need to replay a block using the WAL.
		switch {
		case appBlockHeight < stateBlockHeight:
			// the app is further behind than it should be, so replay blocks
			// but leave the last block to go through the WAL
			return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true)

		case appBlockHeight == stateBlockHeight:
			// We haven't run Commit (both the state and app are one block behind),
			// so replayBlock with the real app.
			// NOTE: We could instead use the cs.WAL on cs.Start,
			// but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT
			h.logger.Info("Replay last block using real app")
			state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
			return state.AppHash, err

		case appBlockHeight == storeBlockHeight:
			// We ran Commit, but didn't save the state, so replayBlock with mock app.
			abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight)
			if err != nil {
				return nil, err
			}
			mockApp := newMockProxyApp(appHash, abciResponses)
			h.logger.Info("Replay last block using mock app")
			state, err = h.replayBlock(state, storeBlockHeight, mockApp)
			return state.AppHash, err
		}

	}

	// All legal combinations are covered above; reaching here is a bug.
	panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d",
		appBlockHeight, storeBlockHeight, stateBlockHeight))
}
  365. func (h *Handshaker) replayBlocks(
  366. state sm.State,
  367. proxyApp proxy.AppConns,
  368. appBlockHeight,
  369. storeBlockHeight int64,
  370. mutateState bool) ([]byte, error) {
  371. // App is further behind than it should be, so we need to replay blocks.
  372. // We replay all blocks from appBlockHeight+1.
  373. //
  374. // Note that we don't have an old version of the state,
  375. // so we by-pass state validation/mutation using sm.ExecCommitBlock.
  376. // This also means we won't be saving validator sets if they change during this period.
  377. // TODO: Load the historical information to fix this and just use state.ApplyBlock
  378. //
  379. // If mutateState == true, the final block is replayed with h.replayBlock()
  380. var appHash []byte
  381. var err error
  382. finalBlock := storeBlockHeight
  383. if mutateState {
  384. finalBlock--
  385. }
  386. for i := appBlockHeight + 1; i <= finalBlock; i++ {
  387. h.logger.Info("Applying block", "height", i)
  388. block := h.store.LoadBlock(i)
  389. // Extra check to ensure the app was not changed in a way it shouldn't have.
  390. if len(appHash) > 0 {
  391. assertAppHashEqualsOneFromBlock(appHash, block)
  392. }
  393. appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB)
  394. if err != nil {
  395. return nil, err
  396. }
  397. h.nBlocks++
  398. }
  399. if mutateState {
  400. // sync the final block
  401. state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
  402. if err != nil {
  403. return nil, err
  404. }
  405. appHash = state.AppHash
  406. }
  407. assertAppHashEqualsOneFromState(appHash, state)
  408. return appHash, nil
  409. }
  410. // ApplyBlock on the proxyApp with the last block.
  411. func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) {
  412. block := h.store.LoadBlock(height)
  413. meta := h.store.LoadBlockMeta(height)
  414. // Use stubs for both mempool and evidence pool since no transactions nor
  415. // evidence are needed here - block already exists.
  416. blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, emptyMempool{}, emptyEvidencePool{})
  417. blockExec.SetEventBus(h.eventBus)
  418. var err error
  419. state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block)
  420. if err != nil {
  421. return sm.State{}, err
  422. }
  423. h.nBlocks++
  424. return state, nil
  425. }
  426. func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) {
  427. if !bytes.Equal(appHash, block.AppHash) {
  428. panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X.
  429. Block: %v
  430. `,
  431. appHash, block.AppHash, block))
  432. }
  433. }
  434. func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) {
  435. if !bytes.Equal(appHash, state.AppHash) {
  436. panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got
  437. %X, expected %X.
  438. State: %v
  439. Did you reset Tendermint without resetting your application's data?`,
  440. appHash, state.AppHash, state))
  441. }
  442. }