package state

import (
	"context"
	"errors"
	"fmt"
	"time"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/encoding"
	"github.com/tendermint/tendermint/internal/eventbus"
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/internal/proxy"
	"github.com/tendermint/tendermint/libs/log"
	tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
	"github.com/tendermint/tendermint/types"
)
//-----------------------------------------------------------------------------

// BlockExecutor handles block execution and state updates.
// It exposes ApplyBlock(), which validates & executes the block, updates state w/ ABCI responses,
// then commits and updates the mempool atomically, then saves state.
// BlockExecutor provides the context and accessories for properly executing a block.
type BlockExecutor struct {
	// save state, validators, consensus params, abci responses here
	store Store

	// use blockstore for the pruning functions.
	blockStore BlockStore

	// execute the app against this
	proxyApp proxy.AppConnConsensus

	// events
	eventBus types.BlockEventPublisher

	// manage the mempool lock during commit
	// and update both with block results after commit.
	mempool mempool.Mempool
	evpool  EvidencePool

	logger  log.Logger
	metrics *Metrics

	// cache the verification results over a single height
	cache map[string]struct{}
}
type BlockExecutorOption func(executor *BlockExecutor)

func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption {
	return func(blockExec *BlockExecutor) {
		blockExec.metrics = metrics
	}
}
// NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
// Call SetEventBus to provide one.
func NewBlockExecutor(
	stateStore Store,
	logger log.Logger,
	proxyApp proxy.AppConnConsensus,
	pool mempool.Mempool,
	evpool EvidencePool,
	blockStore BlockStore,
	options ...BlockExecutorOption,
) *BlockExecutor {
	res := &BlockExecutor{
		store:      stateStore,
		proxyApp:   proxyApp,
		eventBus:   eventbus.NopEventBus{},
		mempool:    pool,
		evpool:     evpool,
		logger:     logger,
		metrics:    NopMetrics(),
		cache:      make(map[string]struct{}),
		blockStore: blockStore,
	}

	for _, option := range options {
		option(res)
	}

	return res
}
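// A minimal wiring sketch (illustrative only, not taken from this file):
// stateStore, appConn, memPool, evidencePool, blockStore and eventBus are
// assumed to be constructed elsewhere, and the metrics option is optional.
//
//	blockExec := NewBlockExecutor(
//		stateStore,
//		logger,
//		appConn,
//		memPool,
//		evidencePool,
//		blockStore,
//		BlockExecutorWithMetrics(NopMetrics()), // swap in real metrics if available
//	)
//	blockExec.SetEventBus(eventBus)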
func (blockExec *BlockExecutor) Store() Store {
	return blockExec.store
}

// SetEventBus - sets the event bus for publishing block related events.
// If not called, it defaults to types.NopEventBus.
func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) {
	blockExec.eventBus = eventBus
}
// CreateProposalBlock calls state.MakeBlock with evidence from the evpool
// and txs from the mempool. The max bytes must be big enough to fit the commit.
// Up to 1/10th of the block space is allocated for maximum sized evidence.
// The rest is given to txs, up to the max gas.
//
// Contract: application will not return more bytes than are sent over the wire.
func (blockExec *BlockExecutor) CreateProposalBlock(
	ctx context.Context,
	height int64,
	state State, commit *types.Commit,
	proposerAddr []byte,
) (*types.Block, *types.PartSet, error) {
	maxBytes := state.ConsensusParams.Block.MaxBytes
	maxGas := state.ConsensusParams.Block.MaxGas

	evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes)

	// Fetch a limited amount of valid txs
	maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size())

	txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas)

	preparedProposal, err := blockExec.proxyApp.PrepareProposal(
		ctx,
		abci.RequestPrepareProposal{BlockData: txs.ToSliceOfBytes(), BlockDataSize: maxDataBytes},
	)
	if err != nil {
		// The App MUST ensure that only valid (and hence 'processable') transactions
		// enter the mempool. Hence, at this point, we can't have any non-processable
		// transaction causing an error.
		//
		// Also, the App can simply skip any transaction that could cause any kind of trouble.
		// Either way, we cannot recover in a meaningful way, unless we skip proposing
		// this block, repair what caused the error and try again. Hence, we panic on
		// purpose for now.
		panic(err)
	}

	newTxs := preparedProposal.GetBlockData()
	var txSize int
	for _, tx := range newTxs {
		txSize += len(tx)

		if maxDataBytes < int64(txSize) {
			panic("block data exceeds max amount of allowed bytes")
		}
	}

	modifiedTxs := types.ToTxs(preparedProposal.GetBlockData())

	return state.MakeBlock(height, modifiedTxs, commit, evidence, proposerAddr)
}
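// Rough space budget behind CreateProposalBlock (a sketch, not exact
// accounting; see types.MaxDataBytes for the precise arithmetic):
//
//	evSize       <= state.ConsensusParams.Evidence.MaxBytes   // bytes of pending evidence returned
//	maxDataBytes  = maxBytes - block/header overhead - commit size - evSize
//	reaped txs   <= maxDataBytes, subject to maxGas
//
// i.e. the mempool is only reaped for whatever room remains after the header,
// the commit and the evidence have been accounted for.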
func (blockExec *BlockExecutor) ProcessProposal(
	ctx context.Context,
	block *types.Block,
) (bool, error) {
	req := abci.RequestProcessProposal{
		Txs:    block.Data.Txs.ToSliceOfBytes(),
		Header: *block.Header.ToProto(),
	}

	resp, err := blockExec.proxyApp.ProcessProposal(ctx, req)
	if err != nil {
		return false, ErrInvalidBlock(err)
	}

	return resp.IsOK(), nil
}
// ValidateBlock validates the given block against the given state.
// If the block is invalid, it returns an error.
// Validation does not mutate state, but does require historical information from the stateDB,
// ie. to verify evidence from a validator at an old height.
func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error {
	hash := block.Hash()
	if _, ok := blockExec.cache[hash.String()]; ok {
		return nil
	}

	err := validateBlock(state, block)
	if err != nil {
		return err
	}

	err = blockExec.evpool.CheckEvidence(block.Evidence)
	if err != nil {
		return err
	}

	blockExec.cache[hash.String()] = struct{}{}
	return nil
}
// ApplyBlock validates the block against the state, executes it against the app,
// fires the relevant events, commits the app, and saves the new state and responses.
// It returns the new state.
// It's the only function that needs to be called
// from outside this package to process and commit an entire block.
// It takes a blockID to avoid recomputing the parts hash.
func (blockExec *BlockExecutor) ApplyBlock(
	ctx context.Context,
	state State,
	blockID types.BlockID,
	block *types.Block,
) (State, error) {
	// validate the block if we haven't already
	if err := blockExec.ValidateBlock(state, block); err != nil {
		return state, ErrInvalidBlock(err)
	}

	startTime := time.Now().UnixNano()
	abciResponses, err := execBlockOnProxyApp(ctx,
		blockExec.logger, blockExec.proxyApp, block, blockExec.store, state.InitialHeight,
	)
	endTime := time.Now().UnixNano()
	blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000)
	if err != nil {
		return state, ErrProxyAppConn(err)
	}

	// Save the results before we commit.
	if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil {
		return state, err
	}

	// validate the validator updates and convert to tendermint types
	abciValUpdates := abciResponses.EndBlock.ValidatorUpdates
	err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator)
	if err != nil {
		return state, fmt.Errorf("error in validator updates: %w", err)
	}

	validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates)
	if err != nil {
		return state, err
	}
	if len(validatorUpdates) > 0 {
		blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates))
	}

	// Update the state with the block and responses.
	state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
	if err != nil {
		return state, fmt.Errorf("commit failed for application: %w", err)
	}

	// Lock mempool, commit app state, update mempool.
	appHash, retainHeight, err := blockExec.Commit(ctx, state, block, abciResponses.DeliverTxs)
	if err != nil {
		return state, fmt.Errorf("commit failed for application: %w", err)
	}

	// Update evpool with the latest state.
	blockExec.evpool.Update(state, block.Evidence)

	// Update the app hash and save the state.
	state.AppHash = appHash
	if err := blockExec.store.Save(state); err != nil {
		return state, err
	}

	// Prune old heights, if requested by ABCI app.
	if retainHeight > 0 {
		pruned, err := blockExec.pruneBlocks(retainHeight)
		if err != nil {
			blockExec.logger.Error("failed to prune blocks", "retain_height", retainHeight, "err", err)
		} else {
			blockExec.logger.Debug("pruned blocks", "pruned", pruned, "retain_height", retainHeight)
		}
	}

	// reset the verification cache
	blockExec.cache = make(map[string]struct{})

	// Events are fired after everything else.
	// NOTE: if we crash between Commit and Save, events won't be fired during replay
	fireEvents(ctx, blockExec.logger, blockExec.eventBus, block, blockID, abciResponses, validatorUpdates)

	return state, nil
}
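// Typical call-site sketch (hedged; this mirrors how the consensus machinery
// is expected to drive ApplyBlock, with names abbreviated for illustration):
//
//	blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: partSet.Header()}
//	state, err = blockExec.ApplyBlock(ctx, state, blockID, block)
//	if err != nil {
//		// the block failed validation or the application connection failed
//	}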
func (blockExec *BlockExecutor) ExtendVote(ctx context.Context, vote *types.Vote) (types.VoteExtension, error) {
	req := abci.RequestExtendVote{
		Vote: vote.ToProto(),
	}

	resp, err := blockExec.proxyApp.ExtendVote(ctx, req)
	if err != nil {
		return types.VoteExtension{}, err
	}
	return types.VoteExtensionFromProto(resp.VoteExtension), nil
}

func (blockExec *BlockExecutor) VerifyVoteExtension(ctx context.Context, vote *types.Vote) error {
	req := abci.RequestVerifyVoteExtension{
		Vote: vote.ToProto(),
	}

	resp, err := blockExec.proxyApp.VerifyVoteExtension(ctx, req)
	if err != nil {
		return err
	}

	if resp.IsErr() {
		return types.ErrVoteInvalidExtension
	}

	return nil
}
// Commit locks the mempool, runs the ABCI Commit message, and updates the
// mempool.
// It returns the result of calling abci.Commit (the AppHash) and the height to retain (if any).
// The Mempool must be locked during commit and update because state is
// typically reset on Commit and old txs must be replayed against committed
// state before new txs are run in the mempool, lest they be invalid.
func (blockExec *BlockExecutor) Commit(
	ctx context.Context,
	state State,
	block *types.Block,
	deliverTxResponses []*abci.ResponseDeliverTx,
) ([]byte, int64, error) {
	blockExec.mempool.Lock()
	defer blockExec.mempool.Unlock()

	// while mempool is Locked, flush to ensure all async requests have completed
	// in the ABCI app before Commit.
	err := blockExec.mempool.FlushAppConn(ctx)
	if err != nil {
		blockExec.logger.Error("client error during mempool.FlushAppConn", "err", err)
		return nil, 0, err
	}

	// Commit block, get hash back
	res, err := blockExec.proxyApp.Commit(ctx)
	if err != nil {
		blockExec.logger.Error("client error during proxyAppConn.Commit", "err", err)
		return nil, 0, err
	}

	// ResponseCommit has no error code - just data
	blockExec.logger.Info(
		"committed state",
		"height", block.Height,
		"num_txs", len(block.Txs),
		"app_hash", fmt.Sprintf("%X", res.Data),
	)

	// Update mempool.
	err = blockExec.mempool.Update(
		ctx,
		block.Height,
		block.Txs,
		deliverTxResponses,
		TxPreCheck(state),
		TxPostCheck(state),
	)

	return res.Data, res.RetainHeight, err
}
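// The ordering Commit enforces, restated as a sketch (assumes the usual ABCI
// contract that the app resets its CheckTx state on Commit):
//
//	mempool.Lock()
//	mempool.FlushAppConn(ctx) // drain in-flight requests on the mempool connection
//	proxyApp.Commit(ctx)      // app persists state; returns AppHash and RetainHeight
//	mempool.Update(...)       // drop committed txs, re-check the rest against the new state
//	mempool.Unlock()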
//---------------------------------------------------------
// Helper functions for executing blocks and updating state

// Executes block's transactions on proxyAppConn.
// Returns a list of transaction results and updates to the validator set
func execBlockOnProxyApp(
	ctx context.Context,
	logger log.Logger,
	proxyAppConn proxy.AppConnConsensus,
	block *types.Block,
	store Store,
	initialHeight int64,
) (*tmstate.ABCIResponses, error) {
	var validTxs, invalidTxs = 0, 0

	txIndex := 0
	abciResponses := new(tmstate.ABCIResponses)
	dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs))
	abciResponses.DeliverTxs = dtxs

	commitInfo := getBeginBlockValidatorInfo(block, store, initialHeight)

	byzVals := make([]abci.Evidence, 0)
	for _, evidence := range block.Evidence {
		byzVals = append(byzVals, evidence.ABCI()...)
	}

	// Begin block
	var err error
	pbh := block.Header.ToProto()
	if pbh == nil {
		return nil, errors.New("nil header")
	}

	abciResponses.BeginBlock, err = proxyAppConn.BeginBlock(
		ctx,
		abci.RequestBeginBlock{
			Hash:                block.Hash(),
			Header:              *pbh,
			LastCommitInfo:      commitInfo,
			ByzantineValidators: byzVals,
		},
	)
	if err != nil {
		logger.Error("error in proxyAppConn.BeginBlock", "err", err)
		return nil, err
	}

	// run txs of block
	for _, tx := range block.Txs {
		resp, err := proxyAppConn.DeliverTx(ctx, abci.RequestDeliverTx{Tx: tx})
		if err != nil {
			return nil, err
		}
		if resp.Code == abci.CodeTypeOK {
			validTxs++
		} else {
			logger.Debug("invalid tx", "code", resp.Code, "log", resp.Log)
			invalidTxs++
		}

		abciResponses.DeliverTxs[txIndex] = resp
		txIndex++
	}

	abciResponses.EndBlock, err = proxyAppConn.EndBlock(ctx, abci.RequestEndBlock{Height: block.Height})
	if err != nil {
		logger.Error("error in proxyAppConn.EndBlock", "err", err)
		return nil, err
	}

	logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs)
	return abciResponses, nil
}
func getBeginBlockValidatorInfo(block *types.Block, store Store,
	initialHeight int64) abci.LastCommitInfo {
	voteInfos := make([]abci.VoteInfo, block.LastCommit.Size())
	// Initial block -> LastCommitInfo.Votes are empty.
	// Remember that the first LastCommit is intentionally empty, so it makes
	// sense for LastCommitInfo.Votes to also be empty.
	if block.Height > initialHeight {
		lastValSet, err := store.LoadValidators(block.Height - 1)
		if err != nil {
			panic(err)
		}

		// Sanity check that commit size matches validator set size - only applies
		// after first block.
		var (
			commitSize = block.LastCommit.Size()
			valSetLen  = len(lastValSet.Validators)
		)
		if commitSize != valSetLen {
			panic(fmt.Sprintf(
				"commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v",
				commitSize, valSetLen, block.Height, block.LastCommit.Signatures, lastValSet.Validators,
			))
		}

		for i, val := range lastValSet.Validators {
			commitSig := block.LastCommit.Signatures[i]
			voteInfos[i] = abci.VoteInfo{
				Validator:       types.TM2PB.Validator(val),
				SignedLastBlock: !commitSig.Absent(),
			}
		}
	}

	return abci.LastCommitInfo{
		Round: block.LastCommit.Round,
		Votes: voteInfos,
	}
}
func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate,
	params types.ValidatorParams) error {
	for _, valUpdate := range abciUpdates {
		if valUpdate.GetPower() < 0 {
			return fmt.Errorf("voting power can't be negative %v", valUpdate)
		} else if valUpdate.GetPower() == 0 {
			// continue, since this is deleting the validator, and thus there is no
			// pubkey to check
			continue
		}

		// Check if validator's pubkey matches an ABCI type in the consensus params
		pk, err := encoding.PubKeyFromProto(valUpdate.PubKey)
		if err != nil {
			return err
		}

		if !params.IsValidPubkeyType(pk.Type()) {
			return fmt.Errorf("validator %v is using pubkey %s, which is unsupported for consensus",
				valUpdate, pk.Type())
		}
	}
	return nil
}
// updateState returns a new State updated according to the header and responses.
func updateState(
	state State,
	blockID types.BlockID,
	header *types.Header,
	abciResponses *tmstate.ABCIResponses,
	validatorUpdates []*types.Validator,
) (State, error) {
	// Copy the valset so we can apply changes from EndBlock
	// and update s.LastValidators and s.Validators.
	nValSet := state.NextValidators.Copy()

	// Update the validator set with the latest abciResponses.
	lastHeightValsChanged := state.LastHeightValidatorsChanged
	if len(validatorUpdates) > 0 {
		err := nValSet.UpdateWithChangeSet(validatorUpdates)
		if err != nil {
			return state, fmt.Errorf("error changing validator set: %w", err)
		}
		// Change results from this height but only applies to the next next height.
		lastHeightValsChanged = header.Height + 1 + 1
	}

	// Update validator proposer priority and set state variables.
	nValSet.IncrementProposerPriority(1)

	// Update the params with the latest abciResponses.
	nextParams := state.ConsensusParams
	lastHeightParamsChanged := state.LastHeightConsensusParamsChanged
	if abciResponses.EndBlock.ConsensusParamUpdates != nil {
		// NOTE: must not mutate s.ConsensusParams
		nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.EndBlock.ConsensusParamUpdates)
		err := nextParams.ValidateConsensusParams()
		if err != nil {
			return state, fmt.Errorf("error updating consensus params: %w", err)
		}

		state.Version.Consensus.App = nextParams.Version.AppVersion

		// Change results from this height but only applies to the next height.
		lastHeightParamsChanged = header.Height + 1
	}

	nextVersion := state.Version

	// NOTE: the AppHash and the VoteExtension have not been populated.
	// They will be filled on state.Save.
	return State{
		Version:                          nextVersion,
		ChainID:                          state.ChainID,
		InitialHeight:                    state.InitialHeight,
		LastBlockHeight:                  header.Height,
		LastBlockID:                      blockID,
		LastBlockTime:                    header.Time,
		NextValidators:                   nValSet,
		Validators:                       state.NextValidators.Copy(),
		LastValidators:                   state.Validators.Copy(),
		LastHeightValidatorsChanged:      lastHeightValsChanged,
		ConsensusParams:                  nextParams,
		LastHeightConsensusParamsChanged: lastHeightParamsChanged,
		LastResultsHash:                  ABCIResponsesResultsHash(abciResponses),
		AppHash:                          nil,
	}, nil
}
// Fire NewBlock, NewBlockHeader.
// Fire TxEvent for every tx.
// NOTE: if Tendermint crashes before commit, some or all of these events may be published again.
func fireEvents(
	ctx context.Context,
	logger log.Logger,
	eventBus types.BlockEventPublisher,
	block *types.Block,
	blockID types.BlockID,
	abciResponses *tmstate.ABCIResponses,
	validatorUpdates []*types.Validator,
) {
	if err := eventBus.PublishEventNewBlock(ctx, types.EventDataNewBlock{
		Block:            block,
		BlockID:          blockID,
		ResultBeginBlock: *abciResponses.BeginBlock,
		ResultEndBlock:   *abciResponses.EndBlock,
	}); err != nil {
		logger.Error("failed publishing new block", "err", err)
	}

	if err := eventBus.PublishEventNewBlockHeader(ctx, types.EventDataNewBlockHeader{
		Header:           block.Header,
		NumTxs:           int64(len(block.Txs)),
		ResultBeginBlock: *abciResponses.BeginBlock,
		ResultEndBlock:   *abciResponses.EndBlock,
	}); err != nil {
		logger.Error("failed publishing new block header", "err", err)
	}

	if len(block.Evidence) != 0 {
		for _, ev := range block.Evidence {
			if err := eventBus.PublishEventNewEvidence(ctx, types.EventDataNewEvidence{
				Evidence: ev,
				Height:   block.Height,
			}); err != nil {
				logger.Error("failed publishing new evidence", "err", err)
			}
		}
	}

	for i, tx := range block.Data.Txs {
		if err := eventBus.PublishEventTx(ctx, types.EventDataTx{TxResult: abci.TxResult{
			Height: block.Height,
			Index:  uint32(i),
			Tx:     tx,
			Result: *(abciResponses.DeliverTxs[i]),
		}}); err != nil {
			logger.Error("failed publishing event TX", "err", err)
		}
	}

	if len(validatorUpdates) > 0 {
		if err := eventBus.PublishEventValidatorSetUpdates(ctx,
			types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil {
			logger.Error("failed publishing event", "err", err)
		}
	}
}
//----------------------------------------------------------------------------------------------------
// Execute block without state. TODO: eliminate

// ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state.
// It returns the application root hash (result of abci.Commit).
func ExecCommitBlock(
	ctx context.Context,
	be *BlockExecutor,
	appConnConsensus proxy.AppConnConsensus,
	block *types.Block,
	logger log.Logger,
	store Store,
	initialHeight int64,
	s State,
) ([]byte, error) {
	abciResponses, err := execBlockOnProxyApp(ctx, logger, appConnConsensus, block, store, initialHeight)
	if err != nil {
		logger.Error("failed executing block on proxy app", "height", block.Height, "err", err)
		return nil, err
	}

	// the BlockExecutor condition is used for the final block replay process.
	if be != nil {
		abciValUpdates := abciResponses.EndBlock.ValidatorUpdates
		err = validateValidatorUpdates(abciValUpdates, s.ConsensusParams.Validator)
		if err != nil {
			logger.Error("err", err)
			return nil, err
		}
		validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates)
		if err != nil {
			logger.Error("err", err)
			return nil, err
		}

		bps, err := block.MakePartSet(types.BlockPartSizeBytes)
		if err != nil {
			return nil, err
		}

		blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}
		fireEvents(ctx, be.logger, be.eventBus, block, blockID, abciResponses, validatorUpdates)
	}

	// Commit block, get hash back
	res, err := appConnConsensus.Commit(ctx)
	if err != nil {
		logger.Error("client error during proxyAppConn.Commit", "err", err)
		return nil, err
	}

	// ResponseCommit has no error or log, just data
	return res.Data, nil
}
func (blockExec *BlockExecutor) pruneBlocks(retainHeight int64) (uint64, error) {
	base := blockExec.blockStore.Base()
	if retainHeight <= base {
		return 0, nil
	}

	pruned, err := blockExec.blockStore.PruneBlocks(retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune block store: %w", err)
	}

	err = blockExec.Store().PruneStates(retainHeight)
	if err != nil {
		return 0, fmt.Errorf("failed to prune state store: %w", err)
	}

	return pruned, nil
}
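// Data-flow sketch for pruning (a hedged restatement of ApplyBlock above):
// the application requests pruning via ResponseCommit.RetainHeight, ApplyBlock
// forwards it here, and both the block store and the state store are pruned
// up to, but never below, the current base:
//
//	appHash, retainHeight, err := blockExec.Commit(ctx, state, block, deliverTxResponses)
//	if err == nil && retainHeight > 0 {
//		pruned, _ := blockExec.pruneBlocks(retainHeight) // number of blocks removed
//		...
//	}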