
Fix linter errors thrown by `lll` (#3970)

* Fix long line errors in abci, crypto, and libs packages

* Fix long lines in p2p and rpc packages

* Fix long lines in abci, state, and tools packages

* Fix long lines in behaviour and blockchain packages

* Fix long lines in cmd and config packages

* Begin fixing long lines in consensus package

* Finish fixing long lines in consensus package

* Add lll exclusion for lines containing URLs

* Fix long lines in crypto package

* Fix long lines in evidence package

* Fix long lines in mempool and node packages

* Fix long lines in libs package

* Fix long lines in lite package

* Fix new long line in node package

* Fix long lines in p2p package

* Ignore gocritic warning

* Fix long lines in privval package

* Fix long lines in rpc package

* Fix long lines in scripts package

* Fix long lines in state package

* Fix long lines in tools package

* Fix long lines in types package

* Enable lll linter
pull/4068/head
Phil Salant, 5 years ago, committed by Marko
parent commit bc572217c0
112 changed files with 1676 additions and 361 deletions
1. .golangci.yml (+7 -1)
2. abci/cmd/abci-cli/abci-cli.go (+15 -3)
3. abci/example/kvstore/kvstore_test.go (+6 -1)
4. abci/types/application.go (+1 -1)
5. behaviour/reporter_test.go (+18 -3)
6. blockchain/v0/pool.go (+8 -1)
7. blockchain/v0/pool_test.go (+2 -1)
8. blockchain/v0/reactor_test.go (+11 -2)
9. blockchain/v1/pool_test.go (+18 -6)
10. blockchain/v1/reactor_fsm.go (+6 -4)
11. blockchain/v1/reactor_test.go (+15 -3)
12. cmd/tendermint/commands/lite.go (+5 -1)
13. cmd/tendermint/commands/run_node.go (+23 -5)
14. cmd/tendermint/commands/testnet.go (+9 -3)
15. config/config.go (+8 -4)
16. config/toml.go (+4 -2)
17. consensus/byzantine_test.go (+8 -1)
18. consensus/common_test.go (+66 -12)
19. consensus/reactor.go (+20 -5)
20. consensus/reactor_test.go (+35 -8)
21. consensus/replay.go (+14 -2)
22. consensus/replay_test.go (+49 -11)
23. consensus/state.go (+128 -24)
24. consensus/state_test.go (+28 -6)
25. consensus/types/height_vote_set.go (+7 -2)
26. consensus/types/peer_round_state.go (+22 -12)
27. consensus/types/round_state.go (+21 -15)
28. consensus/wal.go (+7 -2)
29. consensus/wal_generator.go (+5 -2)
30. crypto/merkle/simple_map_test.go (+20 -4)
31. crypto/multisig/threshold_pubkey_test.go (+5 -1)
32. crypto/secp256k1/internal/secp256k1/curve.go (+3 -1)
33. crypto/secp256k1/secp256k1_test.go (+5 -1)
34. crypto/xchacha20poly1305/vector_test.go (+22 -3)
35. evidence/pool.go (+6 -1)
36. evidence/reactor.go (+12 -2)
37. evidence/reactor_test.go (+7 -2)
38. libs/cli/flags/log_level.go (+5 -1)
39. libs/common/async.go (+3 -1)
40. libs/common/bit_array_test.go (+9 -1)
41. libs/common/cmap_test.go (+6 -1)
42. libs/log/filter.go (+6 -2)
43. libs/log/filter_test.go (+26 -5)
44. libs/log/tracing_logger_test.go (+27 -4)
45. libs/pubsub/pubsub.go (+5 -1)
46. libs/pubsub/pubsub_test.go (+26 -6)
47. libs/pubsub/query/query.go (+30 -8)
48. libs/pubsub/query/query_test.go (+73 -13)
49. lite/proxy/proxy.go (+26 -6)
50. lite/proxy/verifier.go (+7 -1)
51. mempool/clist_mempool_test.go (+8 -1)
52. mempool/mempool.go (+7 -1)
53. node/node.go (+21 -4)
54. p2p/conn/connection.go (+13 -2)
55. p2p/conn/connection_test.go (+5 -1)
56. p2p/conn/secret_connection.go (+4 -1)
57. p2p/netaddress_test.go (+29 -5)
58. p2p/node_info_test.go (+5 -1)
59. p2p/pex/addrbook_test.go (+14 -2)
60. p2p/pex/pex_reactor.go (+2 -1)
61. p2p/switch_test.go (+22 -6)
62. p2p/test_util.go (+11 -2)
63. p2p/upnp/upnp.go (+14 -3)
64. privval/doc.go (+4 -2)
65. privval/file.go (+14 -2)
66. privval/file_deprecated_test.go (+6 -1)
67. privval/signer_requestHandler.go (+5 -1)
68. rpc/client/httpclient.go (+4 -1)
69. rpc/client/localclient.go (+14 -3)
70. rpc/client/mock/abci.go (+12 -3)
71. rpc/client/mock/client.go (+4 -1)
72. rpc/client/rpc_test.go (+12 -2)
73. rpc/core/abci.go (+11 -2)
74. rpc/core/blocks.go (+6 -2)
75. rpc/core/doc.go (+11 -5)
76. rpc/core/evidence.go (+3 -1)
77. rpc/grpc/grpc_test.go (+4 -1)
78. rpc/lib/client/http_client.go (+22 -4)
79. rpc/lib/client/ws_client.go (+4 -1)
80. rpc/lib/server/handlers.go (+71 -16)
81. rpc/lib/server/handlers_test.go (+8 -2)
82. rpc/lib/server/http_server.go (+5 -1)
83. rpc/lib/types/types_test.go (+8 -2)
84. scripts/privValUpgrade.go (+5 -1)
85. scripts/privValUpgrade_test.go (+5 -1)
86. state/errors.go (+17 -3)
87. state/execution.go (+15 -2)
88. state/execution_test.go (+19 -3)
89. state/helpers_test.go (+25 -5)
90. state/state_test.go (+38 -6)
91. state/txindex/indexer_service.go (+4 -1)
92. state/txindex/kv/kv.go (+12 -2)
93. state/validation_test.go (+53 -7)
94. tools/tm-bench/main.go (+10 -2)
95. tools/tm-monitor/monitor/monitor.go (+6 -1)
96. tools/tm-monitor/monitor/monitor_test.go (+4 -1)
97. tools/tm-monitor/monitor/node.go (+5 -1)
98. tools/tm-monitor/monitor/node_test.go (+4 -1)
99. tools/tm-monitor/ton.go (+6 -1)
100. tools/tm-signer-harness/internal/test_harness_test.go (+7 -1)

.golangci.yml (+7 -1)

@@ -21,7 +21,7 @@ linters:
    - govet
    - ineffassign
    - interfacer
    # - lll
    - lll
    - misspell
    - maligned
    - nakedret
@@ -41,6 +41,12 @@ linters:
  disable:
    - errcheck
issues:
  exclude-rules:
    - linters:
        - lll
      source: "https://"
linters-settings:
  dogsled:
    max-blank-identifiers: 3
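The new exclude rule tells golangci-lint to drop lll findings on any line whose source matches the regexp "https://", so long reference URLs in comments no longer have to be wrapped. A minimal sketch of the kind of line this keeps exempt (the package and function are hypothetical; the URL is the one already cited in blockchain/v0/pool_test.go below):

package example

// Lines carrying a URL are skipped by lll under the rule above, for example:
// https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
func placeholder() {}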


abci/cmd/abci-cli/abci-cli.go (+15 -3)

@@ -111,16 +111,28 @@ func Execute() error {
}
func addGlobalFlags() {
RootCmd.PersistentFlags().StringVarP(&flagAddress, "address", "", "tcp://0.0.0.0:26658", "address of application socket")
RootCmd.PersistentFlags().StringVarP(&flagAddress,
"address",
"",
"tcp://0.0.0.0:26658",
"address of application socket")
RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc")
RootCmd.PersistentFlags().BoolVarP(&flagVerbose, "verbose", "v", false, "print the command and results as if it were a console session")
RootCmd.PersistentFlags().BoolVarP(&flagVerbose,
"verbose",
"v",
false,
"print the command and results as if it were a console session")
RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level")
}
func addQueryFlags() {
queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with")
queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at")
queryCmd.PersistentFlags().BoolVarP(&flagProve, "prove", "", false, "whether or not to return a merkle proof of the query result")
queryCmd.PersistentFlags().BoolVarP(&flagProve,
"prove",
"",
false,
"whether or not to return a merkle proof of the query result")
}
func addCounterFlags() {
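The rest of the patch applies the same mechanical change seen above: a call or declaration that overran the lll line-length limit is split so that each argument or parameter sits on its own line, with behaviour unchanged. A minimal sketch of the transformation against the same cobra/pflag API used by abci-cli (the exampleCmd command and flagOutput variable are hypothetical and not part of this patch):

package main

import "github.com/spf13/cobra"

var (
	exampleCmd = &cobra.Command{Use: "example"}
	flagOutput string
)

func addExampleFlags() {
	// Before: one registration line long enough for lll to flag.
	// exampleCmd.PersistentFlags().StringVarP(&flagOutput, "output", "o", "text", "output format used when printing results to the console")

	// After: the same call wrapped argument by argument; the flag behaves identically.
	exampleCmd.PersistentFlags().StringVarP(&flagOutput,
		"output",
		"o",
		"text",
		"output format used when printing results to the console")
}

func main() {
	addExampleFlags()
	_ = exampleCmd.Execute()
}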


abci/example/kvstore/kvstore_test.go (+6 -1)

@@ -175,7 +175,12 @@ func TestValUpdates(t *testing.T) {
}
func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
func makeApplyBlock(
t *testing.T,
kvstore types.Application,
heightInt int,
diff []types.ValidatorUpdate,
txs ...[]byte) {
// make and apply block
height := int64(heightInt)
hash := []byte("foo")


abci/types/application.go (+1 -1)

@@ -18,7 +18,7 @@ type Application interface {
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set


behaviour/reporter_test.go (+18 -3)

@@ -131,9 +131,24 @@ func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
}{
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
{"3", []bh.PeerBehaviour{bh.BlockPart("3", ""), bh.ConsensusVote("3", ""), bh.BlockPart("3", ""), bh.ConsensusVote("3", "")}},
{"4", []bh.PeerBehaviour{bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", "")}},
{"5", []bh.PeerBehaviour{bh.BlockPart("5", ""), bh.ConsensusVote("5", ""), bh.BlockPart("5", ""), bh.ConsensusVote("5", "")}},
{
"3",
[]bh.PeerBehaviour{bh.BlockPart("3", ""),
bh.ConsensusVote("3", ""),
bh.BlockPart("3", ""),
bh.ConsensusVote("3", "")}},
{
"4",
[]bh.PeerBehaviour{bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", ""),
bh.ConsensusVote("4", "")}},
{
"5",
[]bh.PeerBehaviour{bh.BlockPart("5", ""),
bh.ConsensusVote("5", ""),
bh.BlockPart("5", ""),
bh.ConsensusVote("5", "")}},
}
)


blockchain/v0/pool.go (+8 -1)

@@ -247,7 +247,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
requester := pool.requesters[block.Height]
if requester == nil {
pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
pool.Logger.Info(
"peer sent us a block we didn't expect",
"peer",
peerID,
"curHeight",
pool.height,
"blockHeight",
block.Height)
diff := pool.height - block.Height
if diff < 0 {
diff *= -1


blockchain/v0/pool_test.go (+2 -1)

@@ -42,7 +42,8 @@ func (p testPeer) runInputRoutine() {
func (p testPeer) simulateInput(input inputData) {
block := &types.Block{Header: types.Header{Height: input.request.Height}}
input.pool.AddBlock(input.request.PeerID, block, 123)
// TODO: uncommenting this creates a race which is detected by: https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
// TODO: uncommenting this creates a race which is detected by:
// https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
// see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}


blockchain/v0/reactor_test.go (+11 -2)

@@ -50,7 +50,11 @@ type BlockchainReactorPair struct {
app proxy.AppConns
}
func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {
func newBlockchainReactor(
logger log.Logger,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) BlockchainReactorPair {
if len(privVals) != 1 {
panic("only support one validator")
}
@@ -88,7 +92,12 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
vote, err := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVals[0], lastBlock.Header.ChainID)
vote, err := types.MakeVote(
lastBlock.Header.Height,
lastBlockMeta.BlockID,
state.Validators,
privVals[0],
lastBlock.Header.ChainID)
if err != nil {
panic(err)
}


blockchain/v1/pool_test.go (+18 -6)

@@ -198,14 +198,22 @@ func TestBlockPoolRemovePeer(t *testing.T) {
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
},
{
name: "delete the shortest of two peers without blocks",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
name: "delete the shortest of two peers without blocks",
pool: makeBlockPool(
testBcR,
100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
map[int64]tPBlocks{}),
args: args{"P1", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
},
{
name: "delete the tallest of two peers without blocks",
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
name: "delete the tallest of two peers without blocks",
pool: makeBlockPool(
testBcR,
100,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
map[int64]tPBlocks{}),
args: args{"P2", nil},
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
},
@@ -308,8 +316,12 @@ func TestBlockPoolSendRequestBatch(t *testing.T) {
expnumPendingBlockRequests: 2,
},
{
name: "n peers - send n*maxRequestsPerPeer block requests",
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, map[int64]tPBlocks{}),
name: "n peers - send n*maxRequestsPerPeer block requests",
pool: makeBlockPool(
testBcR,
10,
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
map[int64]tPBlocks{}),
maxRequestsPerPeer: 2,
expRequests: map[int64]bool{10: true, 11: true},
expPeerResults: []testPeerResult{


blockchain/v1/reactor_fsm.go (+6 -4)

@@ -162,10 +162,12 @@ var (
errNoTallerPeer = errors.New("fast sync timed out on waiting for a peer taller than this node")
// reported eventually to the switch
errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous") // handle return
errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights") // handle return
errNoPeerResponse = errors.New("fast sync timed out on peer block response") // xx
errBadDataFromPeer = errors.New("fast sync received block from wrong peer or block is bad") // xx
// handle return
errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous")
// handle return
errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
errNoPeerResponse = errors.New("fast sync timed out on peer block response") // xx
errBadDataFromPeer = errors.New("fast sync received block from wrong peer or block is bad") // xx
errDuplicateBlock = errors.New("fast sync received duplicate block from peer")
errBlockVerificationFailure = errors.New("fast sync block verification failure") // xx
errSlowPeer = errors.New("fast sync peer is not sending us data fast enough") // xx


blockchain/v1/reactor_test.go (+15 -3)

@@ -45,7 +45,11 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
}, privValidators
}
func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
func makeVote(
header *types.Header,
blockID types.BlockID,
valset *types.ValidatorSet,
privVal types.PrivValidator) *types.Vote {
addr := privVal.GetPubKey().Address()
idx, _ := valset.GetByAddress(addr)
vote := &types.Vote{
@@ -68,7 +72,11 @@ type BlockchainReactorPair struct {
conR *consensusReactorTest
}
func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) *BlockchainReactor {
func newBlockchainReactor(
logger log.Logger,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) *BlockchainReactor {
if len(privVals) != 1 {
panic("only support one validator")
}
@@ -129,7 +137,11 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
return bcReactor
}
func newBlockchainReactorPair(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {
func newBlockchainReactorPair(
logger log.Logger,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) BlockchainReactorPair {
consensusReactor := &consensusReactorTest{}
consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)


cmd/tendermint/commands/lite.go (+5 -1)

@@ -40,7 +40,11 @@ func init() {
LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address")
LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
LiteCmd.Flags().IntVar(&maxOpenConnections, "max-open-connections", 900, "Maximum number of simultaneous connections (including WebSocket).")
LiteCmd.Flags().IntVar(
&maxOpenConnections,
"max-open-connections",
900,
"Maximum number of simultaneous connections (including WebSocket).")
LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size")
}


cmd/tendermint/commands/run_node.go (+23 -5)

@@ -16,22 +16,37 @@ func AddNodeFlags(cmd *cobra.Command) {
cmd.Flags().String("moniker", config.Moniker, "Node Name")
// priv val flags
cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process")
cmd.Flags().String(
"priv_validator_laddr",
config.PrivValidatorListenAddr,
"Socket address to listen on for connections from external priv_validator process")
// node flags
cmd.Flags().Bool("fast_sync", config.FastSyncMode, "Fast blockchain syncing")
// abci flags
cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or one of: 'kvstore', 'persistent_kvstore', 'counter', 'counter_serial' or 'noop' for local testing.")
cmd.Flags().String(
"proxy_app",
config.ProxyApp,
"Proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore',"+
" 'counter',"+
" 'counter_serial' or 'noop' for local testing.")
cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")
// rpc flags
cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
cmd.Flags().String("rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required")
cmd.Flags().String(
"rpc.grpc_laddr",
config.RPC.GRPCListenAddress,
"GRPC listen address (BroadcastTx only). Port required")
cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enabled unsafe rpc methods")
// p2p flags
cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String(
"p2p.laddr",
config.P2P.ListenAddress,
"Node listen address. (0.0.0.0:0 means any interface, any port)")
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes")
cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers")
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "Enable/disable UPNP port forwarding")
@@ -40,7 +55,10 @@ func AddNodeFlags(cmd *cobra.Command) {
cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs")
// consensus flags
cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
cmd.Flags().Bool(
"consensus.create_empty_blocks",
config.Consensus.CreateEmptyBlocks,
"Set this to false to only produce blocks when there are txs or when the AppHash changes")
}
// NewRunNodeCmd returns the command that allows the CLI to start a node.


cmd/tendermint/commands/testnet.go (+9 -3)

@@ -51,13 +51,19 @@ func init() {
"Prefix the directory name for each node with (node results in node0, node1, ...)")
TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
"Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address")
"Update config of each node with the list of persistent peers build using either"+
" hostname-prefix or"+
" starting-ip-address")
TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
"Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "",
"Hostname suffix (\".xyz.com\" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
"Hostname suffix ("+
"\".xyz.com\""+
" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
"Starting IP address (\"192.168.0.1\" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
"Starting IP address ("+
"\"192.168.0.1\""+
" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{},
"Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)")
TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656,


config/config.go (+8 -4)

@@ -371,13 +371,15 @@ type RPCConfig struct {
// the certFile should be the concatenation of the server's certificate, any intermediates,
// and the CA's certificate.
//
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
// Otherwise, HTTP server is run.
TLSCertFile string `mapstructure:"tls_cert_file"`
// The path to a file containing matching private key that is used to create the HTTPS server.
// Migth be either absolute path or path related to tendermint's config directory.
//
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
// Otherwise, HTTP server is run.
TLSKeyFile string `mapstructure:"tls_key_file"`
}
@@ -814,7 +816,8 @@ func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
) * time.Nanosecond
}
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit).
// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits
// for a single block (ie. a commit).
func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
return t.Add(cfg.TimeoutCommit)
}
@@ -878,7 +881,8 @@ type TxIndexConfig struct {
//
// Options:
// 1) "null"
// 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
// 2) "kv" (default) - the simplest possible indexer,
// backed by key-value storage (defaults to levelDB; see DBBackend).
Indexer string `mapstructure:"indexer"`
// Comma-separated list of tags to index (by default the only tag is "tx.hash")


config/toml.go (+4 -2)

@@ -208,12 +208,14 @@ max_header_bytes = {{ .RPC.MaxHeaderBytes }}
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_cert_file = "{{ .RPC.TLSCertFile }}"
# The path to a file containing matching private key that is used to create the HTTPS server.
# Migth be either absolute path or path related to tendermint's config directory.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server.
# Otherwise, HTTP server is run.
tls_key_file = "{{ .RPC.TLSKeyFile }}"
##### peer to peer configuration options #####


consensus/byzantine_test.go (+8 -1)

@@ -206,7 +206,14 @@ func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *Cons
}
}
func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) {
func sendProposalAndParts(
height int64,
round int,
cs *ConsensusState,
peer p2p.Peer,
proposal *types.Proposal,
blockHash []byte,
parts *types.PartSet) {
// proposal
msg := &ProposalMessage{Proposal: proposal}
peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg))


consensus/common_test.go (+66 -12)

@@ -77,7 +77,10 @@ func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validato
}
}
func (vs *validatorStub) signVote(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
func (vs *validatorStub) signVote(
voteType types.SignedMsgType,
hash []byte,
header types.PartSetHeader) (*types.Vote, error) {
addr := vs.PrivValidator.GetPubKey().Address()
vote := &types.Vote{
ValidatorIndex: vs.Index,
@@ -101,7 +104,11 @@ func signVote(vs *validatorStub, voteType types.SignedMsgType, hash []byte, head
return v
}
func signVotes(voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote {
func signVotes(
voteType types.SignedMsgType,
hash []byte,
header types.PartSetHeader,
vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, voteType, hash, header)
@@ -148,7 +155,11 @@ func startTestRound(cs *ConsensusState, height int64, round int) {
}
// Create proposal block from cs1 but sign it with vs.
func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) {
func decideProposal(
cs1 *ConsensusState,
vs *validatorStub,
height int64,
round int) (proposal *types.Proposal, block *types.Block) {
cs1.mtx.Lock()
block, blockParts := cs1.createProposalBlock()
validRound := cs1.ValidRound
@@ -173,7 +184,12 @@ func addVotes(to *ConsensusState, votes ...*types.Vote) {
}
}
func signAddVotes(to *ConsensusState, voteType types.SignedMsgType, hash []byte, header types.PartSetHeader, vss ...*validatorStub) {
func signAddVotes(
to *ConsensusState,
voteType types.SignedMsgType,
hash []byte,
header types.PartSetHeader,
vss ...*validatorStub) {
votes := signVotes(voteType, hash, header, vss...)
addVotes(to, votes...)
}
@@ -208,7 +224,14 @@ func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorS
}
}
func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
func validatePrecommit(
t *testing.T,
cs *ConsensusState,
thisRound,
lockRound int,
privVal *validatorStub,
votedBlockHash,
lockedBlockHash []byte) {
precommits := cs.Votes.Precommits(thisRound)
address := privVal.GetPubKey().Address()
var vote *types.Vote
@@ -228,17 +251,33 @@ func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound in
if lockedBlockHash == nil {
if cs.LockedRound != lockRound || cs.LockedBlock != nil {
panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock))
panic(fmt.Sprintf(
"Expected to be locked on nil at round %d. Got locked at round %d with block %v",
lockRound,
cs.LockedRound,
cs.LockedBlock))
}
} else {
if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) {
panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash))
panic(fmt.Sprintf(
"Expected block to be locked on round %d, got %d. Got locked block %X, expected %X",
lockRound,
cs.LockedRound,
cs.LockedBlock.Hash(),
lockedBlockHash))
}
}
}
func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) {
func validatePrevoteAndPrecommit(
t *testing.T,
cs *ConsensusState,
thisRound,
lockRound int,
privVal *validatorStub,
votedBlockHash,
lockedBlockHash []byte) {
// verify the prevote
validatePrevote(t, cs, thisRound, privVal, votedBlockHash)
// verify precommit
@@ -273,12 +312,21 @@ func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Applicat
return newConsensusStateWithConfig(config, state, pv, app)
}
func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState {
func newConsensusStateWithConfig(
thisConfig *cfg.Config,
state sm.State,
pv types.PrivValidator,
app abci.Application) *ConsensusState {
blockDB := dbm.NewMemDB()
return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB)
}
func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState {
func newConsensusStateWithConfigAndBlockStore(
thisConfig *cfg.Config,
state sm.State,
pv types.PrivValidator,
app abci.Application,
blockDB dbm.DB) *ConsensusState {
// Get BlockStore
blockStore := store.NewBlockStore(blockDB)
@@ -597,7 +645,12 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou
}
// nPeers = nValidators + nNotValidator
func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
func randConsensusNetWithPeers(
nValidators,
nPeers int,
testName string,
tickerFunc func() TimeoutTicker,
appFunc func(string) abci.Application) ([]*ConsensusState, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower)
css := make([]*ConsensusState, nPeers)
logger := consensusLogger()
@@ -631,7 +684,8 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF
app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
vals := types.TM2PB.ValidatorUpdates(state.Validators)
if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
state.Version.Consensus.App = kvstore.ProtocolVersion //simulate handshake, receive app version. If don't do this, replay test will fail
// simulate handshake, receive app version. If don't do this, replay test will fail
state.Version.Consensus.App = kvstore.ProtocolVersion
}
app.InitChain(abci.RequestInitChain{Validators: vals})
//sm.SaveState(stateDB,state) //height 1's validatorsInfo already saved in LoadStateFromDBOrGenesisDoc above


consensus/reactor.go (+20 -5)

@@ -137,8 +137,9 @@ func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
RecvMessageCapacity: maxMsgSize,
},
{
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round
ID: DataChannel, // maybe split between gossiping current block and catchup stuff
// once we gossip the whole block there's nothing left to send until next height or round
Priority: 10,
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: maxMsgSize,
@@ -670,7 +671,11 @@ OUTER_LOOP:
}
}
func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool {
func (conR *ConsensusReactor) gossipVotesForHeight(
logger log.Logger,
rs *cstypes.RoundState,
prs *cstypes.PeerRoundState,
ps *PeerState) bool {
// If there are lastCommits to send...
if prs.Step == cstypes.RoundStepNewHeight {
@@ -1119,7 +1124,13 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida
NOTE: This is wrong, 'round' could change.
e.g. if orig round is not the same as block LastCommit round.
if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
panic(fmt.Sprintf("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
panic(fmt.Sprintf(
"Conflicting CatchupCommitRound. Height: %v,
Orig: %v,
New: %v",
height,
ps.CatchupCommitRound,
round))
}
*/
if ps.PRS.CatchupCommitRound == round {
@@ -1211,7 +1222,11 @@ func (ps *PeerState) SetHasVote(vote *types.Vote) {
}
func (ps *PeerState) setHasVote(height int64, round int, type_ types.SignedMsgType, index int) {
logger := ps.logger.With("peerH/R", fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", fmt.Sprintf("%d/%d", height, round))
logger := ps.logger.With(
"peerH/R",
fmt.Sprintf("%d/%d", ps.PRS.Height, ps.PRS.Round),
"H/R",
fmt.Sprintf("%d/%d", height, round))
logger.Debug("setHasVote", "type", type_, "index", index)
// NOTE: some may be nil BitArrays -> no side effects.


consensus/reactor_test.go (+35 -8)

@@ -317,7 +317,11 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
func TestReactorVotingPowerChange(t *testing.T) {
nVals := 4
logger := log.TestingLogger()
css, cleanup := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore)
css, cleanup := randConsensusNet(
nVals,
"consensus_voting_power_changes_test",
newMockTickerFunc(true),
newPersistentKVStore)
defer cleanup()
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
defer stopConsensusNet(logger, reactors, eventBuses)
@@ -348,7 +352,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
t.Fatalf(
"expected voting power to change (before: %d, after: %d)",
previousTotalVotingPower,
css[0].GetRoundState().LastValidators.TotalVotingPower())
}
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
@@ -360,7 +367,10 @@ func TestReactorVotingPowerChange(t *testing.T) {
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
t.Fatalf(
"expected voting power to change (before: %d, after: %d)",
previousTotalVotingPower,
css[0].GetRoundState().LastValidators.TotalVotingPower())
}
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
@@ -372,14 +382,22 @@ func TestReactorVotingPowerChange(t *testing.T) {
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower())
t.Fatalf(
"expected voting power to change (before: %d, after: %d)",
previousTotalVotingPower,
css[0].GetRoundState().LastValidators.TotalVotingPower())
}
}
func TestReactorValidatorSetChanges(t *testing.T) {
nPeers := 7
nVals := 4
css, _, _, cleanup := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
css, _, _, cleanup := randConsensusNetWithPeers(
nVals,
nPeers,
"consensus_val_set_changes_test",
newMockTickerFunc(true),
newPersistentKVStoreWithPath)
defer cleanup()
logger := log.TestingLogger()
@@ -440,7 +458,10 @@ func TestReactorValidatorSetChanges(t *testing.T) {
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower())
t.Errorf(
"expected voting power to change (before: %d, after: %d)",
previousTotalVotingPower,
css[nVals].GetRoundState().LastValidators.TotalVotingPower())
}
//---------------------------------------------------------------------------
@@ -570,7 +591,10 @@ func waitForBlockWithUpdatedValsAndValidateIt(
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
break LOOP
} else {
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", "height", newBlock.Height)
css[j].Logger.Debug(
"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
"height",
newBlock.Height)
}
}
@@ -582,7 +606,10 @@ func waitForBlockWithUpdatedValsAndValidateIt(
// expects high synchrony!
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
if block.LastCommit.Size() != len(activeVals) {
return fmt.Errorf("Commit size doesn't match number of active validators. Got %d, expected %d", block.LastCommit.Size(), len(activeVals))
return fmt.Errorf(
"Commit size doesn't match number of active validators. Got %d, expected %d",
block.LastCommit.Size(),
len(activeVals))
}
for _, vote := range block.LastCommit.Precommits {


consensus/replay.go (+14 -2)

@@ -290,7 +290,14 @@ func (h *Handshaker) ReplayBlocks(
) ([]byte, error) {
storeBlockHeight := h.store.Height()
stateBlockHeight := state.LastBlockHeight
h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight)
h.logger.Info(
"ABCI Replay Blocks",
"appHeight",
appBlockHeight,
"storeHeight",
storeBlockHeight,
"stateHeight",
stateBlockHeight)
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
if appBlockHeight == 0 {
@@ -405,7 +412,12 @@ func (h *Handshaker) ReplayBlocks(
appBlockHeight, storeBlockHeight, stateBlockHeight))
}
func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) {
func (h *Handshaker) replayBlocks(
state sm.State,
proxyApp proxy.AppConns,
appBlockHeight,
storeBlockHeight int64,
mutateState bool) ([]byte, error) {
// App is further behind than it should be, so we need to replay blocks.
// We replay all blocks from appBlockHeight+1.
//


consensus/replay_test.go (+49 -11)

@@ -67,7 +67,12 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, consensusReplayConfig *
logger := log.TestingLogger()
state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
cs := newConsensusStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewKVStoreApplication(),
blockDB)
cs.SetLogger(logger)
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
@@ -147,7 +152,12 @@ LOOP:
stateDB := blockDB
state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
cs := newConsensusStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewKVStoreApplication(),
blockDB)
cs.SetLogger(logger)
// start sending transactions
@@ -256,7 +266,9 @@ func (w *crashingWAL) WriteSync(m WALMessage) {
func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }
func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
func (w *crashingWAL) SearchForEndHeight(
height int64,
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
return w.next.SearchForEndHeight(height, options)
}
@@ -296,7 +308,12 @@ var modes = []uint{0, 1, 2}
func TestSimulateValidatorsChange(t *testing.T) {
nPeers := 7
nVals := 4
css, genDoc, config, cleanup := randConsensusNetWithPeers(nVals, nPeers, "replay_test", newMockTickerFunc(true), newPersistentKVStoreWithPath)
css, genDoc, config, cleanup := randConsensusNetWithPeers(
nVals,
nPeers,
"replay_test",
newMockTickerFunc(true),
newPersistentKVStoreWithPath)
sim.Config = config
sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
sim.CleanupFunc = cleanup
@@ -585,7 +602,8 @@ func tempWALWithData(data []byte) string {
return walFile.Name()
}
// Make some blocks. Start a fresh app and apply nBlocks blocks. Then restart the app and sync it up with the remaining blocks
// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uint, testValidatorsChange bool) {
var chain []*types.Block
var commits []*types.Commit
@@ -631,7 +649,8 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
latestAppHash := state.AppHash
// make a new client creator
kvstoreApp := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
kvstoreApp := kvstore.NewPersistentKVStoreApplication(
filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_a", nBlocks, mode)))
clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp)
if nBlocks > 0 {
@@ -663,7 +682,10 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
t.Fatalf("Expected app hashes to match after handshake/replay. got %X, expected %X", res.LastBlockAppHash, latestAppHash)
t.Fatalf(
"Expected app hashes to match after handshake/replay. got %X, expected %X",
res.LastBlockAppHash,
latestAppHash)
}
expectedBlocksToSync := numBlocks - nBlocks
@@ -728,9 +750,17 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB,
}
func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, nBlocks int, mode uint) sm.State {
func buildTMStateFromChain(
config *cfg.Config,
stateDB dbm.DB,
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint) sm.State {
// run the whole chain against this client to build up the tendermint state
clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))))
clientCreator := proxy.NewLocalClientCreator(
kvstore.NewPersistentKVStoreApplication(
filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))))
proxyApp := proxy.NewAppConns(clientCreator)
if err := proxyApp.Start(); err != nil {
panic(err)
@@ -853,7 +883,12 @@ func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.Bloc
lastCommit := types.NewCommit(types.BlockID{}, nil)
if height > 1 {
vote, _ := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVal, lastBlock.Header.ChainID)
vote, _ := types.MakeVote(
lastBlock.Header.Height,
lastBlockMeta.BlockID,
state.Validators,
privVal,
lastBlock.Header.ChainID)
voteCommitSig := vote.CommitSig()
lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig})
}
@@ -994,7 +1029,10 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {
}
// fresh state and mock store
func stateAndStore(config *cfg.Config, pubKey crypto.PubKey, appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) {
func stateAndStore(
config *cfg.Config,
pubKey crypto.PubKey,
appVersion version.Protocol) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile())
state.Version.Consensus.App = appVersion
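
The recurring fix in this file, and in most files below, is to split a declaration or call that exceeds the linter's line-length limit so that each argument sits on its own line, with the closing parenthesis and any return type kept on the last line. A self-contained sketch of that wrapping style, using hypothetical names that are not part of this diff:

    package main

    import "fmt"

    // describeReplay stands in for the wrapped test helpers above: one
    // parameter per line, return type on the closing line, so no single
    // line exceeds the length limit.
    func describeReplay(
        dbDir string,
        nBlocks int,
        mode uint,
        validatorsChange bool) string {
        return fmt.Sprintf(
            "dir=%s blocks=%d mode=%d change=%v",
            dbDir,
            nBlocks,
            mode,
            validatorsChange)
    }

    func main() {
        fmt.Println(describeReplay("/tmp/replay_test", 6, 0, false))
    }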


+ 128
- 24
consensus/state.go

@ -425,7 +425,11 @@ func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *ty
}
// SetProposalAndBlock inputs the proposal and all block parts.
func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID p2p.ID) error {
func (cs *ConsensusState) SetProposalAndBlock(
proposal *types.Proposal,
block *types.Block,
parts *types.PartSet,
peerID p2p.ID) error {
if err := cs.SetProposal(proposal, peerID); err != nil {
return err
}
@ -511,7 +515,12 @@ func (cs *ConsensusState) updateToState(state sm.State) {
// signal the new round step, because other services (eg. txNotifier)
// depend on having an up-to-date peer state!
if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) {
cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1)
cs.Logger.Info(
"Ignoring updateToState()",
"newHeight",
state.LastBlockHeight+1,
"oldHeight",
cs.state.LastBlockHeight+1)
cs.newStep()
return
}
@ -679,7 +688,14 @@ func (cs *ConsensusState) handleMsg(mi msgInfo) {
}
if err != nil && msg.Round != cs.Round {
cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
cs.Logger.Debug(
"Received block part from wrong round",
"height",
cs.Height,
"csRound",
cs.Round,
"blockRound",
msg.Round)
err = nil
}
case *VoteMessage:
@ -791,7 +807,13 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) {
logger.Debug(fmt.Sprintf("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf(
"enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v",
height,
round,
cs.Height,
cs.Round,
cs.Step))
return
}
@ -855,13 +877,20 @@ func (cs *ConsensusState) needProofBlock(height int64) bool {
}
// Enter (CreateEmptyBlocks): from enterNewRound(height,round)
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ):
// after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval
// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool
func (cs *ConsensusState) enterPropose(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) {
logger.Debug(fmt.Sprintf("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf(
"enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v",
height,
round,
cs.Height,
cs.Round,
cs.Step))
return
}
logger.Info(fmt.Sprintf("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
@ -897,10 +926,18 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
logger.Debug("This node is a validator")
if cs.isProposer(address) {
logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
logger.Info("enterPropose: Our turn to propose",
"proposer",
cs.Validators.GetProposer().Address,
"privValidator",
cs.privValidator)
cs.decideProposal(height, round)
} else {
logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator)
logger.Info("enterPropose: Not our turn to propose",
"proposer",
cs.Validators.GetProposer().Address,
"privValidator",
cs.privValidator)
}
}
@ -924,7 +961,8 @@ func (cs *ConsensusState) defaultDecideProposal(height int64, round int) {
}
}
// Flush the WAL. Otherwise, we may not recompute the same proposal to sign, and the privValidator will refuse to sign anything.
// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
// and the privValidator will refuse to sign anything.
cs.wal.FlushAndSync()
// Make proposal
@ -992,7 +1030,13 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
// Otherwise vote nil.
func (cs *ConsensusState) enterPrevote(height int64, round int) {
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) {
cs.Logger.Debug(fmt.Sprintf("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(fmt.Sprintf(
"enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v",
height,
round,
cs.Height,
cs.Round,
cs.Step))
return
}
@ -1049,7 +1093,13 @@ func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
logger.Debug(fmt.Sprintf("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf(
"enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v",
height,
round,
cs.Height,
cs.Round,
cs.Step))
return
}
if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
@ -1077,7 +1127,13 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) {
logger := cs.Logger.With("height", height, "round", round)
if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
logger.Debug(fmt.Sprintf("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf(
"enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v",
height,
round,
cs.Height,
cs.Round,
cs.Step))
return
}
@ -1201,7 +1257,13 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
logger := cs.Logger.With("height", height, "commitRound", commitRound)
if cs.Height != height || cstypes.RoundStepCommit <= cs.Step {
logger.Debug(fmt.Sprintf("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
logger.Debug(fmt.Sprintf(
"enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v",
height,
commitRound,
cs.Height,
cs.Round,
cs.Step))
return
}
logger.Info(fmt.Sprintf("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step))
@ -1235,7 +1297,12 @@ func (cs *ConsensusState) enterCommit(height int64, commitRound int) {
// If we don't have the block being committed, set up to get it.
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
logger.Info(
"Commit is for a block we don't know about. Set ProposalBlock=nil",
"proposal",
cs.ProposalBlock.Hash(),
"commit",
blockID.Hash)
// We're getting the wrong block.
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil
@ -1265,7 +1332,12 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
if !cs.ProposalBlock.HashesTo(blockID.Hash) {
// TODO: this happens every time if we're not a validator (ugly logs)
// TODO: ^^ wait, why does it matter that we're a validator?
logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
logger.Info(
"Attempt to finalize failed. We don't have the commit block.",
"proposal-block",
cs.ProposalBlock.Hash(),
"commit-block",
blockID.Hash)
return
}
@ -1276,7 +1348,12 @@ func (cs *ConsensusState) tryFinalizeCommit(height int64) {
// Increment height and goto cstypes.RoundStepNewHeight
func (cs *ConsensusState) finalizeCommit(height int64) {
if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
cs.Logger.Debug(fmt.Sprintf("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
cs.Logger.Debug(fmt.Sprintf(
"finalizeCommit(%v): Invalid args. Current step: %v/%v/%v",
height,
cs.Height,
cs.Round,
cs.Step))
return
}
@ -1339,7 +1416,10 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
// Execute and commit the block, update and save the state, and update the mempool.
// NOTE The block.AppHash wont reflect these txs until the next block.
var err error
stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block)
stateCopy, err = cs.blockExec.ApplyBlock(
stateCopy,
types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()},
block)
if err != nil {
cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
err := cmn.Kill()
@ -1446,7 +1526,8 @@ func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error {
}
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *ConsensusState) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.ID) (added bool, err error) {
height, round, part := msg.Height, msg.Round, msg.Part
@ -1528,7 +1609,14 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
} else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
addr := cs.privValidator.GetPubKey().Address()
if bytes.Equal(vote.ValidatorAddress, addr) {
cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type)
cs.Logger.Error(
"Found conflicting vote from ourselves. Did you unsafe_reset a validator?",
"height",
vote.Height,
"round",
vote.Round,
"type",
vote.Type)
return added, err
}
cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence)
@ -1537,7 +1625,8 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
// Either
// 1) bad peer OR
// 2) not a bad peer? this can also err sometimes with "Unexpected step" OR
// 3) tmkms use with multiple validators connecting to a single tmkms instance (https://github.com/tendermint/tendermint/issues/3839).
// 3) tmkms use with multiple validators connecting to a single tmkms instance
// (https://github.com/tendermint/tendermint/issues/3839).
cs.Logger.Info("Error attempting to add vote", "err", err) cs.Logger.Info("Error attempting to add vote", "err", err)
return added, ErrAddingVote return added, ErrAddingVote
} }
@ -1547,8 +1636,19 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
//----------------------------------------------------------------------------- //-----------------------------------------------------------------------------
func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "valIndex", vote.ValidatorIndex, "csHeight", cs.Height)
func (cs *ConsensusState) addVote(
vote *types.Vote,
peerID p2p.ID) (added bool, err error) {
cs.Logger.Debug(
"addVote",
"voteHeight",
vote.Height,
"voteType",
vote.Type,
"valIndex",
vote.ValidatorIndex,
"csHeight",
cs.Height)
// A precommit for the previous height?
// These come in while we wait timeoutCommit
@ -1694,8 +1794,12 @@ func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool,
return added, err
}
func (cs *ConsensusState) signVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) {
// Flush the WAL. Otherwise, we may not recompute the same vote to sign, and the privValidator will refuse to sign anything.
func (cs *ConsensusState) signVote(
type_ types.SignedMsgType,
hash []byte,
header types.PartSetHeader) (*types.Vote, error) {
// Flush the WAL. Otherwise, we may not recompute the same vote to sign,
// and the privValidator will refuse to sign anything.
cs.wal.FlushAndSync()
addr := cs.privValidator.GetPubKey().Address()
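
Many of the changes above wrap structured logging calls rather than signatures: the message stays on the first line and each key and value then gets its own line. A self-contained sketch of that convention with a stand-in logger (the real code goes through cs.Logger, which is not reproduced here):

    package main

    import "fmt"

    // logInfo mimics only the key/value calling style of the consensus
    // logger; it is a stand-in for illustration, not the tendermint API.
    func logInfo(msg string, keyvals ...interface{}) {
        fmt.Println(append([]interface{}{msg}, keyvals...)...)
    }

    func main() {
        height, round := int64(10), 0
        logInfo(
            "enterPropose: Not our turn to propose",
            "height",
            height,
            "round",
            round)
    }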


+ 28
- 6
consensus/state_test.go

@ -107,7 +107,10 @@ func TestStateProposerSelection2(t *testing.T) {
addr := vss[(i+round)%len(vss)].GetPubKey().Address()
correctProposer := addr
if !bytes.Equal(prop.Address, correctProposer) {
panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
panic(fmt.Sprintf(
"expected RoundState.Validators.GetProposer() to be validator %d. Got %X",
(i+2)%len(vss),
prop.Address))
}
rs := cs1.GetRoundState()
@ -432,7 +435,10 @@ func TestStateLockNoPOL(t *testing.T) {
// now we're on a new round and are the proposer
if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
panic(fmt.Sprintf(
"Expected proposal block to be locked block. Got %v, Expected %v",
rs.ProposalBlock,
rs.LockedBlock))
}
ensurePrevote(voteCh, height, round) // prevote
@ -446,7 +452,12 @@ func TestStateLockNoPOL(t *testing.T) {
validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal
signAddVotes(cs1, types.PrecommitType, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
signAddVotes(
cs1,
types.PrecommitType,
hash,
rs.ProposalBlock.MakePartSet(partSize).Header(),
vs2) // NOTE: conflicting precommits at same height
ensurePrecommit(voteCh, height, round)
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
@ -486,7 +497,12 @@ func TestStateLockNoPOL(t *testing.T) {
ensurePrecommit(voteCh, height, round)
validatePrecommit(t, cs1, round, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal
signAddVotes(cs1, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
signAddVotes(
cs1,
types.PrecommitType,
propBlock.Hash(),
propBlock.MakePartSet(partSize).Header(),
vs2) // NOTE: conflicting precommits at same height
ensurePrecommit(voteCh, height, round)
}
@ -1330,7 +1346,10 @@ func TestStartNextHeightCorrectly(t *testing.T) {
ensureNewTimeout(timeoutProposeCh, height+1, round, cs1.config.Propose(round).Nanoseconds())
rs = cs1.GetRoundState()
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each round")
assert.False(
t,
rs.TriggeredTimeoutPrecommit,
"triggeredTimeoutPrecommit should be false at the beginning of each round")
}
func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
@ -1382,7 +1401,10 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
ensureNewProposal(proposalCh, height+1, 0)
rs = cs1.GetRoundState()
assert.False(t, rs.TriggeredTimeoutPrecommit, "triggeredTimeoutPrecommit should be false at the beginning of each height")
assert.False(
t,
rs.TriggeredTimeoutPrecommit,
"triggeredTimeoutPrecommit should be false at the beginning of each height")
}
//------------------------------------------------------------------------------------------


+ 7
- 2
consensus/types/height_vote_set.go

@ -16,7 +16,8 @@ type RoundVoteSet struct {
}
var (
GotVoteFromUnwantedRoundError = errors.New("Peer has sent a vote that does not match our round for more than one round")
GotVoteFromUnwantedRoundError = errors.New(
"Peer has sent a vote that does not match our round for more than one round")
)
/*
@ -176,7 +177,11 @@ func (hvs *HeightVoteSet) getVoteSet(round int, type_ types.SignedMsgType) *type
// NOTE: if there are too many peers, or too much peer churn,
// this can cause memory issues.
// TODO: implement ability to remove peers too
func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ types.SignedMsgType, peerID p2p.ID, blockID types.BlockID) error {
func (hvs *HeightVoteSet) SetPeerMaj23(
round int,
type_ types.SignedMsgType,
peerID p2p.ID,
blockID types.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(type_) {


+ 22
- 12
consensus/types/peer_round_state.go

@ -13,21 +13,31 @@ import (
// PeerRoundState contains the known state of a peer.
// NOTE: Read-only when returned by PeerState.GetRoundState().
type PeerRoundState struct {
Height int64 `json:"height"` // Height peer is at
Round int `json:"round"` // Round peer is at, -1 if unknown.
Step RoundStepType `json:"step"` // Step peer is at
// Estimated start of round 0 at this height
StartTime time.Time `json:"start_time"`
// True if peer has proposal for this round
Proposal bool `json:"proposal"`
ProposalBlockPartsHeader types.PartSetHeader `json:"proposal_block_parts_header"` //
StartTime time.Time `json:"start_time"` // Estimated start of round 0 at this height
Height int64 `json:"height"` // Height peer is at
Round int `json:"round"` // Round peer is at, -1 if unknown.
ProposalBlockParts *cmn.BitArray `json:"proposal_block_parts"` //
ProposalPOLRound int `json:"proposal_pol_round"` // Proposal's POL round. -1 if none.
ProposalPOL *cmn.BitArray `json:"proposal_pol"` // nil until ProposalPOLMessage received.
Prevotes *cmn.BitArray `json:"prevotes"` // All votes peer has for this round
Precommits *cmn.BitArray `json:"precommits"` // All precommits peer has for this round
LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none.
LastCommit *cmn.BitArray `json:"last_commit"` // All commit precommits of commit for last height.
CatchupCommitRound int `json:"catchup_commit_round"` // Round that we have commit for. Not necessarily unique. -1 if none.
CatchupCommit *cmn.BitArray `json:"catchup_commit"` // All commit precommits peer has for this height & CatchupCommitRound
Step RoundStepType `json:"step"` // Step peer is at
Proposal bool `json:"proposal"` // True if peer has proposal for this round
// nil until ProposalPOLMessage received.
ProposalPOL *cmn.BitArray `json:"proposal_pol"`
Prevotes *cmn.BitArray `json:"prevotes"` // All votes peer has for this round
Precommits *cmn.BitArray `json:"precommits"` // All precommits peer has for this round
LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none.
LastCommit *cmn.BitArray `json:"last_commit"` // All commit precommits of commit for last height.
// Round that we have commit for. Not necessarily unique. -1 if none.
CatchupCommitRound int `json:"catchup_commit_round"`
// All commit precommits peer has for this height & CatchupCommitRound
CatchupCommit *cmn.BitArray `json:"catchup_commit"`
}
// String returns a string representation of the PeerRoundState
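
The struct changes above rely on a second technique: when a trailing comment pushes a field declaration past the limit, the comment moves onto its own line above the field. A self-contained sketch with a hypothetical struct, not the actual PeerRoundState:

    package main

    import "fmt"

    type peerInfo struct {
        Height int64 `json:"height"` // a short comment can stay inline

        // Round that we have a commit for. Not necessarily unique. -1 if none.
        CatchupCommitRound int `json:"catchup_commit_round"`
    }

    func main() {
        fmt.Printf("%+v\n", peerInfo{Height: 1, CatchupCommitRound: -1})
    }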


+ 21
- 15
consensus/types/round_state.go

@ -65,25 +65,31 @@ func (rs RoundStepType) String() string {
// NOTE: Not thread safe. Should only be manipulated by functions downstream
// of the cs.receiveRoutine
type RoundState struct {
StartTime time.Time `json:"start_time"`
CommitTime time.Time `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found
Height int64 `json:"height"` // Height we are working on
Round int `json:"round"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
ValidRound int `json:"valid_round"` // Last known round with POL for non-nil valid block.
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
ValidBlockParts *types.PartSet `json:"valid_block_parts"` // Last known block parts of POL metnioned above.
Height int64 `json:"height"` // Height we are working on
Round int `json:"round"`
Step RoundStepType `json:"step"`
StartTime time.Time `json:"start_time"`
// Subjective time when +2/3 precommits for Block at Round were found
CommitTime time.Time `json:"commit_time"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
// Last known round with POL for non-nil valid block.
ValidRound int `json:"valid_round"`
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
// Last known block parts of POL metnioned above.
ValidBlockParts *types.PartSet `json:"valid_block_parts"`
Votes *HeightVoteSet `json:"votes"`
CommitRound int `json:"commit_round"` //
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
LastValidators *types.ValidatorSet `json:"last_validators"`
Step RoundStepType `json:"step"`
TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"`
}


+ 7
- 2
consensus/wal.go

@ -210,7 +210,9 @@ type WALSearchOptions struct {
// Group reader will be nil if found equals false.
//
// CONTRACT: caller must close group reader.
func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
func (wal *baseWAL) SearchForEndHeight(
height int64,
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
var (
msg *TimedWALMessage
gr *auto.GroupReader
@ -355,7 +357,10 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
length := binary.BigEndian.Uint32(b)
if length > maxMsgSizeBytes {
return nil, DataCorruptionError{fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes)}
return nil, DataCorruptionError{fmt.Errorf(
"length %d exceeded maximum possible value of %d bytes",
length,
maxMsgSizeBytes)}
}
data := make([]byte, length)
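
The doc comment above also states a contract: when found is true, the caller must close the returned reader. A sketch of a caller honoring that contract, written against a stand-in interface assumed from the wrapped signature rather than the full tendermint WAL type:

    package main

    import (
        "fmt"
        "io"
        "strings"
    )

    // endHeightSearcher mirrors only the shape of SearchForEndHeight above.
    type endHeightSearcher interface {
        SearchForEndHeight(height int64) (rd io.ReadCloser, found bool, err error)
    }

    type fakeWAL struct{}

    func (fakeWAL) SearchForEndHeight(height int64) (io.ReadCloser, bool, error) {
        return io.NopCloser(strings.NewReader("#ENDHEIGHT: 1\n")), true, nil
    }

    func main() {
        var wal endHeightSearcher = fakeWAL{}
        rd, found, err := wal.SearchForEndHeight(1)
        if err != nil || !found {
            fmt.Println("no end-height record")
            return
        }
        // CONTRACT from the comment above: the caller closes the reader.
        defer rd.Close()
        data, _ := io.ReadAll(rd)
        fmt.Print(string(data))
    }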


+ 5
- 2
consensus/wal_generator.go

@ -27,7 +27,8 @@ import (
// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
// stripped down version of node (proxy app, event bus, consensus state) with a
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created. If the node fails to produce given numBlocks, it returns an error.
// (byteBufferWAL) and waits until numBlocks are created.
// If the node fails to produce given numBlocks, it returns an error.
func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
config := getConfig(t)
@ -197,7 +198,9 @@ func (w *byteBufferWAL) WriteSync(m WALMessage) {
func (w *byteBufferWAL) FlushAndSync() error { return nil }
func (w *byteBufferWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
func (w *byteBufferWAL) SearchForEndHeight(
height int64,
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
return nil, false, nil
}


+ 20
- 4
crypto/merkle/simple_map_test.go

@ -16,11 +16,27 @@ func TestSimpleMap(t *testing.T) {
{[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"}, {[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"},
{[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"}, {[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"},
// swap order with 2 keys // swap order with 2 keys
{[]string{"key1", "key2"}, []string{"value1", "value2"}, "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3"},
{[]string{"key2", "key1"}, []string{"value2", "value1"}, "8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3"},
{
[]string{"key1", "key2"},
[]string{"value1", "value2"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
{
[]string{"key2", "key1"},
[]string{"value2", "value1"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
// swap order with 3 keys
{[]string{"key1", "key2", "key3"}, []string{"value1", "value2", "value3"}, "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc"},
{[]string{"key1", "key3", "key2"}, []string{"value1", "value3", "value2"}, "1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc"},
{
[]string{"key1", "key2", "key3"},
[]string{"value1", "value2", "value3"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
{
[]string{"key1", "key3", "key2"},
[]string{"value1", "value3", "value2"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
}
for i, tc := range tests {
db := newSimpleMap()


+ 5
- 1
crypto/multisig/threshold_pubkey_test.go

@ -67,7 +67,11 @@ func TestThresholdMultisigValidCases(t *testing.T) {
)
require.NoError(
t,
multisignature.AddSignatureFromPubKey(tc.signatures[tc.signingIndices[tc.k]], tc.pubkeys[tc.signingIndices[tc.k]], tc.pubkeys),
multisignature.AddSignatureFromPubKey(
tc.signatures[tc.signingIndices[tc.k]],
tc.pubkeys[tc.signingIndices[tc.k]],
tc.pubkeys,
),
)
require.True(
t,


+ 3
- 1
crypto/secp256k1/internal/secp256k1/curve.go

@ -41,7 +41,9 @@ import (
/*
#include "libsecp256k1/include/secp256k1.h"
extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx, const unsigned char *point, const unsigned char *scalar);
extern int secp256k1_ext_scalar_mul(const secp256k1_context* ctx,
const unsigned char *point,
const unsigned char *scalar);
*/
import "C"


+ 5
- 1
crypto/secp256k1/secp256k1_test.go

@ -94,7 +94,11 @@ func TestGenPrivKeySecp256k1(t *testing.T) {
secret []byte
}{
{"empty secret", []byte{}},
{"some long secret", []byte("We live in a society exquisitely dependent on science and technology, in which hardly anyone knows anything about science and technology.")},
{
"some long secret",
[]byte("We live in a society exquisitely dependent on science and technology, " +
"in which hardly anyone knows anything about science and technology."),
},
{"another seed used in cosmos tests #1", []byte{0}}, {"another seed used in cosmos tests #1", []byte{0}},
{"another seed used in cosmos tests #2", []byte("mySecret")}, {"another seed used in cosmos tests #2", []byte("mySecret")},
{"another seed used in cosmos tests #3", []byte("")}, {"another seed used in cosmos tests #3", []byte("")},


+ 22
- 3
crypto/xchacha20poly1305/vector_test.go

@ -94,10 +94,29 @@ var vectors = []struct {
key, nonce, ad, plaintext, ciphertext []byte
}{
{
[]byte{0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f},
[]byte{
0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a,
0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95,
0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
},
[]byte{0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b},
[]byte{0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7},
[]byte("Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it."),
[]byte{0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56, 0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49, 0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8, 0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2, 0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6, 0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5, 0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32, 0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14, 0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62, 0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b, 0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab, 0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea},
[]byte(
"Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.",
),
[]byte{
0x45, 0x3c, 0x06, 0x93, 0xa7, 0x40, 0x7f, 0x04, 0xff, 0x4c, 0x56,
0xae, 0xdb, 0x17, 0xa3, 0xc0, 0xa1, 0xaf, 0xff, 0x01, 0x17, 0x49,
0x30, 0xfc, 0x22, 0x28, 0x7c, 0x33, 0xdb, 0xcf, 0x0a, 0xc8, 0xb8,
0x9a, 0xd9, 0x29, 0x53, 0x0a, 0x1b, 0xb3, 0xab, 0x5e, 0x69, 0xf2,
0x4c, 0x7f, 0x60, 0x70, 0xc8, 0xf8, 0x40, 0xc9, 0xab, 0xb4, 0xf6,
0x9f, 0xbf, 0xc8, 0xa7, 0xff, 0x51, 0x26, 0xfa, 0xee, 0xbb, 0xb5,
0x58, 0x05, 0xee, 0x9c, 0x1c, 0xf2, 0xce, 0x5a, 0x57, 0x26, 0x32,
0x87, 0xae, 0xc5, 0x78, 0x0f, 0x04, 0xec, 0x32, 0x4c, 0x35, 0x14,
0x12, 0x2c, 0xfc, 0x32, 0x31, 0xfc, 0x1a, 0x8b, 0x71, 0x8a, 0x62,
0x86, 0x37, 0x30, 0xa2, 0x70, 0x2b, 0xb7, 0x63, 0x66, 0x11, 0x6b,
0xed, 0x09, 0xe0, 0xfd, 0x5c, 0x6d, 0x84, 0xb6, 0xb0, 0xc1, 0xab,
0xaf, 0x24, 0x9d, 0x5d, 0xd0, 0xf7, 0xf5, 0xa7, 0xea,
},
},
}

+ 6
- 1
evidence/pool.go

@ -76,7 +76,12 @@ func (evpool *EvidencePool) Update(block *types.Block, state sm.State) {
// sanity check
if state.LastBlockHeight != block.Height {
panic(fmt.Sprintf("Failed EvidencePool.Update sanity check: got state.Height=%d with block.Height=%d", state.LastBlockHeight, block.Height))
panic(
fmt.Sprintf("Failed EvidencePool.Update sanity check: got state.Height=%d with block.Height=%d",
state.LastBlockHeight,
block.Height,
),
)
}
// update the state


+ 12
- 2
evidence/reactor.go

@ -154,7 +154,10 @@ func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
// Returns the message to send the peer, or nil if the evidence is invalid for the peer.
// If message is nil, return true if we should sleep and try again.
func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evidence) (msg EvidenceMessage, retry bool) {
func (evR EvidenceReactor) checkSendEvidenceMessage(
peer p2p.Peer,
ev types.Evidence,
) (msg EvidenceMessage, retry bool) {
// make sure the peer is up to date
evHeight := ev.Height()
peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
@ -178,7 +181,14 @@ func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evid
// evidence is too old, skip
// NOTE: if evidence is too old for an honest peer,
// then we're behind and either it already got committed or it never will!
evR.Logger.Info("Not sending peer old evidence", "peerHeight", peerHeight, "evHeight", evHeight, "maxAge", maxAge, "peer", peer)
evR.Logger.Info(
"Not sending peer old evidence",
"peerHeight", peerHeight,
"evHeight", evHeight,
"maxAge", maxAge,
"peer", peer,
)
return nil, false
}


+ 7
- 2
evidence/reactor_test.go

@ -75,8 +75,13 @@ func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceR
}
// wait for all evidence on a single evpool
func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, reactorIdx int, reactors []*EvidenceReactor) {
func _waitForEvidence(
t *testing.T,
wg *sync.WaitGroup,
evs types.EvidenceList,
reactorIdx int,
reactors []*EvidenceReactor,
) {
evpool := reactors[reactorIdx].evpool
for len(evpool.PendingEvidence(-1)) != len(evs) {
time.Sleep(time.Millisecond * 100)


+ 5
- 1
libs/cli/flags/log_level.go

@ -66,7 +66,11 @@ func ParseLogLevel(lvl string, logger log.Logger, defaultLogLevelValue string) (
case "none": case "none":
option = log.AllowNoneWith("module", module) option = log.AllowNoneWith("module", module)
default: default:
return nil, fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)", level, item, list)
return nil,
fmt.Errorf("Expected either \"info\", \"debug\", \"error\" or \"none\" log level, given %s (pair %s, list %s)",
level,
item,
list)
}
options = append(options, option)


+ 3
- 1
libs/common/async.go

@ -126,7 +126,9 @@ func Parallel(tasks ...Task) (trs *TaskResultSet, ok bool) {
var taskResultChz = make([]TaskResultCh, len(tasks)) // To return.
var taskDoneCh = make(chan bool, len(tasks)) // A "wait group" channel, early abort if any true received.
var numPanics = new(int32) // Keep track of panics to set ok=false later.
ok = true // We will set it to false iff any tasks panic'd or returned abort.
// We will set it to false iff any tasks panic'd or returned abort.
ok = true
// Start all tasks in parallel in separate goroutines.
// When the task is complete, it will appear in the


+ 9
- 1
libs/common/bit_array_test.go

@ -102,7 +102,15 @@ func TestSub(t *testing.T) {
require.Nil(t, err)
got, _ := json.Marshal(bA.Sub(o))
require.Equal(t, tc.expectedBA, string(got), "%s minus %s doesn't equal %s", tc.initBA, tc.subtractingBA, tc.expectedBA)
require.Equal(
t,
tc.expectedBA,
string(got),
"%s minus %s doesn't equal %s",
tc.initBA,
tc.subtractingBA,
tc.expectedBA,
)
}
}


+ 6
- 1
libs/common/cmap_test.go

@ -35,7 +35,12 @@ func TestIterateKeysWithValues(t *testing.T) {
// Delete 1 Key
cmap.Delete("key1")
assert.NotEqual(t, len(keys), len(cmap.Keys()), "[]keys and []Keys() should not be equal, they are copies, one item was removed")
assert.NotEqual(
t,
len(keys),
len(cmap.Keys()),
"[]keys and []Keys() should not be equal, they are copies, one item was removed",
)
}
func TestContains(t *testing.T) {


+ 6
- 2
libs/log/filter.go

@ -72,10 +72,14 @@ func (l *filter) Error(msg string, keyvals ...interface{}) {
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"))
// logger.With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto"
//
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
// logger = log.NewFilter(logger, log.AllowError(),
// log.AllowInfoWith("module", "crypto"),
// log.AllowNoneWith("user", "Sam"))
// logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil // logger.With("module", "crypto", "user", "Sam").Info("Hello") # returns nil
// //
// logger = log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
// logger = log.NewFilter(logger,
// log.AllowError(),
// log.AllowInfoWith("module", "crypto"), log.AllowNoneWith("user", "Sam"))
// logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam" // logger.With("user", "Sam").With("module", "crypto").Info("Hello") # produces "I... Hello module=crypto user=Sam"
func (l *filter) With(keyvals ...interface{}) Logger { func (l *filter) With(keyvals ...interface{}) Logger {
keyInAllowedKeyvals := false keyInAllowedKeyvals := false


+ 26
- 5
libs/log/filter_test.go

@ -79,7 +79,10 @@ func TestLevelContext(t *testing.T) {
logger = logger.With("context", "value") logger = logger.With("context", "value")
logger.Error("foo", "bar", "baz") logger.Error("foo", "bar", "baz")
if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`, strings.TrimSpace(buf.String()); want != have {
want := `{"_msg":"foo","bar":"baz","context":"value","level":"error"}`
have := strings.TrimSpace(buf.String())
if want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have)
}
@ -97,13 +100,22 @@ func TestVariousAllowWith(t *testing.T) {
logger1 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"))
logger1.With("context", "value").Info("foo", "bar", "baz")
if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`, strings.TrimSpace(buf.String()); want != have {
want := `{"_msg":"foo","bar":"baz","context":"value","level":"info"}`
have := strings.TrimSpace(buf.String())
if want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have)
}
buf.Reset()
logger2 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam"))
logger2 := log.NewFilter(
logger,
log.AllowError(),
log.AllowInfoWith("context", "value"),
log.AllowNoneWith("user", "Sam"),
)
logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz") logger2.With("context", "value", "user", "Sam").Info("foo", "bar", "baz")
if want, have := ``, strings.TrimSpace(buf.String()); want != have { if want, have := ``, strings.TrimSpace(buf.String()); want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have) t.Errorf("\nwant '%s'\nhave '%s'", want, have)
@ -111,9 +123,18 @@ func TestVariousAllowWith(t *testing.T) {
buf.Reset() buf.Reset()
logger3 := log.NewFilter(logger, log.AllowError(), log.AllowInfoWith("context", "value"), log.AllowNoneWith("user", "Sam"))
logger3 := log.NewFilter(
logger,
log.AllowError(),
log.AllowInfoWith("context", "value"),
log.AllowNoneWith("user", "Sam"),
)
logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz") logger3.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz")
if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have {
want = `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`
have = strings.TrimSpace(buf.String())
if want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have)
}
}

+ 27
- 4
libs/log/tracing_logger_test.go

@ -20,22 +20,45 @@ func TestTracingLogger(t *testing.T) {
err1 := errors.New("Courage is grace under pressure.")
err2 := errors.New("It does not matter how slowly you go, so long as you do not stop.")
logger1.With("err1", err1).Info("foo", "err2", err2)
want := strings.Replace(
strings.Replace(
`{"_msg":"foo","err1":"`+
fmt.Sprintf("%+v", err1)+
`","err2":"`+
fmt.Sprintf("%+v", err2)+
`","level":"info"}`,
"\t", "", -1,
), "\n", "", -1)
have := strings.Replace(strings.Replace(strings.TrimSpace(buf.String()), "\\n", "", -1), "\\t", "", -1)
if want := strings.Replace(strings.Replace(`{"_msg":"foo","err1":"`+fmt.Sprintf("%+v", err1)+`","err2":"`+fmt.Sprintf("%+v", err2)+`","level":"info"}`, "\t", "", -1), "\n", "", -1); want != have {
if want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have)
}
buf.Reset()
logger.With("err1", stderr.New("Opportunities don't happen. You create them.")).Info("foo", "err2", stderr.New("Once you choose hope, anything's possible."))
if want, have := `{"_msg":"foo","err1":"Opportunities don't happen. You create them.","err2":"Once you choose hope, anything's possible.","level":"info"}`, strings.TrimSpace(buf.String()); want != have {
logger.With(
"err1", stderr.New("Opportunities don't happen. You create them."),
).Info(
"foo", "err2", stderr.New("Once you choose hope, anything's possible."),
)
want = `{"_msg":"foo",` +
`"err1":"Opportunities don't happen. You create them.",` +
`"err2":"Once you choose hope, anything's possible.",` +
`"level":"info"}`
have = strings.TrimSpace(buf.String())
if want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have)
}
buf.Reset()
logger.With("user", "Sam").With("context", "value").Info("foo", "bar", "baz")
if want, have := `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`, strings.TrimSpace(buf.String()); want != have {
want = `{"_msg":"foo","bar":"baz","context":"value","level":"info","user":"Sam"}`
have = strings.TrimSpace(buf.String())
if want != have {
t.Errorf("\nwant '%s'\nhave '%s'", want, have)
}
}

+ 5
- 1
libs/pubsub/pubsub.go

@ -146,7 +146,11 @@ func (s *Server) BufferCapacity() int {
// outCapacity can be used to set a capacity for Subscription#Out channel (1 by
// default). Panics if outCapacity is less than or equal to zero. If you want
// an unbuffered channel, use SubscribeUnbuffered.
func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, outCapacity ...int) (*Subscription, error) {
func (s *Server) Subscribe(
ctx context.Context,
clientID string,
query Query,
outCapacity ...int) (*Subscription, error) {
outCap := 1
if len(outCapacity) > 0 {
if outCapacity[0] <= 0 {
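
A minimal usage sketch of the wrapped Subscribe signature above. It assumes the pubsub and query packages under github.com/tendermint/tendermint/libs as they existed around this change, so treat the setup details as assumptions rather than a verbatim excerpt:

    package main

    import (
        "context"
        "fmt"

        "github.com/tendermint/tendermint/libs/pubsub"
        "github.com/tendermint/tendermint/libs/pubsub/query"
    )

    func main() {
        s := pubsub.NewServer()
        if err := s.Start(); err != nil {
            panic(err)
        }
        defer s.Stop() // shutdown error ignored in this sketch

        ctx := context.Background()
        subscription, err := s.Subscribe(
            ctx,
            "example-client",
            query.MustParse("tm.events.type='NewBlock'"),
            1, // optional out-channel capacity; must be > 0
        )
        if err != nil {
            panic(err)
        }

        err = s.PublishWithEvents(
            ctx,
            "block #1",
            map[string][]string{"tm.events.type": {"NewBlock"}},
        )
        if err != nil {
            panic(err)
        }

        fmt.Println((<-subscription.Out()).Data())
    }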


+ 26
- 6
libs/pubsub/pubsub_test.go

@ -140,14 +140,26 @@ func TestDifferentClients(t *testing.T) {
require.NoError(t, err)
assertReceive(t, "Iceman", subscription1.Out())
subscription2, err := s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"))
require.NoError(t, err)
err = s.PublishWithEvents(ctx, "Ultimo", map[string][]string{"tm.events.type": {"NewBlock"}, "abci.account.name": {"Igor"}})
subscription2, err := s.Subscribe(
ctx,
"client-2",
query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"),
)
require.NoError(t, err)
err = s.PublishWithEvents(
ctx,
"Ultimo",
map[string][]string{"tm.events.type": {"NewBlock"}, "abci.account.name": {"Igor"}},
)
require.NoError(t, err)
assertReceive(t, "Ultimo", subscription1.Out())
assertReceive(t, "Ultimo", subscription2.Out())
subscription3, err := s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"))
subscription3, err := s.Subscribe(
ctx,
"client-3",
query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"),
)
require.NoError(t, err)
err = s.PublishWithEvents(ctx, "Valeria Richards", map[string][]string{"tm.events.type": {"NewRoundStep"}})
require.NoError(t, err)
@ -344,7 +356,11 @@ func benchmarkNClients(n int, b *testing.B) {
ctx := context.Background()
for i := 0; i < n; i++ {
subscription, err := s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)))
subscription, err := s.Subscribe(
ctx,
clientID,
query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)),
)
if err != nil {
b.Fatal(err)
}
@ -363,7 +379,11 @@ func benchmarkNClients(n int, b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
s.PublishWithEvents(ctx, "Gamora", map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(i)}})
s.PublishWithEvents(
ctx,
"Gamora",
map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(i)}},
)
}
}


+ 30
- 8
libs/pubsub/query/query.go

@ -120,26 +120,36 @@ func (q *Query) Conditions() []Condition {
if strings.ContainsAny(number, ".") { // if it looks like a floating-point number
value, err := strconv.ParseFloat(number, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)",
err,
number))
}
conditions = append(conditions, Condition{tag, op, value})
} else {
value, err := strconv.ParseInt(number, 10, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number))
panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)",
err,
number))
}
conditions = append(conditions, Condition{tag, op, value})
}
case ruletime:
value, err := time.Parse(TimeLayout, buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
panic(fmt.Sprintf(
"got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)",
err,
buffer[begin:end]))
}
conditions = append(conditions, Condition{tag, op, value})
case ruledate:
value, err := time.Parse("2006-01-02", buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end]))
panic(fmt.Sprintf(
"got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)",
err,
buffer[begin:end]))
}
conditions = append(conditions, Condition{tag, op, value})
}
@ -199,7 +209,10 @@ func (q *Query) Matches(events map[string][]string) bool {
if strings.ContainsAny(number, ".") { // if it looks like a floating-point number
value, err := strconv.ParseFloat(number, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number))
panic(fmt.Sprintf(
"got %v while trying to parse %s as float64 (should never happen if the grammar is correct)",
err,
number))
}
if !match(tag, op, reflect.ValueOf(value), events) {
return false
@ -207,7 +220,10 @@ func (q *Query) Matches(events map[string][]string) bool {
} else {
value, err := strconv.ParseInt(number, 10, 64)
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number))
panic(fmt.Sprintf(
"got %v while trying to parse %s as int64 (should never happen if the grammar is correct)",
err,
number))
}
if !match(tag, op, reflect.ValueOf(value), events) {
return false
@ -216,7 +232,10 @@ func (q *Query) Matches(events map[string][]string) bool {
case ruletime:
value, err := time.Parse(TimeLayout, buffer[begin:end])
if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end]))
panic(fmt.Sprintf(
"got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)",
err,
buffer[begin:end]))
} }
if !match(tag, op, reflect.ValueOf(value), events) { if !match(tag, op, reflect.ValueOf(value), events) {
return false return false
@ -224,7 +243,10 @@ func (q *Query) Matches(events map[string][]string) bool {
case ruledate: case ruledate:
value, err := time.Parse("2006-01-02", buffer[begin:end]) value, err := time.Parse("2006-01-02", buffer[begin:end])
if err != nil { if err != nil {
panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end]))
panic(fmt.Sprintf(
"got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)",
err,
buffer[begin:end]))
} }
if !match(tag, op, reflect.ValueOf(value), events) { if !match(tag, op, reflect.ValueOf(value), events) {
return false return false


+ 73 - 13  libs/pubsub/query/query_test.go

@ -31,28 +31,72 @@ func TestMatches(t *testing.T) {
{"account.balance < 1000.0", map[string][]string{"account.balance": {"900"}}, false, true}, {"account.balance < 1000.0", map[string][]string{"account.balance": {"900"}}, false, true},
{"apples.kg <= 4", map[string][]string{"apples.kg": {"4.0"}}, false, true}, {"apples.kg <= 4", map[string][]string{"apples.kg": {"4.0"}}, false, true},
{"body.weight >= 4.5", map[string][]string{"body.weight": {fmt.Sprintf("%v", float32(4.5))}}, false, true}, {"body.weight >= 4.5", map[string][]string{"body.weight": {fmt.Sprintf("%v", float32(4.5))}}, false, true},
{"oranges.kg < 4 AND watermellons.kg > 10", map[string][]string{"oranges.kg": {"3"}, "watermellons.kg": {"12"}}, false, true},
{
"oranges.kg < 4 AND watermellons.kg > 10",
map[string][]string{"oranges.kg": {"3"}, "watermellons.kg": {"12"}},
false,
true,
},
{"peaches.kg < 4", map[string][]string{"peaches.kg": {"5"}}, false, false}, {"peaches.kg < 4", map[string][]string{"peaches.kg": {"5"}}, false, false},
{"tx.date > DATE 2017-01-01", map[string][]string{"tx.date": {time.Now().Format(query.DateLayout)}}, false, true},
{
"tx.date > DATE 2017-01-01",
map[string][]string{"tx.date": {time.Now().Format(query.DateLayout)}},
false,
true,
},
{"tx.date = DATE 2017-01-01", map[string][]string{"tx.date": {txDate}}, false, true}, {"tx.date = DATE 2017-01-01", map[string][]string{"tx.date": {txDate}}, false, true},
{"tx.date = DATE 2018-01-01", map[string][]string{"tx.date": {txDate}}, false, false}, {"tx.date = DATE 2018-01-01", map[string][]string{"tx.date": {txDate}}, false, false},
{"tx.time >= TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {time.Now().Format(query.TimeLayout)}}, false, true},
{
"tx.time >= TIME 2013-05-03T14:45:00Z",
map[string][]string{"tx.time": {time.Now().Format(query.TimeLayout)}},
false,
true,
},
{"tx.time = TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {txTime}}, false, false}, {"tx.time = TIME 2013-05-03T14:45:00Z", map[string][]string{"tx.time": {txTime}}, false, false},
{"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Igor,Ivan"}}, false, true}, {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Igor,Ivan"}}, false, true},
{"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Pavel,Ivan"}}, false, false}, {"abci.owner.name CONTAINS 'Igor'", map[string][]string{"abci.owner.name": {"Pavel,Ivan"}}, false, false},
{"abci.owner.name = 'Igor'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true}, {"abci.owner.name = 'Igor'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true},
{"abci.owner.name = 'Ivan'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true},
{"abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, true},
{"abci.owner.name = 'Ivan' AND abci.owner.name = 'John'", map[string][]string{"abci.owner.name": {"Igor", "Ivan"}}, false, false},
{"tm.events.type='NewBlock'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true},
{
"abci.owner.name = 'Ivan'",
map[string][]string{"abci.owner.name": {"Igor", "Ivan"}},
false,
true,
},
{
"abci.owner.name = 'Ivan' AND abci.owner.name = 'Igor'",
map[string][]string{"abci.owner.name": {"Igor", "Ivan"}},
false,
true,
},
{
"abci.owner.name = 'Ivan' AND abci.owner.name = 'John'",
map[string][]string{"abci.owner.name": {"Igor", "Ivan"}},
false,
false,
},
{
"tm.events.type='NewBlock'",
map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}},
false,
true,
},
{"app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true}, {"app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true},
{"tm.events.type='NewBlock' AND app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, true},
{"tm.events.type='NewHeader' AND app.name = 'fuzzed'", map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}}, false, false},
{
"tm.events.type='NewBlock' AND app.name = 'fuzzed'",
map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}},
false,
true,
},
{
"tm.events.type='NewHeader' AND app.name = 'fuzzed'",
map[string][]string{"tm.events.type": {"NewBlock"}, "app.name": {"fuzzed"}},
false,
false,
},
} }
for _, tc := range testCases { for _, tc := range testCases {
@ -84,9 +128,25 @@ func TestConditions(t *testing.T) {
s string s string
conditions []query.Condition conditions []query.Condition
}{ }{
{s: "tm.events.type='NewBlock'", conditions: []query.Condition{{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}},
{s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, {Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}},
{s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}},
{
s: "tm.events.type='NewBlock'",
conditions: []query.Condition{
{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"},
},
},
{
s: "tx.gas > 7 AND tx.gas < 9",
conditions: []query.Condition{
{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)},
{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)},
},
},
{
s: "tx.time >= TIME 2013-05-03T14:45:00Z",
conditions: []query.Condition{
{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime},
},
},
} }
for _, tc := range testCases { for _, tc := range testCases {
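The hunks above all touch the query parser behind Tendermint's event subscriptions. For orientation, a minimal sketch of how this package is normally driven (assuming the import path and the `MustParse` helper are as in this release; the query string and event values below are illustrative):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	// MustParse panics on a malformed expression, which is why the
	// "should never happen if the grammar is correct" panics above are
	// expected to be unreachable for queries that parsed successfully.
	q := query.MustParse("tm.events.type='NewBlock' AND account.balance > 100")

	// Events are keyed by composite tag name; each tag can carry several values.
	events := map[string][]string{
		"tm.events.type":  {"NewBlock"},
		"account.balance": {"150"},
	}

	fmt.Println(q.Matches(events)) // prints "true" when every condition matches
}
```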


+ 26 - 6  lite/proxy/proxy.go

@ -95,7 +95,11 @@ func makeStatusFunc(c client.StatusClient) func(ctx *rpctypes.Context) (*ctypes.
} }
} }
func makeBlockchainInfoFunc(c rpcclient.Client) func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
func makeBlockchainInfoFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
minHeight,
maxHeight int64,
) (*ctypes.ResultBlockchainInfo, error) {
return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
return c.BlockchainInfo(minHeight, maxHeight) return c.BlockchainInfo(minHeight, maxHeight)
} }
@ -125,31 +129,47 @@ func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, pro
} }
} }
func makeValidatorsFunc(c rpcclient.Client) func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) {
func makeValidatorsFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
height *int64,
) (*ctypes.ResultValidators, error) {
return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) { return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) {
return c.Validators(height) return c.Validators(height)
} }
} }
func makeBroadcastTxCommitFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
func makeBroadcastTxCommitFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
tx types.Tx,
) (*ctypes.ResultBroadcastTxCommit, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
return c.BroadcastTxCommit(tx) return c.BroadcastTxCommit(tx)
} }
} }
func makeBroadcastTxSyncFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func makeBroadcastTxSyncFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.BroadcastTxSync(tx) return c.BroadcastTxSync(tx)
} }
} }
func makeBroadcastTxAsyncFunc(c rpcclient.Client) func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
func makeBroadcastTxAsyncFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
tx types.Tx,
) (*ctypes.ResultBroadcastTx, error) {
return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
return c.BroadcastTxAsync(tx) return c.BroadcastTxAsync(tx)
} }
} }
func makeABCIQueryFunc(c rpcclient.Client) func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
func makeABCIQueryFunc(c rpcclient.Client) func(
ctx *rpctypes.Context,
path string,
data cmn.HexBytes,
) (*ctypes.ResultABCIQuery, error) {
return func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { return func(ctx *rpctypes.Context, path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
return c.ABCIQuery(path, data) return c.ABCIQuery(path, data)
} }


+ 7 - 1  lite/proxy/verifier.go

@ -9,7 +9,13 @@ import (
dbm "github.com/tendermint/tm-db" dbm "github.com/tendermint/tm-db"
) )
func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) {
func NewVerifier(
chainID,
rootDir string,
client lclient.SignStatusClient,
logger log.Logger,
cacheSize int,
) (*lite.DynamicVerifier, error) {
logger = logger.With("module", "lite/proxy") logger = logger.With("module", "lite/proxy")
logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client)


+ 8 - 1  mempool/clist_mempool_test.go

@ -574,7 +574,14 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) {
} }
// caller must close server // caller must close server
func newRemoteApp(t *testing.T, addr string, app abci.Application) (clientCreator proxy.ClientCreator, server cmn.Service) {
func newRemoteApp(
t *testing.T,
addr string,
app abci.Application,
) (
clientCreator proxy.ClientCreator,
server cmn.Service,
) {
clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true) clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true)
// Start server // Start server


+ 7 - 1  mempool/mempool.go

@@ -44,7 +44,13 @@ type Mempool interface {
// Update informs the mempool that the given txs were committed and can be discarded.
// NOTE: this should be called *after* block is committed by consensus.
// NOTE: unsafe; Lock/Unlock must be managed by caller
Update(blockHeight int64, blockTxs types.Txs, deliverTxResponses []*abci.ResponseDeliverTx, newPreFn PreCheckFunc, newPostFn PostCheckFunc) error
Update(
blockHeight int64,
blockTxs types.Txs,
deliverTxResponses []*abci.ResponseDeliverTx,
newPreFn PreCheckFunc,
newPostFn PostCheckFunc,
) error
// FlushAppConn flushes the mempool connection to ensure async reqResCb calls are
// done. E.g. from CheckTx.
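Since the `Update` contract above is easy to misread, here is a sketch of the caller side it implies: take the mempool lock, flush the app connection, then apply the committed block's results. The helper name is hypothetical, and the nil pre/post-check arguments are assumed to leave the current check functions in place.

```go
package mempool

import (
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/types"
)

// updateAfterCommit is a hypothetical helper showing the caller-managed
// Lock/Unlock noted in the interface comment above.
func updateAfterCommit(
	mempl Mempool,
	height int64,
	txs types.Txs,
	deliverTxResponses []*abci.ResponseDeliverTx,
) error {
	mempl.Lock()
	defer mempl.Unlock()

	// Make sure in-flight CheckTx callbacks have completed before Update.
	if err := mempl.FlushAppConn(); err != nil {
		return err
	}

	// nil PreCheckFunc/PostCheckFunc are assumed to keep the existing ones.
	return mempl.Update(height, txs, deliverTxResponses, nil, nil)
}
```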


+ 21 - 4  node/node.go

@ -266,8 +266,14 @@ func createAndStartIndexerService(config *cfg.Config, dbProvider DBProvider,
return indexerService, txIndexer, nil return indexerService, txIndexer, nil
} }
func doHandshake(stateDB dbm.DB, state sm.State, blockStore sm.BlockStore,
genDoc *types.GenesisDoc, eventBus types.BlockEventPublisher, proxyApp proxy.AppConns, consensusLogger log.Logger) error {
func doHandshake(
stateDB dbm.DB,
state sm.State,
blockStore sm.BlockStore,
genDoc *types.GenesisDoc,
eventBus types.BlockEventPublisher,
proxyApp proxy.AppConns,
consensusLogger log.Logger) error {
handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
handshaker.SetLogger(consensusLogger) handshaker.SetLogger(consensusLogger)
@ -400,7 +406,15 @@ func createConsensusReactor(config *cfg.Config,
return consensusReactor, consensusState return consensusReactor, consensusState
} }
func createTransport(config *cfg.Config, nodeInfo p2p.NodeInfo, nodeKey *p2p.NodeKey, proxyApp proxy.AppConns) (*p2p.MultiplexTransport, []p2p.PeerFilterFunc) {
func createTransport(
config *cfg.Config,
nodeInfo p2p.NodeInfo,
nodeKey *p2p.NodeKey,
proxyApp proxy.AppConns,
) (
*p2p.MultiplexTransport,
[]p2p.PeerFilterFunc,
) {
var ( var (
mConnConfig = p2p.MConnConfig(config.P2P) mConnConfig = p2p.MConnConfig(config.P2P)
transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig) transport = p2p.NewMultiplexTransport(nodeInfo, *nodeKey, mConnConfig)
@ -1114,7 +1128,10 @@ var (
// database, or creates one using the given genesisDocProvider and persists the // database, or creates one using the given genesisDocProvider and persists the
// result to the database. On success this also returns the genesis doc loaded // result to the database. On success this also returns the genesis doc loaded
// through the given provider. // through the given provider.
func LoadStateFromDBOrGenesisDocProvider(stateDB dbm.DB, genesisDocProvider GenesisDocProvider) (sm.State, *types.GenesisDoc, error) {
func LoadStateFromDBOrGenesisDocProvider(
stateDB dbm.DB,
genesisDocProvider GenesisDocProvider,
) (sm.State, *types.GenesisDoc, error) {
// Get genesis doc // Get genesis doc
genDoc, err := loadGenesisDoc(stateDB) genDoc, err := loadGenesisDoc(stateDB)
if err != nil { if err != nil {


+ 13 - 2  p2p/conn/connection.go

@ -145,7 +145,12 @@ func DefaultMConnConfig() MConnConfig {
} }
// NewMConnection wraps net.Conn and creates multiplex connection // NewMConnection wraps net.Conn and creates multiplex connection
func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
func NewMConnection(
conn net.Conn,
chDescs []*ChannelDescriptor,
onReceive receiveCbFunc,
onError errorCbFunc,
) *MConnection {
return NewMConnectionWithConfig( return NewMConnectionWithConfig(
conn, conn,
chDescs, chDescs,
@ -155,7 +160,13 @@ func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive recei
} }
// NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config // NewMConnectionWithConfig wraps net.Conn and creates multiplex connection with a config
func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config MConnConfig) *MConnection {
func NewMConnectionWithConfig(
conn net.Conn,
chDescs []*ChannelDescriptor,
onReceive receiveCbFunc,
onError errorCbFunc,
config MConnConfig,
) *MConnection {
if config.PongTimeout >= config.PingInterval { if config.PongTimeout >= config.PingInterval {
panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)") panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
} }


+ 5 - 1  p2p/conn/connection_test.go

@ -26,7 +26,11 @@ func createTestMConnection(conn net.Conn) *MConnection {
return c return c
} }
func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *MConnection {
func createMConnectionWithCallbacks(
conn net.Conn,
onReceive func(chID byte, msgBytes []byte),
onError func(r interface{}),
) *MConnection {
cfg := DefaultMConnConfig() cfg := DefaultMConnConfig()
cfg.PingInterval = 90 * time.Millisecond cfg.PingInterval = 90 * time.Millisecond
cfg.PongTimeout = 45 * time.Millisecond cfg.PongTimeout = 45 * time.Millisecond


+ 4 - 1  p2p/conn/secret_connection.go

@ -349,7 +349,10 @@ func hasSmallOrder(pubKey [32]byte) bool {
return isSmallOrderPoint return isSmallOrderPoint
} }
func deriveSecretAndChallenge(dhSecret *[32]byte, locIsLeast bool) (recvSecret, sendSecret *[aeadKeySize]byte, challenge *[32]byte) {
func deriveSecretAndChallenge(
dhSecret *[32]byte,
locIsLeast bool,
) (recvSecret, sendSecret *[aeadKeySize]byte, challenge *[32]byte) {
hash := sha256.New hash := sha256.New
hkdf := hkdf.New(hash, dhSecret[:], nil, []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN")) hkdf := hkdf.New(hash, dhSecret[:], nil, []byte("TENDERMINT_SECRET_CONNECTION_KEY_AND_CHALLENGE_GEN"))
// get enough data for 2 aead keys, and a 32 byte challenge // get enough data for 2 aead keys, and a 32 byte challenge


+ 29 - 5  p2p/netaddress_test.go

@ -35,9 +35,24 @@ func TestNewNetAddressString(t *testing.T) {
{"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false}, {"no node id w/ tcp input", "tcp://127.0.0.1:8080", "", false},
{"no node id w/ udp input", "udp://127.0.0.1:8080", "", false}, {"no node id w/ udp input", "udp://127.0.0.1:8080", "", false},
{"no protocol", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
{"tcp input", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
{"udp input", "udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
{
"no protocol",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
true,
},
{
"tcp input",
"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
true,
},
{
"udp input",
"udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
true,
},
{"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
{"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
@ -55,7 +70,12 @@ func TestNewNetAddressString(t *testing.T) {
{"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false},
{"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false},
{"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false},
{"correct nodeId w/tcp", "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true},
{
"correct nodeId w/tcp",
"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
true,
},
{"no node id", "tcp://@127.0.0.1:8080", "", false}, {"no node id", "tcp://@127.0.0.1:8080", "", false},
{"no node id or IP", "tcp://@", "", false}, {"no node id or IP", "tcp://@", "", false},
@ -129,7 +149,11 @@ func TestNetAddressReachabilityTo(t *testing.T) {
other string other string
reachability int reachability int
}{ }{
{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081", 0},
{
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081",
0,
},
{"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1}, {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1},
} }
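For readers skimming the table, the `<node-id>@<host>:<port>` strings being exercised are the canonical peer-address format. A small usage sketch, reusing the same dummy node ID as the tests:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
)

func main() {
	// The node ID is the hex fingerprint of the peer's public key; the
	// protocol prefix (tcp://, udp://) is optional, as the tests show.
	addr, err := p2p.NewNetAddressString(
		"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")
	if err != nil {
		panic(err)
	}

	fmt.Println(addr.ID, addr.IP, addr.Port)
}
```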


+ 5 - 1  p2p/node_info_test.go

@ -31,7 +31,11 @@ func TestNodeInfoValidate(t *testing.T) {
malleateNodeInfo func(*DefaultNodeInfo) malleateNodeInfo func(*DefaultNodeInfo)
expectErr bool expectErr bool
}{ }{
{"Too Many Channels", func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, true}, // nolint: gocritic
{
"Too Many Channels",
func(ni *DefaultNodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, // nolint: gocritic
true,
},
{"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true}, {"Duplicate Channel", func(ni *DefaultNodeInfo) { ni.Channels = dupChannels }, true},
{"Good Channels", func(ni *DefaultNodeInfo) { ni.Channels = ni.Channels[:5] }, false}, {"Good Channels", func(ni *DefaultNodeInfo) { ni.Channels = ni.Channels[:5] }, false},


+ 14 - 2  p2p/pex/addrbook_test.go

@ -349,10 +349,22 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) {
// compute some slack to protect against small differences due to rounding: // compute some slack to protect against small differences due to rounding:
slack := int(math.Round(float64(100) / float64(len(selection)))) slack := int(math.Round(float64(100) / float64(len(selection))))
if got > expected+slack { if got > expected+slack {
t.Fatalf("got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
t.Fatalf(
"got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
got,
expected,
good,
len(selection),
)
} }
if got < expected-slack { if got < expected-slack {
t.Fatalf("got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection))
t.Fatalf(
"got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
got,
expected,
good,
len(selection),
)
} }
} }


+ 2 - 1  p2p/pex/pex_reactor.go

@ -303,7 +303,8 @@ func (r *PEXReactor) receiveRequest(src Peer) error {
now := time.Now() now := time.Now()
minInterval := r.minReceiveRequestInterval() minInterval := r.minReceiveRequestInterval()
if now.Sub(lastReceived) < minInterval { if now.Sub(lastReceived) < minInterval {
return fmt.Errorf("peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
return fmt.Errorf(
"peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
src.ID(), src.ID(),
lastReceived, lastReceived,
now, now,


+ 22 - 6  p2p/switch_test.go

@ -136,12 +136,28 @@ func TestSwitches(t *testing.T) {
s1.Broadcast(byte(0x01), ch1Msg) s1.Broadcast(byte(0x01), ch1Msg)
s1.Broadcast(byte(0x02), ch2Msg) s1.Broadcast(byte(0x02), ch2Msg)
assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
}
func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
assertMsgReceivedWithTimeout(t,
ch0Msg,
byte(0x00),
s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
assertMsgReceivedWithTimeout(t,
ch1Msg,
byte(0x01),
s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
assertMsgReceivedWithTimeout(t,
ch2Msg,
byte(0x02),
s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
}
func assertMsgReceivedWithTimeout(
t *testing.T,
msgBytes []byte,
channel byte,
reactor *TestReactor,
checkPeriod,
timeout time.Duration,
) {
ticker := time.NewTicker(checkPeriod) ticker := time.NewTicker(checkPeriod)
for { for {
select { select {


+ 11 - 2  p2p/test_util.go

@ -51,7 +51,12 @@ func CreateRandomPeer(outbound bool) *peer {
func CreateRoutableAddr() (addr string, netAddr *NetAddress) { func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
for { for {
var err error var err error
addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256)
addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656",
cmn.RandBytes(20),
cmn.RandInt()%256,
cmn.RandInt()%256,
cmn.RandInt()%256,
cmn.RandInt()%256)
netAddr, err = NewNetAddressString(addr) netAddr, err = NewNetAddressString(addr)
if err != nil { if err != nil {
panic(err) panic(err)
@ -72,7 +77,11 @@ const TEST_HOST = "localhost"
// If connect==Connect2Switches, the switches will be fully connected. // If connect==Connect2Switches, the switches will be fully connected.
// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). // initSwitch defines how the i'th switch should be initialized (ie. with what reactors).
// NOTE: panics if any switch fails to start. // NOTE: panics if any switch fails to start.
func MakeConnectedSwitches(cfg *config.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch {
func MakeConnectedSwitches(cfg *config.P2PConfig,
n int,
initSwitch func(int, *Switch) *Switch,
connect func([]*Switch, int, int),
) []*Switch {
switches := make([]*Switch, n) switches := make([]*Switch, n)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
switches[i] = MakeSwitch(cfg, i, TEST_HOST, "123.123.123", initSwitch) switches[i] = MakeSwitch(cfg, i, TEST_HOST, "123.123.123", initSwitch)


+ 14 - 3  p2p/upnp/upnp.go

@ -27,7 +27,12 @@ type upnpNAT struct {
// protocol is either "udp" or "tcp" // protocol is either "udp" or "tcp"
type NAT interface { type NAT interface {
GetExternalAddress() (addr net.IP, err error) GetExternalAddress() (addr net.IP, err error)
AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error)
AddPortMapping(
protocol string,
externalPort,
internalPort int,
description string,
timeout int) (mappedExternalPort int, err error)
DeletePortMapping(protocol string, externalPort, internalPort int) (err error) DeletePortMapping(protocol string, externalPort, internalPort int) (err error)
} }
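The `NAT` interface reshaped above is easier to follow with a usage sketch. `Discover` is assumed to probe the local gateway and hand back a `NAT` implementation; the port numbers and lease duration are illustrative.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p/upnp"
)

func main() {
	nat, err := upnp.Discover() // assumed SSDP discovery of the gateway
	if err != nil {
		panic(err)
	}

	ext, err := nat.GetExternalAddress()
	if err != nil {
		panic(err)
	}
	fmt.Println("external address:", ext)

	// Map external port 26656 to the same internal port for one hour.
	if _, err := nat.AddPortMapping("tcp", 26656, 26656, "tendermint p2p", 3600); err != nil {
		panic(err)
	}

	// Remove the mapping again when shutting down.
	if err := nat.DeletePortMapping("tcp", 26656, 26656); err != nil {
		panic(err)
	}
}
```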
@ -254,7 +259,8 @@ func combineURL(rootURL, subURL string) string {
func soapRequest(url, function, message, domain string) (r *http.Response, err error) { func soapRequest(url, function, message, domain string) (r *http.Response, err error) {
fullMessage := "<?xml version=\"1.0\" ?>" + fullMessage := "<?xml version=\"1.0\" ?>" +
"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" " +
"s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
"<s:Body>" + message + "</s:Body></s:Envelope>" "<s:Body>" + message + "</s:Body></s:Envelope>"
req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage)) req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
@ -339,7 +345,12 @@ func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
return return
} }
func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
func (n *upnpNAT) AddPortMapping(
protocol string,
externalPort,
internalPort int,
description string,
timeout int) (mappedExternalPort int, err error) {
// A single concatenation would break ARM compilation. // A single concatenation would break ARM compilation.
message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" + message := "<u:AddPortMapping xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
"<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort) "<NewRemoteHost></NewRemoteHost><NewExternalPort>" + strconv.Itoa(externalPort)


+ 4 - 2  privval/doc.go

@@ -4,11 +4,13 @@ Package privval provides different implementations of the types.PrivValidator.
FilePV
FilePV is the simplest implementation and developer default. It uses one file for the private key and another to store state.
FilePV is the simplest implementation and developer default.
It uses one file for the private key and another to store state.
SignerListenerEndpoint
SignerListenerEndpoint establishes a connection to an external process, like a Key Management Server (KMS), using a socket.
SignerListenerEndpoint establishes a connection to an external process,
like a Key Management Server (KMS), using a socket.
SignerListenerEndpoint listens for the external KMS process to dial in.
SignerListenerEndpoint takes a listener, which determines the type of connection
(ie. encrypted over tcp, or unencrypted over unix).
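To make the two-file layout described above concrete, a minimal sketch (the file paths are the conventional defaults, and `LoadOrGenFilePV` is assumed to create both files if they do not exist yet):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/privval"
)

func main() {
	// One file for the secret key, a separate one for the last-sign state,
	// matching the FilePV description above.
	pv := privval.LoadOrGenFilePV(
		"config/priv_validator_key.json",
		"data/priv_validator_state.json",
	)

	fmt.Println("validator address:", pv.GetAddress())
	fmt.Println(pv) // uses the String() method wrapped later in this diff
}
```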


+ 14 - 2  privval/file.go

@ -96,7 +96,13 @@ func (lss *FilePVLastSignState) CheckHRS(height int64, round int, step int8) (bo
if lss.Round == round { if lss.Round == round {
if lss.Step > step { if lss.Step > step {
return false, fmt.Errorf("step regression at height %v round %v. Got %v, last step %v", height, round, step, lss.Step)
return false, fmt.Errorf(
"step regression at height %v round %v. Got %v, last step %v",
height,
round,
step,
lss.Step,
)
} else if lss.Step == step { } else if lss.Step == step {
if lss.SignBytes != nil { if lss.SignBytes != nil {
if lss.Signature == nil { if lss.Signature == nil {
@ -271,7 +277,13 @@ func (pv *FilePV) Reset() {
// String returns a string representation of the FilePV. // String returns a string representation of the FilePV.
func (pv *FilePV) String() string { func (pv *FilePV) String() string {
return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastSignState.Height, pv.LastSignState.Round, pv.LastSignState.Step)
return fmt.Sprintf(
"PrivValidator{%v LH:%v, LR:%v, LS:%v}",
pv.GetAddress(),
pv.LastSignState.Height,
pv.LastSignState.Round,
pv.LastSignState.Step,
)
} }
//------------------------------------------------------------------------------------ //------------------------------------------------------------------------------------


+ 6 - 1  privval/file_deprecated_test.go

@ -11,6 +11,11 @@ import (
"github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/privval"
) )
const lastSignBytes = "750802110500000000000000220B08B398F3E00510F48DA6402A480A20F" +
"C258973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4" +
"CAD312240A20C971B286ACB8AAA6FCA0365EB0A660B189EDC08B46B5AF2" +
"995DEFA51A28D215B10013211746573742D636861696E2D533245415533"
const oldPrivvalContent = `{ const oldPrivvalContent = `{
"address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D", "address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D",
"pub_key": { "pub_key": {
@ -21,7 +26,7 @@ const oldPrivvalContent = `{
"last_round": "0", "last_round": "0",
"last_step": 3, "last_step": 3,
"last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==", "last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==",
"last_signbytes": "750802110500000000000000220B08B398F3E00510F48DA6402A480A20FC258973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4CAD312240A20C971B286ACB8AAA6FCA0365EB0A660B189EDC08B46B5AF2995DEFA51A28D215B10013211746573742D636861696E2D533245415533",
"last_signbytes": "` + lastSignBytes + `",
"priv_key": { "priv_key": {
"type": "tendermint/PrivKeyEd25519", "type": "tendermint/PrivKeyEd25519",
"value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ==" "value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ=="


+ 5 - 1  privval/signer_requestHandler.go

@ -7,7 +7,11 @@ import (
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
func DefaultValidationRequestHandler(privVal types.PrivValidator, req SignerMessage, chainID string) (SignerMessage, error) {
func DefaultValidationRequestHandler(
privVal types.PrivValidator,
req SignerMessage,
chainID string,
) (SignerMessage, error) {
var res SignerMessage var res SignerMessage
var err error var err error


+ 4 - 1  rpc/client/httpclient.go

@ -170,7 +170,10 @@ func (c *baseRPCClient) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.Resul
return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions)
} }
func (c *baseRPCClient) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (c *baseRPCClient) ABCIQueryWithOptions(
path string,
data cmn.HexBytes,
opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
result := new(ctypes.ResultABCIQuery) result := new(ctypes.ResultABCIQuery)
_, err := c.caller.Call("abci_query", _, err := c.caller.Call("abci_query",
map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove},


+ 14 - 3  rpc/client/localclient.go

@ -77,7 +77,10 @@ func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue
return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions)
} }
func (c *Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (c *Local) ABCIQueryWithOptions(
path string,
data cmn.HexBytes,
opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove)
} }
@ -161,7 +164,11 @@ func (c *Local) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvi
return core.BroadcastEvidence(c.ctx, ev) return core.BroadcastEvidence(c.ctx, ev)
} }
func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
func (c *Local) Subscribe(
ctx context.Context,
subscriber,
query string,
outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) {
q, err := tmquery.New(query) q, err := tmquery.New(query)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "failed to parse query") return nil, errors.Wrap(err, "failed to parse query")
@ -182,7 +189,11 @@ func (c *Local) Subscribe(ctx context.Context, subscriber, query string, outCapa
return outc, nil return outc, nil
} }
func (c *Local) eventsRoutine(sub types.Subscription, subscriber string, q tmpubsub.Query, outc chan<- ctypes.ResultEvent) {
func (c *Local) eventsRoutine(
sub types.Subscription,
subscriber string,
q tmpubsub.Query,
outc chan<- ctypes.ResultEvent) {
for { for {
select { select {
case msg := <-sub.Out(): case msg := <-sub.Out():


+ 12 - 3  rpc/client/mock/abci.go

@ -30,7 +30,10 @@ func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQu
return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions)
} }
func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (a ABCIApp) ABCIQueryWithOptions(
path string,
data cmn.HexBytes,
opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
q := a.App.Query(abci.RequestQuery{ q := a.App.Query(abci.RequestQuery{
Data: data, Data: data,
Path: path, Path: path,
@ -94,7 +97,10 @@ func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQ
return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions)
} }
func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (m ABCIMock) ABCIQueryWithOptions(
path string,
data cmn.HexBytes,
opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove})
if err != nil { if err != nil {
return nil, err return nil, err
@ -166,7 +172,10 @@ func (r *ABCIRecorder) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.Result
return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions)
} }
func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (r *ABCIRecorder) ABCIQueryWithOptions(
path string,
data cmn.HexBytes,
opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
res, err := r.Client.ABCIQueryWithOptions(path, data, opts) res, err := r.Client.ABCIQueryWithOptions(path, data, opts)
r.addCall(Call{ r.addCall(Call{
Name: "abci_query", Name: "abci_query",


+ 4 - 1  rpc/client/mock/client.go

@ -90,7 +90,10 @@ func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue
return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions)
} }
func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (c Client) ABCIQueryWithOptions(
path string,
data cmn.HexBytes,
opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) return core.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove)
} }


+ 12 - 2  rpc/client/rpc_test.go

@ -488,7 +488,13 @@ func deepcpVote(vote *types.Vote) (res *types.Vote) {
return return
} }
func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, chainID string) types.DuplicateVoteEvidence {
func newEvidence(
t *testing.T,
val *privval.FilePV,
vote *types.Vote,
vote2 *types.Vote,
chainID string,
) types.DuplicateVoteEvidence {
var err error var err error
vote2_ := deepcpVote(vote2) vote2_ := deepcpVote(vote2)
vote2_.Signature, err = val.Key.PrivKey.Sign(vote2_.SignBytes(chainID)) vote2_.Signature, err = val.Key.PrivKey.Sign(vote2_.SignBytes(chainID))
@ -501,7 +507,11 @@ func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *typ
} }
} }
func makeEvidences(t *testing.T, val *privval.FilePV, chainID string) (ev types.DuplicateVoteEvidence, fakes []types.DuplicateVoteEvidence) {
func makeEvidences(
t *testing.T,
val *privval.FilePV,
chainID string,
) (ev types.DuplicateVoteEvidence, fakes []types.DuplicateVoteEvidence) {
vote := &types.Vote{ vote := &types.Vote{
ValidatorAddress: val.Key.Address, ValidatorAddress: val.Key.Address,
ValidatorIndex: 0, ValidatorIndex: 0,


+ 11 - 2  rpc/core/abci.go

@@ -33,7 +33,10 @@ import (
// "response": {
// "log": "exists",
// "height": "0",
// "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C",
// "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1" +
// "D8349990101020103011406AA2262E2F448242D" +
// "F2C2607C3CDC705313EE3B0001149D16177BC71" +
// "E445476174622EA559715C293740C",
// "value": "61626364",
// "key": "61626364",
// "index": "-1",
@@ -53,7 +56,13 @@ import (
// | data | []byte | false | true | Data |
// | height | int64 | 0 | false | Height (0 means latest) |
// | prove | bool | false | false | Includes proof if true |
func ABCIQuery(ctx *rpctypes.Context, path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) {
func ABCIQuery(
ctx *rpctypes.Context,
path string,
data cmn.HexBytes,
height int64,
prove bool,
) (*ctypes.ResultABCIQuery, error) {
resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{
Path: path,
Data: data,


+ 6 - 2  rpc/core/blocks.go

@@ -150,7 +150,9 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) {
// "precommits": [
// {
// "signature": {
// "data": "12C0D8893B8A38224488DC1DE6270DF76BB1A5E9DB1C68577706A6A97C6EC34FFD12339183D5CA8BC2F46148773823DE905B7F6F5862FD564038BB7AE03BF50D",
// "data": "12C0D8893B8A38224488DC1DE6270DF76BB1A5E9DB" +
// "1C68577706A6A97C6EC34FFD12339183D5CA8BC2F4" +
// "6148773823DE905B7F6F5862FD564038BB7AE03BF50D",
// "type": "ed25519"
// },
// "block_id": {
@@ -267,7 +269,9 @@ func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error)
// "precommits": [
// {
// "signature": {
// "data": "00970429FEC652E9E21D106A90AE8C5413759A7488775CEF4A3F44DC46C7F9D941070E4FBE9ED54DF247FA3983359A0C3A238D61DE55C75C9116D72ABC9CF50F",
// "data": "00970429FEC652E9E21D106A90AE8C5413759A74887" +
// "75CEF4A3F44DC46C7F9D941070E4FBE9ED54DF247FA" +
// "3983359A0C3A238D61DE55C75C9116D72ABC9CF50F",
// "type": "ed25519"
// },
// "block_id": {


+ 11 - 5  rpc/core/doc.go

@@ -12,14 +12,18 @@ See it here: https://github.com/tendermint/tendermint/tree/master/rpc/lib
## Configuration
RPC can be configured by tuning parameters under `[rpc]` table in the `$TMHOME/config/config.toml` file or by using the `--rpc.X` command-line flags.
RPC can be configured by tuning parameters under `[rpc]` table in the `$TMHOME/config/config.toml` file
or by using the `--rpc.X` command-line flags.
Default rpc listen address is `tcp://0.0.0.0:26657`. To set another address, set the `laddr` config parameter to desired value.
CORS (Cross-Origin Resource Sharing) can be enabled by setting `cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters.
Default rpc listen address is `tcp://0.0.0.0:26657`.
To set another address, set the `laddr` config parameter to desired value.
CORS (Cross-Origin Resource Sharing) can be enabled by setting
`cors_allowed_origins`, `cors_allowed_methods`, `cors_allowed_headers` config parameters.
## Arguments
Arguments which expect strings or byte arrays may be passed as quoted strings, like `"abc"` or as `0x`-prefixed strings, like `0x616263`.
Arguments which expect strings or byte arrays may be passed as quoted strings,
like `"abc"` or as `0x`-prefixed strings, like `0x616263`.
## URI/HTTP
@@ -58,7 +62,9 @@ JSONRPC requests can be POST'd to the root RPC endpoint via HTTP (e.g. `http://l
## JSONRPC/websockets
JSONRPC requests can be made via websocket. The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets.
JSONRPC requests can be made via websocket.
The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`.
Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets.
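As a concrete companion to the websocket note above, a sketch of subscribing to new-block events with the Go RPC client. The `NewHTTP` constructor signature and the `tm.event` query key are assumptions about this release; later versions differ.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://localhost:26657", "/websocket")
	if err := c.Start(); err != nil { // opens the websocket connection
		panic(err)
	}
	defer c.Stop()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// The query uses the same grammar as libs/pubsub/query earlier in this diff.
	events, err := c.Subscribe(ctx, "example-subscriber", "tm.event='NewBlock'")
	if err != nil {
		panic(err)
	}

	fmt.Println("first event:", <-events)
}
```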
## More Examples


+ 3 - 1  rpc/core/evidence.go

@@ -19,7 +19,9 @@ import (
// // handle error
// }
// defer client.Stop()
// res, err := client.BroadcastEvidence(&types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB})
// res, err := client.BroadcastEvidence(
// &types.DuplicateVoteEvidence{PubKey: ev.PubKey, VoteA: ev.VoteA, VoteB: ev.VoteB},
// )
// ```
//
// > The above command returns JSON structured like this:


+ 4 - 1  rpc/grpc/grpc_test.go

@ -25,7 +25,10 @@ func TestMain(m *testing.M) {
} }
func TestBroadcastTx(t *testing.T) { func TestBroadcastTx(t *testing.T) {
res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")})
res, err := rpctest.GetGRPCClient().BroadcastTx(
context.Background(),
&core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")},
)
require.NoError(t, err) require.NoError(t, err)
require.EqualValues(t, 0, res.CheckTx.Code) require.EqualValues(t, 0, res.CheckTx.Code)
require.EqualValues(t, 0, res.DeliverTx.Code) require.EqualValues(t, 0, res.DeliverTx.Code)


+ 22 - 4  rpc/lib/client/http_client.go

@ -291,7 +291,11 @@ func (b *JSONRPCRequestBatch) Send() ([]interface{}, error) {
// Call enqueues a request to call the given RPC method with the specified // Call enqueues a request to call the given RPC method with the specified
// parameters, in the same way that the `JSONRPCClient.Call` function would. // parameters, in the same way that the `JSONRPCClient.Call` function would.
func (b *JSONRPCRequestBatch) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) {
func (b *JSONRPCRequestBatch) Call(
method string,
params map[string]interface{},
result interface{},
) (interface{}, error) {
request, err := types.MapToRequest(b.client.cdc, b.client.id, method, params) request, err := types.MapToRequest(b.client.cdc, b.client.id, method, params)
if err != nil { if err != nil {
return nil, err return nil, err
@ -351,7 +355,12 @@ func (c *URIClient) SetCodec(cdc *amino.Codec) {
//------------------------------------------------ //------------------------------------------------
func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCStringID, result interface{}) (interface{}, error) {
func unmarshalResponseBytes(
cdc *amino.Codec,
responseBytes []byte,
expectedID types.JSONRPCStringID,
result interface{},
) (interface{}, error) {
// Read response. If rpc/core/types is imported, the result will unmarshal // Read response. If rpc/core/types is imported, the result will unmarshal
// into the correct type. // into the correct type.
// log.Notice("response", "response", string(responseBytes)) // log.Notice("response", "response", string(responseBytes))
@ -377,7 +386,12 @@ func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, expectedID t
return result, nil return result, nil
} }
func unmarshalResponseBytesArray(cdc *amino.Codec, responseBytes []byte, expectedID types.JSONRPCStringID, results []interface{}) ([]interface{}, error) {
func unmarshalResponseBytesArray(
cdc *amino.Codec,
responseBytes []byte,
expectedID types.JSONRPCStringID,
results []interface{},
) ([]interface{}, error) {
var ( var (
err error err error
responses []types.RPCResponse responses []types.RPCResponse
@ -390,7 +404,11 @@ func unmarshalResponseBytesArray(cdc *amino.Codec, responseBytes []byte, expecte
// and unsuccessful responses. // and unsuccessful responses.
if len(results) != len(responses) { if len(results) != len(responses) {
return nil, errors.Errorf("expected %d result objects into which to inject responses, but got %d", len(responses), len(results))
return nil, errors.Errorf(
"expected %d result objects into which to inject responses, but got %d",
len(responses),
len(results),
)
} }
for i, response := range responses { for i, response := range responses {


+ 4 - 1  rpc/lib/client/ws_client.go

@ -414,7 +414,10 @@ func (c *WSClient) writeRoutine() {
case <-c.readRoutineQuit: case <-c.readRoutineQuit:
return return
case <-c.Quit(): case <-c.Quit():
if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil {
if err := c.conn.WriteMessage(
websocket.CloseMessage,
websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""),
); err != nil {
c.Logger.Error("failed to write message", "err", err) c.Logger.Error("failed to write message", "err", err)
} }
return return


+ 71 - 16  rpc/lib/server/handlers.go

@@ -23,8 +23,10 @@ import (
types "github.com/tendermint/tendermint/rpc/lib/types"
)
// RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions.
// "result" is the interface on which the result objects are registered, and is popualted with every RPCResponse
// RegisterRPCFuncs adds a route for each function in the funcMap,
// as well as general jsonrpc and websocket handlers for all functions.
// "result" is the interface on which the result objects are registered,
// and is popualted with every RPCResponse
func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) {
// HTTP endpoints
for funcName, rpcFunc := range funcMap {
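For context on the comment above, a sketch of wiring a handler through `RegisterRPCFuncs`. The `hello` method, its argument name, and serving via `net/http` are illustrative; Tendermint's own server start-up helpers are not shown.

```go
package main

import (
	"net/http"
	"os"

	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/libs/log"
	rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
	rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
)

// hello is a hypothetical RPC method; the *rpctypes.Context first argument
// follows the convention used throughout this diff.
func hello(ctx *rpctypes.Context, name string) (string, error) {
	return "hello " + name, nil
}

func main() {
	funcMap := map[string]*rpcserver.RPCFunc{
		"hello": rpcserver.NewRPCFunc(hello, "name"),
	}

	mux := http.NewServeMux()
	cdc := amino.NewCodec()
	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// One HTTP endpoint per entry, plus the generic JSONRPC and websocket
	// handlers described in the comment above.
	rpcserver.RegisterRPCFuncs(mux, funcMap, cdc, logger)

	http.ListenAndServe("localhost:26657", mux) //nolint:errcheck
}
```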
@ -103,7 +105,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body) b, err := ioutil.ReadAll(r.Body)
if err != nil { if err != nil {
WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(types.JSONRPCStringID(""), errors.Wrap(err, "error reading request body")))
WriteRPCResponseHTTP(
w,
types.RPCInvalidRequestError(
types.JSONRPCStringID(""),
errors.Wrap(err, "error reading request body"),
),
)
return return
} }
// if its an empty request (like from a browser), // if its an empty request (like from a browser),
@ -122,7 +130,13 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo
// next, try to unmarshal as a single request // next, try to unmarshal as a single request
var request types.RPCRequest var request types.RPCRequest
if err := json.Unmarshal(b, &request); err != nil { if err := json.Unmarshal(b, &request); err != nil {
WriteRPCResponseHTTP(w, types.RPCParseError(types.JSONRPCStringID(""), errors.Wrap(err, "error unmarshalling request")))
WriteRPCResponseHTTP(
w,
types.RPCParseError(
types.JSONRPCStringID(""),
errors.Wrap(err, "error unmarshalling request"),
),
)
return return
} }
requests = []types.RPCRequest{request} requests = []types.RPCRequest{request}
@ -133,11 +147,16 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo
// A Notification is a Request object without an "id" member. // A Notification is a Request object without an "id" member.
// The Server MUST NOT reply to a Notification, including those that are within a batch request. // The Server MUST NOT reply to a Notification, including those that are within a batch request.
if request.ID == types.JSONRPCStringID("") { if request.ID == types.JSONRPCStringID("") {
logger.Debug("HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)")
logger.Debug(
"HTTPJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)",
)
continue continue
} }
if len(r.URL.Path) > 1 { if len(r.URL.Path) > 1 {
responses = append(responses, types.RPCInvalidRequestError(request.ID, errors.Errorf("path %s is invalid", r.URL.Path)))
responses = append(
responses,
types.RPCInvalidRequestError(request.ID, errors.Errorf("path %s is invalid", r.URL.Path)),
)
continue continue
} }
rpcFunc, ok := funcMap[request.Method] rpcFunc, ok := funcMap[request.Method]
@ -150,7 +169,10 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo
if len(request.Params) > 0 { if len(request.Params) > 0 {
fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params) fnArgs, err := jsonParamsToArgs(rpcFunc, cdc, request.Params)
if err != nil { if err != nil {
responses = append(responses, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "error converting json params to arguments")))
responses = append(
responses,
types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "error converting json params to arguments")),
)
continue continue
} }
args = append(args, fnArgs...) args = append(args, fnArgs...)
@ -172,8 +194,8 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger lo
func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) {
// Since the pattern "/" matches all paths not matched by other registered patterns we check whether the path is indeed
// "/", otherwise return a 404 error
// Since the pattern "/" matches all paths not matched by other registered patterns,
// we check whether the path is indeed "/", otherwise return a 404 error
if r.URL.Path != "/" { if r.URL.Path != "/" {
http.NotFound(w, r) http.NotFound(w, r)
return return
@ -183,7 +205,12 @@ func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc {
} }
} }
func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json.RawMessage, argsOffset int) ([]reflect.Value, error) {
func mapParamsToArgs(
rpcFunc *RPCFunc,
cdc *amino.Codec,
params map[string]json.RawMessage,
argsOffset int,
) ([]reflect.Value, error) {
values := make([]reflect.Value, len(rpcFunc.argNames)) values := make([]reflect.Value, len(rpcFunc.argNames))
for i, argName := range rpcFunc.argNames { for i, argName := range rpcFunc.argNames {
argType := rpcFunc.args[i+argsOffset] argType := rpcFunc.args[i+argsOffset]
@ -203,7 +230,12 @@ func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json.
return values, nil return values, nil
} }
func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) {
func arrayParamsToArgs(
rpcFunc *RPCFunc,
cdc *amino.Codec,
params []json.RawMessage,
argsOffset int,
) ([]reflect.Value, error) {
if len(rpcFunc.argNames) != len(params) { if len(rpcFunc.argNames) != len(params) {
return nil, errors.Errorf("expected %v parameters (%v), got %v (%v)", return nil, errors.Errorf("expected %v parameters (%v), got %v (%v)",
len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) len(rpcFunc.argNames), rpcFunc.argNames, len(params), params)
@ -272,7 +304,13 @@ func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func
fnArgs, err := httpParamsToArgs(rpcFunc, cdc, r) fnArgs, err := httpParamsToArgs(rpcFunc, cdc, r)
if err != nil { if err != nil {
WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(types.JSONRPCStringID(""), errors.Wrap(err, "error converting http params to arguments")))
WriteRPCResponseHTTP(
w,
types.RPCInvalidParamsError(
types.JSONRPCStringID(""),
errors.Wrap(err, "error converting http params to arguments"),
),
)
return
}
args = append(args, fnArgs...)
@ -363,7 +401,16 @@ func _nonJSONStringToArg(cdc *amino.Codec, rt reflect.Type, arg string) (reflect
var expectingString, expectingByteSlice, expectingInt bool
switch rt.Kind() {
case reflect.Int, reflect.Uint, reflect.Int8, reflect.Uint8, reflect.Int16, reflect.Uint16, reflect.Int32, reflect.Uint32, reflect.Int64, reflect.Uint64:
case reflect.Int,
reflect.Uint,
reflect.Int8,
reflect.Uint8,
reflect.Int16,
reflect.Uint16,
reflect.Int32,
reflect.Uint32,
reflect.Int64,
reflect.Uint64:
expectingInt = true
case reflect.String:
expectingString = true
@ -661,7 +708,9 @@ func (wsc *wsConnection) readRoutine() {
// A Notification is a Request object without an "id" member.
// The Server MUST NOT reply to a Notification, including those that are within a batch request.
if request.ID == types.JSONRPCStringID("") {
wsc.Logger.Debug("WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)")
wsc.Logger.Debug(
"WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)",
)
continue
}
@ -677,7 +726,9 @@ func (wsc *wsConnection) readRoutine() {
if len(request.Params) > 0 {
fnArgs, err := jsonParamsToArgs(rpcFunc, wsc.cdc, request.Params)
if err != nil {
wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments")))
wsc.WriteRPCResponse(
types.RPCInternalError(request.ID, errors.Wrap(err, "error converting json params to arguments")),
)
continue
}
args = append(args, fnArgs...)
@ -773,7 +824,11 @@ type WebsocketManager struct {
// NewWebsocketManager returns a new WebsocketManager that passes a map of
// functions, connection options and logger to new WS connections.
func NewWebsocketManager(funcMap map[string]*RPCFunc, cdc *amino.Codec, wsConnOptions ...func(*wsConnection)) *WebsocketManager {
func NewWebsocketManager(
funcMap map[string]*RPCFunc,
cdc *amino.Codec,
wsConnOptions ...func(*wsConnection),
) *WebsocketManager {
return &WebsocketManager{
funcMap: funcMap,
cdc: cdc,


+8 -2 rpc/lib/server/handlers_test.go

@ -54,7 +54,8 @@ func TestRPCParams(t *testing.T) {
// bad
{`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")},
{`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")},
{`{"method": "c", "id": "0", "params": a}`, "invalid character", types.JSONRPCStringID("")}, // id not captured in JSON parsing failures
// id not captured in JSON parsing failures
{`{"method": "c", "id": "0", "params": a}`, "invalid character", types.JSONRPCStringID("")},
{`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", types.JSONRPCStringID("0")},
{`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", types.JSONRPCStringID("0")},
{`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", types.JSONRPCStringID("0")},
@ -253,7 +254,12 @@ func TestWebsocketManagerHandler(t *testing.T) {
}
// check basic functionality works
req, err := types.MapToRequest(amino.NewCodec(), types.JSONRPCStringID("TestWebsocketManager"), "c", map[string]interface{}{"s": "a", "i": 10})
req, err := types.MapToRequest(
amino.NewCodec(),
types.JSONRPCStringID("TestWebsocketManager"),
"c",
map[string]interface{}{"s": "a", "i": 10},
)
require.NoError(t, err)
err = c.WriteJSON(req)
require.NoError(t, err)


+5 -1 rpc/lib/server/http_server.go

@ -160,7 +160,11 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler
"Panic in RPC HTTP handler", "err", e, "stack",
string(debug.Stack()),
)
WriteRPCResponseHTTPError(rww, http.StatusInternalServerError, types.RPCInternalError(types.JSONRPCStringID(""), e.(error)))
WriteRPCResponseHTTPError(
rww,
http.StatusInternalServerError,
types.RPCInternalError(types.JSONRPCStringID(""), e.(error)),
)
}
}


+8 -2 rpc/lib/types/types_test.go

@ -43,7 +43,10 @@ func TestResponses(t *testing.T) {
d := RPCParseError(jsonid, errors.New("Hello world"))
e, _ := json.Marshal(d)
f := fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error. Invalid JSON","data":"Hello world"}}`, tt.expected)
f := fmt.Sprintf(
`{"jsonrpc":"2.0","id":%v,"error":{"code":-32700,"message":"Parse error. Invalid JSON","data":"Hello world"}}`,
tt.expected,
)
assert.Equal(f, string(e))
g := RPCMethodNotFoundError(jsonid)
@ -58,7 +61,10 @@ func TestUnmarshallResponses(t *testing.T) {
cdc := amino.NewCodec()
for _, tt := range responseTests {
response := &RPCResponse{}
err := json.Unmarshal([]byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)), response)
err := json.Unmarshal(
[]byte(fmt.Sprintf(`{"jsonrpc":"2.0","id":%v,"result":{"Value":"hello"}}`, tt.expected)),
response,
)
assert.Nil(err)
a := NewRPCSuccessResponse(cdc, tt.id, &SampleResult{"hello"})
assert.Equal(*response, a)


+5 -1 scripts/privValUpgrade.go

@ -16,7 +16,11 @@ func main() {
args := os.Args[1:]
if len(args) != 3 {
fmt.Println("Expected three args: <old path> <new key path> <new state path>")
fmt.Println("Eg. ~/.tendermint/config/priv_validator.json ~/.tendermint/config/priv_validator_key.json ~/.tendermint/data/priv_validator_state.json")
fmt.Println(
"Eg. ~/.tendermint/config/priv_validator.json" +
" ~/.tendermint/config/priv_validator_key.json" +
" ~/.tendermint/data/priv_validator_state.json",
)
os.Exit(1)
}
err := loadAndUpgrade(args[0], args[1], args[2])


+5 -1 scripts/privValUpgrade_test.go

@ -11,6 +11,10 @@ import (
"github.com/tendermint/tendermint/privval"
)
const lastSignBytes = "750802110500000000000000220B08B398F3E00510F48DA6402A480A20FC25" +
"8973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4CAD312240A20C971B286ACB8AA" +
"A6FCA0365EB0A660B189EDC08B46B5AF2995DEFA51A28D215B10013211746573742D636861696E2D533245415533"
const oldPrivvalContent = `{
"address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D",
"pub_key": {
@ -21,7 +25,7 @@ const oldPrivvalContent = `{
"last_round": "0",
"last_step": 3,
"last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==",
"last_signbytes": "750802110500000000000000220B08B398F3E00510F48DA6402A480A20FC258973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4CAD312240A20C971B286ACB8AAA6FCA0365EB0A660B189EDC08B46B5AF2995DEFA51A28D215B10013211746573742D636861696E2D533245415533",
"last_signbytes": "` + lastSignBytes + `",
"priv_key": { "priv_key": {
"type": "tendermint/PrivKeyEd25519", "type": "tendermint/PrivKeyEd25519",
"value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ==" "value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ=="


+17 -3 state/errors.go

@ -50,18 +50,32 @@ func (e ErrUnknownBlock) Error() string {
}
func (e ErrBlockHashMismatch) Error() string {
return fmt.Sprintf("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height)
return fmt.Sprintf(
"App block hash (%X) does not match core block hash (%X) for height %d",
e.AppHash,
e.CoreHash,
e.Height,
)
}
func (e ErrAppBlockHeightTooHigh) Error() string {
return fmt.Sprintf("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight)
}
func (e ErrLastStateMismatch) Error() string {
return fmt.Sprintf("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App)
return fmt.Sprintf(
"Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)",
e.Height,
e.Core,
e.App,
)
}
func (e ErrStateMismatch) Error() string {
return fmt.Sprintf("State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected)
return fmt.Sprintf(
"State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n",
e.Got,
e.Expected,
)
}
func (e ErrNoValSetForHeight) Error() string {


+15 -2 state/execution.go

@ -49,7 +49,14 @@ func BlockExecutorWithMetrics(metrics *Metrics) BlockExecutorOption {
// NewBlockExecutor returns a new BlockExecutor with a NopEventBus.
// Call SetEventBus to provide one.
func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool mempl.Mempool, evpool EvidencePool, options ...BlockExecutorOption) *BlockExecutor {
func NewBlockExecutor(
db dbm.DB,
logger log.Logger,
proxyApp proxy.AppConnConsensus,
mempool mempl.Mempool,
evpool EvidencePool,
options ...BlockExecutorOption,
) *BlockExecutor {
res := &BlockExecutor{
db: db,
proxyApp: proxyApp,
@ -445,7 +452,13 @@ func updateState(
// Fire NewBlock, NewBlockHeader.
// Fire TxEvent for every tx.
// NOTE: if Tendermint crashes before commit, some or all of these events may be published again.
func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses, validatorUpdates []*types.Validator) {
func fireEvents(
logger log.Logger,
eventBus types.BlockEventPublisher,
block *types.Block,
abciResponses *ABCIResponses,
validatorUpdates []*types.Validator,
) {
eventBus.PublishEventNewBlock(types.EventDataNewBlock{
Block: block,
ResultBeginBlock: *abciResponses.BeginBlock,


+19 -3 state/execution_test.go

@ -309,7 +309,13 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
state, stateDB, _ := makeState(1, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
blockExec := sm.NewBlockExecutor(
stateDB,
log.TestingLogger(),
proxyApp.Consensus(),
mock.Mempool{},
sm.MockEvidencePool{},
)
eventBus := types.NewEventBus()
err = eventBus.Start()
@ -317,7 +323,11 @@ func TestEndBlockValidatorUpdates(t *testing.T) {
defer eventBus.Stop()
blockExec.SetEventBus(eventBus)
updatesSub, err := eventBus.Subscribe(context.Background(), "TestEndBlockValidatorUpdates", types.EventQueryValidatorSetUpdates)
updatesSub, err := eventBus.Subscribe(
context.Background(),
"TestEndBlockValidatorUpdates",
types.EventQueryValidatorSetUpdates,
)
require.NoError(t, err)
block := makeBlock(state, 1)
@ -366,7 +376,13 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) {
defer proxyApp.Stop()
state, stateDB, _ := makeState(1, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
blockExec := sm.NewBlockExecutor(
stateDB,
log.TestingLogger(),
proxyApp.Consensus(),
mock.Mempool{},
sm.MockEvidencePool{},
)
block := makeBlock(state, 1)
blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()}


+25 -5 state/helpers_test.go

@ -69,7 +69,12 @@ func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commi
return state, blockID, nil
}
func makeValidCommit(height int64, blockID types.BlockID, vals *types.ValidatorSet, privVals map[string]types.PrivValidator) (*types.Commit, error) {
func makeValidCommit(
height int64,
blockID types.BlockID,
vals *types.ValidatorSet,
privVals map[string]types.PrivValidator,
) (*types.Commit, error) {
sigs := make([]*types.CommitSig, 0)
for i := 0; i < vals.Size(); i++ {
_, val := vals.GetByIndex(i)
@ -123,7 +128,13 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida
}
func makeBlock(state sm.State, height int64) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, state.Validators.GetProposer().Address)
block, _ := state.MakeBlock(
height,
makeTxs(state.LastBlockHeight),
new(types.Commit),
nil,
state.Validators.GetProposer().Address,
)
return block
}
@ -152,7 +163,10 @@ func makeConsensusParams(
}
}
func makeHeaderPartsResponsesValPubKeyChange(state sm.State, pubkey crypto.PubKey) (types.Header, types.BlockID, *sm.ABCIResponses) {
func makeHeaderPartsResponsesValPubKeyChange(
state sm.State,
pubkey crypto.PubKey,
) (types.Header, types.BlockID, *sm.ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &sm.ABCIResponses{
@ -173,7 +187,10 @@ func makeHeaderPartsResponsesValPubKeyChange(state sm.State, pubkey crypto.PubKe
return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses
}
func makeHeaderPartsResponsesValPowerChange(state sm.State, power int64) (types.Header, types.BlockID, *sm.ABCIResponses) {
func makeHeaderPartsResponsesValPowerChange(
state sm.State,
power int64,
) (types.Header, types.BlockID, *sm.ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &sm.ABCIResponses{
@ -193,7 +210,10 @@ func makeHeaderPartsResponsesValPowerChange(state sm.State, power int64) (types.
return block.Header, types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{}}, abciResponses
}
func makeHeaderPartsResponsesParams(state sm.State, params types.ConsensusParams) (types.Header, types.BlockID, *sm.ABCIResponses) {
func makeHeaderPartsResponsesParams(
state sm.State,
params types.ConsensusParams,
) (types.Header, types.BlockID, *sm.ABCIResponses) {
block := makeBlock(state, state.LastBlockHeight+1)
abciResponses := &sm.ABCIResponses{


+38 -6 state/state_test.go

@ -378,7 +378,11 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) {
// https://github.com/cwgoes/tm-proposer-idris
// and inferred to generalize to N-1
bound := N - 1
require.True(t, abs <= bound, fmt.Sprintf("Case %d val %d (%d): got %d, expected %d", caseNum, i, N, gotFreq, expectFreq))
require.True(
t,
abs <= bound,
fmt.Sprintf("Case %d val %d (%d): got %d, expected %d", caseNum, i, N, gotFreq, expectFreq),
)
}
}
@ -563,7 +567,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
expectedVal1Prio -= totalPower // 1, val1 proposer
assert.EqualValues(t, expectedVal1Prio, updatedVal1.ProposerPriority)
assert.EqualValues(t, expectedVal2Prio, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
assert.EqualValues(
t,
expectedVal2Prio,
updatedVal2.ProposerPriority,
"unexpected proposer priority for validator: %v",
updatedVal2,
)
validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
require.NoError(t, err)
@ -586,8 +596,20 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
expectedVal1Prio2 := expectedVal1Prio + val1VotingPower // 1 + 10 == 11
expectedVal1Prio2 -= totalPower // -9, val1 proposer
assert.EqualValues(t, expectedVal1Prio2, updatedVal1.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
assert.EqualValues(t, expectedVal2Prio2, updatedVal2.ProposerPriority, "unexpected proposer priority for validator: %v", updatedVal2)
assert.EqualValues(
t,
expectedVal1Prio2,
updatedVal1.ProposerPriority,
"unexpected proposer priority for validator: %v",
updatedVal2,
)
assert.EqualValues(
t,
expectedVal2Prio2,
updatedVal2.ProposerPriority,
"unexpected proposer priority for validator: %v",
updatedVal2,
)
// no changes in voting power and both validators have same voting power
// -> proposers should alternate:
@ -616,7 +638,13 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)
assert.NoError(t, err)
// alternate (and cyclic priorities):
assert.NotEqual(t, updatedState.Validators.Proposer.Address, updatedState.NextValidators.Proposer.Address, "iter: %v", i)
assert.NotEqual(
t,
updatedState.Validators.Proposer.Address,
updatedState.NextValidators.Proposer.Address,
"iter: %v",
i,
)
assert.Equal(t, oldState.Validators.Proposer.Address, updatedState.NextValidators.Proposer.Address, "iter: %v", i)
_, updatedVal1 = updatedState.NextValidators.GetByAddress(val1PubKey.Address())
@ -643,7 +671,11 @@ func TestLargeGenesisValidator(t *testing.T) {
genesisVotingPower := types.MaxTotalVotingPower / 1000
genesisPubKey := ed25519.GenPrivKey().PubKey()
// fmt.Println("genesis addr: ", genesisPubKey.Address())
genesisVal := &types.Validator{Address: genesisPubKey.Address(), PubKey: genesisPubKey, VotingPower: genesisVotingPower}
genesisVal := &types.Validator{
Address: genesisPubKey.Address(),
PubKey: genesisPubKey,
VotingPower: genesisVotingPower,
}
// reset state validators to above validator
state.Validators = types.NewValidatorSet([]*types.Validator{genesisVal})
state.NextValidators = state.Validators


+4 -1 state/txindex/indexer_service.go

@ -35,7 +35,10 @@ func (is *IndexerService) OnStart() error {
// cancelled due to not pulling messages fast enough. Cause this might
// sometimes happen when there are no other subscribers.
blockHeadersSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryNewBlockHeader)
blockHeadersSub, err := is.eventBus.SubscribeUnbuffered(
context.Background(),
subscriber,
types.EventQueryNewBlockHeader)
if err != nil {
return err
}


+12 -2 state/txindex/kv/kv.go

@ -370,7 +370,12 @@ func isRangeOperation(op query.Operator) bool {
// non-intersecting matches are removed.
//
// NOTE: filteredHashes may be empty if no previous condition has matched.
func (txi *TxIndex) match(c query.Condition, startKeyBz []byte, filteredHashes map[string][]byte, firstRun bool) map[string][]byte {
func (txi *TxIndex) match(
c query.Condition,
startKeyBz []byte,
filteredHashes map[string][]byte,
firstRun bool,
) map[string][]byte {
// A previous match was attempted but resulted in no matches, so we return
// no matches (assuming AND operand).
if !firstRun && len(filteredHashes) == 0 {
@ -435,7 +440,12 @@ func (txi *TxIndex) match(c query.Condition, startKeyBz []byte, filteredHashes m
// any non-intersecting matches are removed.
//
// NOTE: filteredHashes may be empty if no previous condition has matched.
func (txi *TxIndex) matchRange(r queryRange, startKey []byte, filteredHashes map[string][]byte, firstRun bool) map[string][]byte {
func (txi *TxIndex) matchRange(
r queryRange,
startKey []byte,
filteredHashes map[string][]byte,
firstRun bool,
) map[string][]byte {
// A previous match was attempted but resulted in no matches, so we return
// no matches (assuming AND operand).
if !firstRun && len(filteredHashes) == 0 {


+53 -7 state/validation_test.go

@ -23,7 +23,13 @@ func TestValidateBlockHeader(t *testing.T) {
defer proxyApp.Stop()
state, stateDB, privVals := makeState(3, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
blockExec := sm.NewBlockExecutor(
stateDB,
log.TestingLogger(),
proxyApp.Consensus(),
mock.Mempool{},
sm.MockEvidencePool{},
)
lastCommit := types.NewCommit(types.BlockID{}, nil)
// some bad values
@ -89,7 +95,13 @@ func TestValidateBlockCommit(t *testing.T) {
defer proxyApp.Stop()
state, stateDB, privVals := makeState(1, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
blockExec := sm.NewBlockExecutor(
stateDB,
log.TestingLogger(),
proxyApp.Consensus(),
mock.Mempool{},
sm.MockEvidencePool{},
)
lastCommit := types.NewCommit(types.BlockID{}, nil)
wrongPrecommitsCommit := types.NewCommit(types.BlockID{}, nil)
badPrivVal := types.NewMockPV()
@ -101,7 +113,13 @@ func TestValidateBlockCommit(t *testing.T) {
#2589: ensure state.LastValidators.VerifyCommit fails here
*/
// should be height-1 instead of height
wrongHeightVote, err := types.MakeVote(height, state.LastBlockID, state.Validators, privVals[proposerAddr.String()], chainID)
wrongHeightVote, err := types.MakeVote(
height,
state.LastBlockID,
state.Validators,
privVals[proposerAddr.String()],
chainID,
)
require.NoError(t, err, "height %d", height)
wrongHeightCommit := types.NewCommit(state.LastBlockID, []*types.CommitSig{wrongHeightVote.CommitSig()})
block, _ := state.MakeBlock(height, makeTxs(height), wrongHeightCommit, nil, proposerAddr)
@ -115,7 +133,13 @@ func TestValidateBlockCommit(t *testing.T) {
block, _ = state.MakeBlock(height, makeTxs(height), wrongPrecommitsCommit, nil, proposerAddr)
err = blockExec.ValidateBlock(state, block)
_, isErrInvalidCommitPrecommits := err.(types.ErrInvalidCommitPrecommits)
require.True(t, isErrInvalidCommitPrecommits, "expected ErrInvalidCommitPrecommits at height %d but got: %v", height, err)
require.True(
t,
isErrInvalidCommitPrecommits,
"expected ErrInvalidCommitPrecommits at height %d but got: %v",
height,
err,
)
}
/*
@ -123,7 +147,15 @@ func TestValidateBlockCommit(t *testing.T) {
*/
var err error
var blockID types.BlockID
state, blockID, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, nil)
state, blockID, lastCommit, err = makeAndCommitGoodBlock(
state,
height,
lastCommit,
proposerAddr,
blockExec,
privVals,
nil,
)
require.NoError(t, err, "height %d", height)
/*
@ -152,7 +184,13 @@ func TestValidateBlockEvidence(t *testing.T) {
defer proxyApp.Stop()
state, stateDB, privVals := makeState(3, 1)
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mock.Mempool{}, sm.MockEvidencePool{})
blockExec := sm.NewBlockExecutor(
stateDB,
log.TestingLogger(),
proxyApp.Consensus(),
mock.Mempool{},
sm.MockEvidencePool{},
)
lastCommit := types.NewCommit(types.BlockID{}, nil)
for height := int64(1); height < validationTestsStopHeight; height++ {
@ -190,7 +228,15 @@ func TestValidateBlockEvidence(t *testing.T) {
}
var err error
state, _, lastCommit, err = makeAndCommitGoodBlock(state, height, lastCommit, proposerAddr, blockExec, privVals, evidence)
state, _, lastCommit, err = makeAndCommitGoodBlock(
state,
height,
lastCommit,
proposerAddr,
blockExec,
privVals,
evidence,
)
require.NoError(t, err, "height %d", height)
}
}


+10 -2 tools/tm-bench/main.go

@ -22,6 +22,8 @@ func main() {
var durationInt, txsRate, connections, txSize int
var verbose bool
var outputFormat, broadcastTxMethod string
var usage = "tm-bench [-c 1] [-T 10] [-r 1000] [-s 250]" +
" [endpoints] [-output-format <plain|json> [-broadcast-tx-method <async|sync|commit>]]"
flagSet := flag.NewFlagSet("tm-bench", flag.ExitOnError)
flagSet.IntVar(&connections, "c", 1, "Connections to keep open per endpoint")
@ -29,14 +31,20 @@ func main() {
flagSet.IntVar(&txsRate, "r", 1000, "Txs per second to send in a connection")
flagSet.IntVar(&txSize, "s", 250, "The size of a transaction in bytes, must be greater than or equal to 40.")
flagSet.StringVar(&outputFormat, "output-format", "plain", "Output format: plain or json")
flagSet.StringVar(&broadcastTxMethod, "broadcast-tx-method", "async", "Broadcast method: async (no guarantees; fastest), sync (ensures tx is checked) or commit (ensures tx is checked and committed; slowest)")
flagSet.StringVar(
&broadcastTxMethod,
"broadcast-tx-method",
"async",
"Broadcast method: async (no guarantees; fastest),"+
" sync (ensures tx is checked) or commit (ensures tx is checked and committed; slowest)",
)
flagSet.BoolVar(&verbose, "v", false, "Verbose output")
flagSet.Usage = func() {
fmt.Println(`Tendermint blockchain benchmarking tool.
Usage:
tm-bench [-c 1] [-T 10] [-r 1000] [-s 250] [endpoints] [-output-format <plain|json> [-broadcast-tx-method <async|sync|commit>]]
` + usage + `
Examples:
	tm-bench localhost:26657`)


+6 -1 tools/tm-monitor/monitor/monitor.go

@ -165,7 +165,12 @@ func (m *Monitor) Stop() {
}
// main loop where we listen for events from the node
func (m *Monitor) listen(nodeName string, blockCh <-chan tmtypes.Header, blockLatencyCh <-chan float64, disconnectCh <-chan bool, quit <-chan struct{}) {
func (m *Monitor) listen(
nodeName string,
blockCh <-chan tmtypes.Header,
blockLatencyCh <-chan float64,
disconnectCh <-chan bool,
quit <-chan struct{}) {
logger := m.logger.With("node", nodeName)
for {


+4 -1 tools/tm-monitor/monitor/monitor_test.go

@ -61,7 +61,10 @@ func createValidatorNode(t *testing.T) (n *monitor.Node, emMock *mock.EventMeter
stubs := make(map[string]interface{})
pubKey := ed25519.GenPrivKey().PubKey()
stubs["validators"] = ctypes.ResultValidators{BlockHeight: blockHeight, Validators: []*tmtypes.Validator{tmtypes.NewValidator(pubKey, 0)}}
stubs["validators"] = ctypes.ResultValidators{
BlockHeight: blockHeight,
Validators: []*tmtypes.Validator{tmtypes.NewValidator(pubKey, 0)},
}
stubs["status"] = ctypes.ResultStatus{ValidatorInfo: ctypes.ValidatorInfo{PubKey: pubKey}}
cdc := amino.NewCodec()
rpcClientMock := &mock.RpcClient{Stubs: stubs}


+5 -1 tools/tm-monitor/monitor/node.go

@ -53,7 +53,11 @@ func NewNode(rpcAddr string, options ...func(*Node)) *Node {
return NewNodeWithEventMeterAndRpcClient(rpcAddr, em, rpcClient, options...)
}
func NewNodeWithEventMeterAndRpcClient(rpcAddr string, em eventMeter, rpcClient rpc_client.HTTPClient, options ...func(*Node)) *Node {
func NewNodeWithEventMeterAndRpcClient(
rpcAddr string,
em eventMeter,
rpcClient rpc_client.HTTPClient,
options ...func(*Node)) *Node {
n := &Node{
rpcAddr: rpcAddr,
em: em,


+4 -1 tools/tm-monitor/monitor/node_test.go

@ -79,7 +79,10 @@ func startValidatorNode(t *testing.T) (n *monitor.Node, emMock *mock.EventMeter)
stubs := make(map[string]interface{})
pubKey := ed25519.GenPrivKey().PubKey()
stubs["validators"] = ctypes.ResultValidators{BlockHeight: blockHeight, Validators: []*tmtypes.Validator{tmtypes.NewValidator(pubKey, 0)}}
stubs["validators"] = ctypes.ResultValidators{
BlockHeight: blockHeight,
Validators: []*tmtypes.Validator{tmtypes.NewValidator(pubKey, 0)},
}
stubs["status"] = ctypes.ResultStatus{ValidatorInfo: ctypes.ValidatorInfo{PubKey: pubKey}}
cdc := amino.NewCodec()
rpcClientMock := &mock.RpcClient{Stubs: stubs}


+6 -1 tools/tm-monitor/ton.go

@ -67,7 +67,12 @@ func (o *Ton) printHeader() {
fmt.Fprintf(o.Output, "Avg block time: %.3f ms\n", n.AvgBlockTime)
fmt.Fprintf(o.Output, "Avg tx throughput: %.3f per sec\n", n.AvgTxThroughput)
fmt.Fprintf(o.Output, "Avg block latency: %.3f ms\n", n.AvgBlockLatency)
fmt.Fprintf(o.Output, "Active nodes: %d/%d (health: %s) Validators: %d\n", n.NumNodesMonitoredOnline, n.NumNodesMonitored, n.GetHealthString(), n.NumValidators)
fmt.Fprintf(o.Output,
"Active nodes: %d/%d (health: %s) Validators: %d\n",
n.NumNodesMonitoredOnline,
n.NumNodesMonitored,
n.GetHealthString(),
n.NumValidators)
}
func (o *Ton) printTable() {


+7 -1 tools/tm-signer-harness/internal/test_harness_test.go

@ -121,7 +121,13 @@ func TestRemoteSignerVoteSigningFailed(t *testing.T) {
)
}
func newMockSignerServer(t *testing.T, th *TestHarness, privKey crypto.PrivKey, breakProposalSigning bool, breakVoteSigning bool) *privval.SignerServer {
func newMockSignerServer(
t *testing.T,
th *TestHarness,
privKey crypto.PrivKey,
breakProposalSigning bool,
breakVoteSigning bool,
) *privval.SignerServer {
mockPV := types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning)
dialerEndpoint := privval.NewSignerDialerEndpoint(


Some files were not shown because too many files changed in this diff
