You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1177 lines
37 KiB

9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
fix TestFullRound1 race (Refs #846) ``` ================== WARNING: DATA RACE Write at 0x00c42d7605f0 by goroutine 844: github.com/tendermint/tendermint/consensus.(*ConsensusState).updateToState() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:465 +0x59e I[11-14|22:37:28.781] Added to prevote vote="Vote{0:646753DCE124 1/02/1(Prevote) E9B19636DCDB {/CAD5FA805E8C.../}}" prevotes="VoteSet{H:1 R:2 T:1 +2/3:<nil> BA{2:X_} map[]}" github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1229 +0x16a9 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1135 +0x721 github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1087 +0x153 github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1114 +0xa34 github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1423 +0xdd6 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1317 +0x77 github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:565 +0x7a9 github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:523 +0x6d2 Previous read at 0x00c42d7605f0 by goroutine 654: github.com/tendermint/tendermint/consensus.validatePrevote() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/common_test.go:149 +0x57 
github.com/tendermint/tendermint/consensus.TestFullRound1() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state_test.go:256 +0x3c5 testing.tRunner() /usr/local/go/src/testing/testing.go:746 +0x16c Goroutine 844 (running) created at: github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:258 +0x8c github.com/tendermint/tendermint/consensus.startTestRound() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/common_test.go:118 +0x63 github.com/tendermint/tendermint/consensus.TestFullRound1() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state_test.go:247 +0x1fb testing.tRunner() /usr/local/go/src/testing/testing.go:746 +0x16c Goroutine 654 (running) created at: testing.(*T).Run() /usr/local/go/src/testing/testing.go:789 +0x568 testing.runTests.func1() /usr/local/go/src/testing/testing.go:1004 +0xa7 testing.tRunner() /usr/local/go/src/testing/testing.go:746 +0x16c testing.runTests() /usr/local/go/src/testing/testing.go:1002 +0x521 testing.(*M).Run() /usr/local/go/src/testing/testing.go:921 +0x206 main.main() github.com/tendermint/tendermint/consensus/_test/_testmain.go:106 +0x1d3 ================== ```
7 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
7 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
  1. package consensus
  2. import (
  3. "bytes"
  4. "context"
  5. "fmt"
  6. "testing"
  7. "time"
  8. "github.com/stretchr/testify/require"
  9. cstypes "github.com/tendermint/tendermint/consensus/types"
  10. cmn "github.com/tendermint/tendermint/libs/common"
  11. "github.com/tendermint/tendermint/libs/log"
  12. tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
  13. p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
  14. "github.com/tendermint/tendermint/types"
  15. )
// init resets the global test configuration so every test in this file runs
// against a fresh "consensus_state_test" config directory.
func init() {
	config = ResetConfig("consensus_state_test")
}
  19. func ensureProposeTimeout(timeoutPropose int) time.Duration {
  20. return time.Duration(timeoutPropose*2) * time.Millisecond
  21. }
  22. /*
  23. ProposeSuite
  24. x * TestProposerSelection0 - round robin ordering, round 0
  25. x * TestProposerSelection2 - round robin ordering, round 2++
  26. x * TestEnterProposeNoValidator - timeout into prevote round
  27. x * TestEnterPropose - finish propose without timing out (we have the proposal)
  28. x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil
  29. FullRoundSuite
  30. x * TestFullRound1 - 1 val, full successful round
  31. x * TestFullRoundNil - 1 val, full round of nil
  32. x * TestFullRound2 - 2 vals, both required for full round
  33. LockSuite
  34. x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
  35. x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
  36. x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
  37. x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
  38. x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
  39. * TestNetworkLock - once +1/3 precommits, network should be locked
  40. * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed
  41. SlashingSuite
  42. x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed
  43. x * TestSlashingPrecommits - a validator precommitting twice in a round gets slashed
  44. CatchupSuite
  45. * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote
  46. HaltSuite
  47. x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we should still commit
  48. */
  49. //----------------------------------------------------------------------------------------------------
  50. // ProposeSuite
  51. func TestStateProposerSelection0(t *testing.T) {
  52. cs1, vss := randConsensusState(4)
  53. height, round := cs1.Height, cs1.Round
  54. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  55. proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
  56. startTestRound(cs1, height, round)
  57. // wait for new round so proposer is set
  58. <-newRoundCh
  59. // lets commit a block and ensure proposer for the next height is correct
  60. prop := cs1.GetRoundState().Validators.GetProposer()
  61. if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
  62. t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
  63. }
  64. // wait for complete proposal
  65. <-proposalCh
  66. rs := cs1.GetRoundState()
  67. signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
  68. // wait for new round so next validator is set
  69. <-newRoundCh
  70. prop = cs1.GetRoundState().Validators.GetProposer()
  71. if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
  72. panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address))
  73. }
  74. }
  75. // Now let's do it all again, but starting from round 2 instead of 0
  76. func TestStateProposerSelection2(t *testing.T) {
  77. cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
  78. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  79. // this time we jump in at round 2
  80. incrementRound(vss[1:]...)
  81. incrementRound(vss[1:]...)
  82. startTestRound(cs1, cs1.Height, 2)
  83. <-newRoundCh // wait for the new round
  84. // everyone just votes nil. we get a new proposer each round
  85. for i := 0; i < len(vss); i++ {
  86. prop := cs1.GetRoundState().Validators.GetProposer()
  87. correctProposer := vss[(i+2)%len(vss)].GetAddress()
  88. if !bytes.Equal(prop.Address, correctProposer) {
  89. panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
  90. }
  91. rs := cs1.GetRoundState()
  92. signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
  93. <-newRoundCh // wait for the new round event each round
  94. incrementRound(vss[1:]...)
  95. }
  96. }
  97. // a non-validator should timeout into the prevote round
  98. func TestStateEnterProposeNoPrivValidator(t *testing.T) {
  99. cs, _ := randConsensusState(1)
  100. cs.SetPrivValidator(nil)
  101. height, round := cs.Height, cs.Round
  102. // Listen for propose timeout event
  103. timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
  104. startTestRound(cs, height, round)
  105. // if we're not a validator, EnterPropose should timeout
  106. ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))
  107. select {
  108. case <-timeoutCh:
  109. case <-ticker.C:
  110. panic("Expected EnterPropose to timeout")
  111. }
  112. if cs.GetRoundState().Proposal != nil {
  113. t.Error("Expected to make no proposal, since no privValidator")
  114. }
  115. }
  116. // a validator should not timeout of the prevote round (TODO: unless the block is really big!)
  117. func TestStateEnterProposeYesPrivValidator(t *testing.T) {
  118. cs, _ := randConsensusState(1)
  119. height, round := cs.Height, cs.Round
  120. // Listen for propose timeout event
  121. timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
  122. proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
  123. cs.enterNewRound(height, round)
  124. cs.startRoutines(3)
  125. <-proposalCh
  126. // Check that Proposal, ProposalBlock, ProposalBlockParts are set.
  127. rs := cs.GetRoundState()
  128. if rs.Proposal == nil {
  129. t.Error("rs.Proposal should be set")
  130. }
  131. if rs.ProposalBlock == nil {
  132. t.Error("rs.ProposalBlock should be set")
  133. }
  134. if rs.ProposalBlockParts.Total() == 0 {
  135. t.Error("rs.ProposalBlockParts should be set")
  136. }
  137. // if we're a validator, enterPropose should not timeout
  138. ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))
  139. select {
  140. case <-timeoutCh:
  141. panic("Expected EnterPropose not to timeout")
  142. case <-ticker.C:
  143. }
  144. }
// TestStateBadProposal: 2 validators. vs2 signs a proposal whose block has a
// tampered app hash; cs1 should prevote and precommit nil for it.
func TestStateBadProposal(t *testing.T) {
	cs1, vss := randConsensusState(2)
	height, round := cs1.Height, cs1.Round
	vs2 := vss[1]

	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)

	propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)

	// make the second validator the proposer by incrementing round
	round = round + 1
	incrementRound(vss[1:]...)

	// make the block bad by tampering with statehash
	stateHash := propBlock.AppHash
	if len(stateHash) == 0 {
		stateHash = make([]byte, 32)
	}
	stateHash[0] = byte((stateHash[0] + 1) % 255)
	propBlock.AppHash = stateHash
	propBlockParts := propBlock.MakePartSet(partSize)
	proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
	if err := vs2.SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	// start the machine
	startTestRound(cs1, height, round)

	// wait for proposal
	<-proposalCh

	// wait for prevote
	<-voteCh

	// cs1 prevotes nil: the proposal's block was tampered with above
	validatePrevote(t, cs1, round, vss[0], nil)

	// add bad prevote from vs2 and wait for it
	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// wait for precommit
	<-voteCh

	// cs1 precommits nil and stays unlocked
	validatePrecommit(t, cs1, round, 0, vss[0], nil, nil)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
}
//----------------------------------------------------------------------------------------------------
// FullRoundSuite

// propose, prevote, and precommit a block with a single validator.
func TestStateFullRound1(t *testing.T) {
	cs, vss := randConsensusState(1)
	height, round := cs.Height, cs.Round

	// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
	// before consensus can move to the next height (and cause a race condition)
	// NOTE(review): Start/Stop return values are deliberately discarded here
	cs.eventBus.Stop()
	eventBus := types.NewEventBusWithBufferCapacity(0)
	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
	cs.SetEventBus(eventBus)
	eventBus.Start()

	voteCh := subscribe(cs.eventBus, types.EventQueryVote)
	propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
	newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)

	startTestRound(cs, height, round)

	<-newRoundCh

	// grab proposal
	re := <-propCh
	propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()

	<-voteCh // wait for prevote
	validatePrevote(t, cs, round, vss[0], propBlockHash)

	<-voteCh // wait for precommit

	// we're going to roll right into new height
	<-newRoundCh

	validateLastPrecommit(t, cs, vss[0], propBlockHash)
}
  215. // nil is proposed, so prevote and precommit nil
  216. func TestStateFullRoundNil(t *testing.T) {
  217. cs, vss := randConsensusState(1)
  218. height, round := cs.Height, cs.Round
  219. voteCh := subscribe(cs.eventBus, types.EventQueryVote)
  220. cs.enterPrevote(height, round)
  221. cs.startRoutines(4)
  222. <-voteCh // prevote
  223. <-voteCh // precommit
  224. // should prevote and precommit nil
  225. validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil)
  226. }
// run through propose, prevote, precommit commit with two validators
// where the first validator has to wait for votes from the second
func TestStateFullRound2(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]
	height, round := cs1.Height, cs1.Round

	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)

	// start round and wait for propose and prevote
	startTestRound(cs1, height, round)

	<-voteCh // prevote (ours)

	// we should be stuck in limbo waiting for more prevotes
	rs := cs1.GetRoundState()
	propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()

	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2)
	<-voteCh // vs2's prevote echoed back on the event bus

	<-voteCh //precommit (ours)

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash)

	// we should be stuck in limbo waiting for more precommits

	// precommit arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2)
	<-voteCh

	// wait to finish commit, propose in next height
	<-newBlockCh
}
//------------------------------------------------------------------------------------------
// LockSuite

// two validators, 4 rounds.
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else.
// Without a proof-of-lock (POL), cs1 must stay locked on the round-0 block.
func TestStateLockNoPOL(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]
	height := cs1.Height

	partSize := types.BlockPartSizeBytes

	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)

	/*
		Round1 (cs1, B) // B B // B B2
	*/

	// start round and wait for prevote
	cs1.enterNewRound(height, 0)
	cs1.startRoutines(0)

	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	<-voteCh // prevote

	// we should now be stuck in limbo forever, waiting for more prevotes
	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2)
	<-voteCh // prevote

	<-voteCh // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// we should now be stuck in limbo forever, waiting for more precommits
	// lets add one for a different block
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	hash := make([]byte, len(theBlockHash))
	copy(hash, theBlockHash)
	hash[0] = byte((hash[0] + 1) % 255) // mutate one byte to fabricate a conflicting hash
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)

	<-voteCh // precommit

	// (note we're entering precommit for a second time this round)
	// but with invalid args. then we enterPrecommitWait, and the timeout to new round
	<-timeoutWaitCh

	///

	<-newRoundCh
	t.Log("#### ONTO ROUND 1")
	/*
		Round2 (cs1, B) // B B2
	*/

	incrementRound(vs2)

	// now we're on a new round and not the proposer, so wait for timeout
	re = <-timeoutProposeCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	if rs.ProposalBlock != nil {
		panic("Expected proposal block to be nil")
	}

	// wait to finish prevote
	<-voteCh

	// we should have prevoted our locked block
	validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())

	// add a conflicting prevote from the other validator
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// now we're going to enter prevote again, but with invalid args
	// and then prevote wait, which should timeout. then wait for precommit
	<-timeoutWaitCh

	<-voteCh // precommit

	// the proposed block should still be locked and our precommit added
	// we should precommit nil and be locked on the proposal
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash)

	// add conflicting precommit from vs2
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// (note we're entering precommit for a second time this round, but with invalid args
	// then we enterPrecommitWait and timeout into NewRound
	<-timeoutWaitCh

	<-newRoundCh
	t.Log("#### ONTO ROUND 2")
	/*
		Round3 (vs2, _) // B, B2
	*/

	incrementRound(vs2)

	re = <-proposalCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// now we're on a new round and are the proposer
	if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
		panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
	}

	<-voteCh // prevote

	validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash())

	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	<-timeoutWaitCh // prevote wait
	<-voteCh        // precommit

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash)                                             // precommit nil but be locked on proposal
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	<-voteCh

	<-timeoutWaitCh

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	if prop == nil || propBlock == nil {
		t.Fatal("Failed to create proposal block with vs2")
	}

	incrementRound(vs2)

	<-newRoundCh
	t.Log("#### ONTO ROUND 3")
	/*
		Round4 (vs2, C) // B C // B C
	*/

	// now we're on a new round and not the proposer
	// so set the proposal block
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil {
		t.Fatal(err)
	}

	<-proposalCh
	<-voteCh // prevote

	// prevote for locked block (not proposal)
	validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash())

	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	<-timeoutWaitCh
	<-voteCh

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash)                                                  // precommit nil but locked on proposal
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	<-voteCh
}
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestStateLockPOLRelock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := types.BlockPartSizeBytes

	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B// B nil B nil
		eg. vs2 and vs4 didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)

	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
	// prevotes from vs2, vs3, vs4 — drain them
	discardFromChan(voteCh, 3)

	<-voteCh // our precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
	// precommits from vs2, vs3, vs4 — drain them
	discardFromChan(voteCh, 3)

	// before we timeout to the new round set the new proposal
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)
	propBlockHash := propBlock.Hash()

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	<-timeoutWaitCh

	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	<-newRoundCh
	t.Log("### ONTO ROUND 1")

	/*
		Round2 (vs2, C) // B C C C // C C C _)
		cs1 changes lock!
	*/

	// now we're on a new round and not the proposer
	// but we should receive the proposal
	select {
	case <-proposalCh:
	case <-timeoutProposeCh:
		<-proposalCh
	}

	// go to prevote, prevote for locked block (not proposal), move on
	<-voteCh
	validatePrevote(t, cs1, 0, vss[0], theBlockHash)

	// now lets add prevotes from everyone else for the new block
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
	// prevotes
	discardFromChan(voteCh, 3)

	// now either we go to PrevoteWait or Precommit
	select {
	case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit
		// XXX: there's no guarantee we see the polka, this might be a precommit for nil,
		// in which case the test fails!
		<-voteCh
	case <-voteCh: // we went straight to Precommit
	}

	// we should have unlocked and locked on the new block
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
	discardFromChan(voteCh, 2)

	be := <-newBlockCh
	b := be.(types.EventDataNewBlockHeader)
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	if rs.Height != 2 {
		panic("Expected height to increment")
	}

	if !bytes.Equal(b.Header.Hash(), propBlockHash) {
		panic("Expected new block to be proposal block")
	}
}
// 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
func TestStateLockPOLUnlock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
	// only watch votes signed by cs1's own validator here
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B // B nil B nil
		eg. didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)

	<-voteCh //precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	rs = cs1.GetRoundState()

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	re = <-timeoutWaitCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	lockedBlockHash := rs.LockedBlock.Hash()

	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	<-newRoundCh
	t.Log("#### ONTO ROUND 1")
	/*
		Round2 (vs2, C) // B nil nil nil // nil nil nil _
		cs1 unlocks!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal
	select {
	case <-proposalCh:
	case <-timeoutProposeCh:
		<-proposalCh
	}

	// go to prevote, prevote for locked block (not proposal)
	<-voteCh
	validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)

	// now lets add prevotes from everyone else for nil (a polka!)
	signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// the polka makes us unlock and precommit nil
	<-unlockCh
	<-voteCh // precommit

	// we should have unlocked and committed nil
	// NOTE: since we don't relock on nil, the lock round is 0
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil)

	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
	<-newRoundCh
}
// 4 vals
// a polka at round 1 but we miss it
// then a polka at round 2 that we lock on
// then we see the polka from round 1 but shouldn't unlock
func TestStateLockPOLSafety1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	partSize := types.BlockPartSizeBytes

	// Subscribe to the consensus events this test observes.
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	propBlock := rs.ProposalBlock

	<-voteCh // prevote

	validatePrevote(t, cs1, 0, vss[0], propBlock.Hash())

	// the others sign a polka but we don't see it
	// (votes are only signed here; they are added to cs1 much later, in round 2)
	prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)

	// before we time out into new round, set next proposer
	// and next proposal block
	/*
		_, v1 := cs1.Validators.GetByAddress(vss[0].Address)
		v1.VotingPower = 1
		if updated := cs1.Validators.Update(v1); !updated {
			panic("failed to update validator")
		}*/

	t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash()))

	// we do see them precommit nil
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// prepare a different proposal block for round 1
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash := propBlock.Hash()
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(vs2, vs3, vs4)

	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	<-newRoundCh
	t.Log("### ONTO ROUND 1")
	/*Round2
	// we timeout and prevote our lock
	// a polka happened but we didn't see it!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal
	// (either directly, or after the propose timeout fires — see XXX above)
	select {
	case re = <-proposalCh:
	case <-timeoutProposeCh:
		re = <-proposalCh
	}

	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// we never saw a polka in round 0, so we must not be locked yet
	if rs.LockedBlock != nil {
		panic("we should not be locked!")
	}
	t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash))
	// go to prevote, prevote for proposal block
	<-voteCh
	validatePrevote(t, cs1, 1, vss[0], propBlockHash)

	// now we see the others prevote for it, so we should lock on it
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)

	<-voteCh // precommit

	// we should have precommitted
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	// only 2 precommits for nil — not enough to commit or to unlock; we time out
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)

	<-timeoutWaitCh

	incrementRound(vs2, vs3, vs4)

	<-newRoundCh

	t.Log("### ONTO ROUND 2")
	/*Round3
	we see the polka from round 1 but we shouldn't unlock!
	*/

	// timeout of propose
	<-timeoutProposeCh

	// finish prevote
	<-voteCh

	// we should prevote what we're locked on
	validatePrevote(t, cs1, 2, vss[0], propBlockHash)

	newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)

	// add prevotes from the earlier round — the old polka must not move the state machine
	addVotes(cs1, prevotes...)

	t.Log("Done adding prevotes!")

	ensureNoNewStep(newStepCh)
}
// 4 vals.
// polka P0 at R0, P1 at R1, and P2 at R2,
// we lock on P0 at R0, don't see P1, and unlock using P2 at R2
// then we should make sure we don't lock using P1
// What we want:
// dont see P0, lock on P1 at R1, dont unlock using P0 at R2
func TestStateLockPOLSafety2(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	partSize := types.BlockPartSizeBytes

	// Subscribe to the consensus events this test observes.
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// the block for R0: gets polkad but we miss it
	// (even though we signed it, shhh)
	_, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round)
	propBlockHash0 := propBlock0.Hash()
	propBlockParts0 := propBlock0.MakePartSet(partSize)

	// the others sign a polka but we don't see it
	// (signed only; added to cs1 later, in round 2)
	prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)

	// the block for round 1
	prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash1 := propBlock1.Hash()
	propBlockParts1 := propBlock1.MakePartSet(partSize)
	propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()}

	incrementRound(vs2, vs3, vs4)

	cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait)

	t.Log("### ONTO Round 1")
	// jump in at round 1
	height := cs1.Height
	startTestRound(cs1, height, 1)
	<-newRoundCh

	if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil {
		t.Fatal(err)
	}
	<-proposalCh

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)

	<-voteCh // precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1)

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3)

	incrementRound(vs2, vs3, vs4)

	// timeout of precommit wait to new round
	<-timeoutWaitCh

	// in round 2 we see the polkad block from round 0
	// note the POLRound (0) and the round-1 BlockID in the proposal
	newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
	if err := vs3.SignProposal(config.ChainID(), newProp); err != nil {
		t.Fatal(err)
	}
	if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil {
		t.Fatal(err)
	}

	// Add the pol votes
	addVotes(cs1, prevotes...)

	<-newRoundCh
	t.Log("### ONTO Round 2")
	/*Round2
	// now we see the polka from round 1, but we shouldnt unlock
	*/

	select {
	case <-timeoutProposeCh:
		<-proposalCh
	case <-proposalCh:
	}

	select {
	case <-unlockCh:
		panic("validator unlocked using an old polka")
	case <-voteCh:
		// prevote our locked block
	}
	validatePrevote(t, cs1, 2, vss[0], propBlockHash1)
}
  704. //------------------------------------------------------------------------------------------
  705. // SlashingSuite
  706. // TODO: Slashing
  707. /*
  708. func TestStateSlashingPrevotes(t *testing.T) {
  709. cs1, vss := randConsensusState(2)
  710. vs2 := vss[1]
  711. proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
  712. timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
  713. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  714. voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
  715. // start round and wait for propose and prevote
  716. startTestRound(cs1, cs1.Height, 0)
  717. <-newRoundCh
  718. re := <-proposalCh
  719. <-voteCh // prevote
  720. rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
  721. // we should now be stuck in limbo forever, waiting for more prevotes
  722. // add one for a different block should cause us to go into prevote wait
  723. hash := rs.ProposalBlock.Hash()
  724. hash[0] = byte(hash[0]+1) % 255
  725. signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2)
  726. <-timeoutWaitCh
  727. // NOTE: we have to send the vote for different block first so we don't just go into precommit round right
  728. // away and ignore more prevotes (and thus fail to slash!)
  729. // add the conflicting vote
  730. signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
  731. // XXX: Check for existence of Dupeout info
  732. }
  733. func TestStateSlashingPrecommits(t *testing.T) {
  734. cs1, vss := randConsensusState(2)
  735. vs2 := vss[1]
  736. proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
  737. timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
  738. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  739. voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
  740. // start round and wait for propose and prevote
  741. startTestRound(cs1, cs1.Height, 0)
  742. <-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	<-voteCh // prevote
  746. signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
  747. <-voteCh // precommit
  748. // we should now be stuck in limbo forever, waiting for more prevotes
  749. // add one for a different block should cause us to go into prevote wait
  750. hash := rs.ProposalBlock.Hash()
  751. hash[0] = byte(hash[0]+1) % 255
  752. signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)
  753. // NOTE: we have to send the vote for different block first so we don't just go into precommit round right
  754. // away and ignore more prevotes (and thus fail to slash!)
  755. // add precommit from vs2
  756. signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
  757. // XXX: Check for existence of Dupeout info
  758. }
  759. */
  760. //------------------------------------------------------------------------------------------
  761. // CatchupSuite
  762. //------------------------------------------------------------------------------------------
  763. // HaltSuite
// 4 vals.
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestStateHalt1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	partSize := types.BlockPartSizeBytes

	// Subscribe to the consensus events this test observes.
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	propBlock := rs.ProposalBlock
	propBlockParts := propBlock.MakePartSet(partSize)

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
	<-voteCh // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
	// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
	precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	<-timeoutWaitCh
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	t.Log("### ONTO ROUND 1")
	/*Round2
	// we timeout and prevote our lock
	// a polka happened but we didn't see it!
	*/

	// go to prevote, prevote for locked block
	<-voteCh // prevote
	validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash())

	// now we receive the precommit from the previous round
	addVotes(cs1, precommit4)

	// receiving that precommit should take us straight to commit
	<-newBlockCh
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// after the commit we should have advanced to the next height
	if rs.Height != 2 {
		panic("expected height to increment")
	}
}
  815. func TestStateOutputsBlockPartsStats(t *testing.T) {
  816. // create dummy peer
  817. cs, _ := randConsensusState(1)
  818. peer := p2pdummy.NewPeer()
  819. // 1) new block part
  820. parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
  821. msg := &BlockPartMessage{
  822. Height: 1,
  823. Round: 0,
  824. Part: parts.GetPart(0),
  825. }
  826. cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header())
  827. cs.handleMsg(msgInfo{msg, peer.ID()})
  828. statsMessage := <-cs.statsMsgQueue
  829. require.Equal(t, msg, statsMessage.Msg, "")
  830. require.Equal(t, peer.ID(), statsMessage.PeerID, "")
  831. // sending the same part from different peer
  832. cs.handleMsg(msgInfo{msg, "peer2"})
  833. // sending the part with the same height, but different round
  834. msg.Round = 1
  835. cs.handleMsg(msgInfo{msg, peer.ID()})
  836. // sending the part from the smaller height
  837. msg.Height = 0
  838. cs.handleMsg(msgInfo{msg, peer.ID()})
  839. // sending the part from the bigger height
  840. msg.Height = 3
  841. cs.handleMsg(msgInfo{msg, peer.ID()})
  842. select {
  843. case <-cs.statsMsgQueue:
  844. t.Errorf("Should not output stats message after receiving the known block part!")
  845. case <-time.After(50 * time.Millisecond):
  846. }
  847. }
  848. func TestStateOutputVoteStats(t *testing.T) {
  849. cs, vss := randConsensusState(2)
  850. // create dummy peer
  851. peer := p2pdummy.NewPeer()
  852. vote := signVote(vss[1], types.VoteTypePrecommit, []byte("test"), types.PartSetHeader{})
  853. voteMessage := &VoteMessage{vote}
  854. cs.handleMsg(msgInfo{voteMessage, peer.ID()})
  855. statsMessage := <-cs.statsMsgQueue
  856. require.Equal(t, voteMessage, statsMessage.Msg, "")
  857. require.Equal(t, peer.ID(), statsMessage.PeerID, "")
  858. // sending the same part from different peer
  859. cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"})
  860. // sending the vote for the bigger height
  861. incrementHeight(vss[1])
  862. vote = signVote(vss[1], types.VoteTypePrecommit, []byte("test"), types.PartSetHeader{})
  863. cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()})
  864. select {
  865. case <-cs.statsMsgQueue:
  866. t.Errorf("Should not output stats message after receiving the known vote or vote from bigger height")
  867. case <-time.After(50 * time.Millisecond):
  868. }
  869. }
  870. // subscribe subscribes test client to the given query and returns a channel with cap = 1.
  871. func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} {
  872. out := make(chan interface{}, 1)
  873. err := eventBus.Subscribe(context.Background(), testSubscriber, q, out)
  874. if err != nil {
  875. panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
  876. }
  877. return out
  878. }
  879. // discardFromChan reads n values from the channel.
  880. func discardFromChan(ch <-chan interface{}, n int) {
  881. for i := 0; i < n; i++ {
  882. <-ch
  883. }
  884. }