You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1194 lines
39 KiB

9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
fix TestFullRound1 race (Refs #846) ``` ================== WARNING: DATA RACE Write at 0x00c42d7605f0 by goroutine 844: github.com/tendermint/tendermint/consensus.(*ConsensusState).updateToState() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:465 +0x59e I[11-14|22:37:28.781] Added to prevote vote="Vote{0:646753DCE124 1/02/1(Prevote) E9B19636DCDB {/CAD5FA805E8C.../}}" prevotes="VoteSet{H:1 R:2 T:1 +2/3:<nil> BA{2:X_} map[]}" github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1229 +0x16a9 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1135 +0x721 github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1087 +0x153 github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1114 +0xa34 github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1423 +0xdd6 github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1317 +0x77 github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:565 +0x7a9 github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:523 +0x6d2 Previous read at 0x00c42d7605f0 by goroutine 654: github.com/tendermint/tendermint/consensus.validatePrevote() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/common_test.go:149 +0x57 
github.com/tendermint/tendermint/consensus.TestFullRound1() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state_test.go:256 +0x3c5 testing.tRunner() /usr/local/go/src/testing/testing.go:746 +0x16c Goroutine 844 (running) created at: github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:258 +0x8c github.com/tendermint/tendermint/consensus.startTestRound() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/common_test.go:118 +0x63 github.com/tendermint/tendermint/consensus.TestFullRound1() /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state_test.go:247 +0x1fb testing.tRunner() /usr/local/go/src/testing/testing.go:746 +0x16c Goroutine 654 (running) created at: testing.(*T).Run() /usr/local/go/src/testing/testing.go:789 +0x568 testing.runTests.func1() /usr/local/go/src/testing/testing.go:1004 +0xa7 testing.tRunner() /usr/local/go/src/testing/testing.go:746 +0x16c testing.runTests() /usr/local/go/src/testing/testing.go:1002 +0x521 testing.(*M).Run() /usr/local/go/src/testing/testing.go:921 +0x206 main.main() github.com/tendermint/tendermint/consensus/_test/_testmain.go:106 +0x1d3 ================== ```
7 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
7 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! 
or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
  1. package consensus
  2. import (
  3. "bytes"
  4. "context"
  5. "fmt"
  6. "testing"
  7. "time"
  8. "github.com/stretchr/testify/require"
  9. cstypes "github.com/tendermint/tendermint/consensus/types"
  10. cmn "github.com/tendermint/tendermint/libs/common"
  11. "github.com/tendermint/tendermint/libs/log"
  12. tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
  13. p2pdummy "github.com/tendermint/tendermint/p2p/dummy"
  14. "github.com/tendermint/tendermint/types"
  15. )
// init resets the global test configuration so every `go test` run of this
// file starts from a clean consensus config rooted at "consensus_state_test".
func init() {
	config = ResetConfig("consensus_state_test")
}
  19. func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
  20. return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond
  21. }
  22. /*
  23. ProposeSuite
  24. x * TestProposerSelection0 - round robin ordering, round 0
  25. x * TestProposerSelection2 - round robin ordering, round 2++
  26. x * TestEnterProposeNoValidator - timeout into prevote round
  27. x * TestEnterPropose - finish propose without timing out (we have the proposal)
  28. x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil
  29. FullRoundSuite
  30. x * TestFullRound1 - 1 val, full successful round
  31. x * TestFullRoundNil - 1 val, full round of nil
  32. x * TestFullRound2 - 2 vals, both required for full round
  33. LockSuite
  34. x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
  35. x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
  36. x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
  37. x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
  38. x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
  39. * TestNetworkLock - once +1/3 precommits, network should be locked
  40. * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed
  41. SlashingSuite
  42. x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed
  43. x * TestSlashingPrecommits - a validator precommitting twice in a round gets slashed
  44. CatchupSuite
  45. * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote
  46. HaltSuite
  47. x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we should still commit
  48. */
  49. //----------------------------------------------------------------------------------------------------
  50. // ProposeSuite
  51. func TestStateProposerSelection0(t *testing.T) {
  52. cs1, vss := randConsensusState(4)
  53. height, round := cs1.Height, cs1.Round
  54. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  55. proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
  56. startTestRound(cs1, height, round)
  57. // Wait for new round so proposer is set.
  58. ensureNewRound(newRoundCh)
  59. // Commit a block and ensure proposer for the next height is correct.
  60. prop := cs1.GetRoundState().Validators.GetProposer()
  61. if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
  62. t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
  63. }
  64. // Wait for complete proposal.
  65. ensureNewProposal(proposalCh)
  66. rs := cs1.GetRoundState()
  67. signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
  68. // Wait for new round so next validator is set.
  69. ensureNewRound(newRoundCh)
  70. prop = cs1.GetRoundState().Validators.GetProposer()
  71. if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
  72. panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address))
  73. }
  74. }
  75. // Now let's do it all again, but starting from round 2 instead of 0
  76. func TestStateProposerSelection2(t *testing.T) {
  77. cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
  78. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  79. // this time we jump in at round 2
  80. incrementRound(vss[1:]...)
  81. incrementRound(vss[1:]...)
  82. startTestRound(cs1, cs1.Height, 2)
  83. ensureNewRound(newRoundCh) // wait for the new round
  84. // everyone just votes nil. we get a new proposer each round
  85. for i := 0; i < len(vss); i++ {
  86. prop := cs1.GetRoundState().Validators.GetProposer()
  87. correctProposer := vss[(i+2)%len(vss)].GetAddress()
  88. if !bytes.Equal(prop.Address, correctProposer) {
  89. panic(fmt.Sprintf("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
  90. }
  91. rs := cs1.GetRoundState()
  92. signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
  93. ensureNewRound(newRoundCh) // wait for the new round event each round
  94. incrementRound(vss[1:]...)
  95. }
  96. }
  97. // a non-validator should timeout into the prevote round
  98. func TestStateEnterProposeNoPrivValidator(t *testing.T) {
  99. cs, _ := randConsensusState(1)
  100. cs.SetPrivValidator(nil)
  101. height, round := cs.Height, cs.Round
  102. // Listen for propose timeout event
  103. timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
  104. startTestRound(cs, height, round)
  105. // if we're not a validator, EnterPropose should timeout
  106. ensureNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds())
  107. if cs.GetRoundState().Proposal != nil {
  108. t.Error("Expected to make no proposal, since no privValidator")
  109. }
  110. }
  111. // a validator should not timeout of the prevote round (TODO: unless the block is really big!)
  112. func TestStateEnterProposeYesPrivValidator(t *testing.T) {
  113. cs, _ := randConsensusState(1)
  114. height, round := cs.Height, cs.Round
  115. // Listen for propose timeout event
  116. timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
  117. proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
  118. cs.enterNewRound(height, round)
  119. cs.startRoutines(3)
  120. ensureNewProposal(proposalCh)
  121. // Check that Proposal, ProposalBlock, ProposalBlockParts are set.
  122. rs := cs.GetRoundState()
  123. if rs.Proposal == nil {
  124. t.Error("rs.Proposal should be set")
  125. }
  126. if rs.ProposalBlock == nil {
  127. t.Error("rs.ProposalBlock should be set")
  128. }
  129. if rs.ProposalBlockParts.Total() == 0 {
  130. t.Error("rs.ProposalBlockParts should be set")
  131. }
  132. // if we're a validator, enterPropose should not timeout
  133. ensureNoNewTimeout(timeoutCh, cs.config.TimeoutPropose.Nanoseconds())
  134. }
// TestStateBadProposal: 2 validators; vs2 becomes proposer and signs a
// proposal whose AppHash has been tampered with, so cs1 must prevote nil
// and precommit nil for that round.
func TestStateBadProposal(t *testing.T) {
	cs1, vss := randConsensusState(2)
	height, round := cs1.Height, cs1.Round
	vs2 := vss[1]

	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)

	propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)

	// make the second validator the proposer by incrementing round
	round = round + 1
	incrementRound(vss[1:]...)

	// make the block bad by tampering with statehash
	stateHash := propBlock.AppHash
	if len(stateHash) == 0 {
		stateHash = make([]byte, 32)
	}
	// flip the first byte (mod 255) so the AppHash no longer matches
	stateHash[0] = byte((stateHash[0] + 1) % 255)
	propBlock.AppHash = stateHash
	propBlockParts := propBlock.MakePartSet(partSize)
	// vs2 signs the tampered proposal so it is otherwise valid
	proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
	if err := vs2.SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	// start the machine
	startTestRound(cs1, height, round)

	// wait for proposal
	ensureNewProposal(proposalCh)

	// wait for prevote; ours must be nil because the block is bad
	ensureNewVote(voteCh)
	validatePrevote(t, cs1, round, vss[0], nil)

	// add bad prevote from vs2 and wait for it
	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	ensureNewVote(voteCh)

	// wait for precommit; with no polka on a valid block we precommit nil
	ensureNewVote(voteCh)
	validatePrecommit(t, cs1, round, 0, vss[0], nil, nil)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
}
  177. //----------------------------------------------------------------------------------------------------
  178. // FullRoundSuite
  179. // propose, prevote, and precommit a block
  180. func TestStateFullRound1(t *testing.T) {
  181. cs, vss := randConsensusState(1)
  182. height, round := cs.Height, cs.Round
  183. // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
  184. // before consensus can move to the next height (and cause a race condition)
  185. cs.eventBus.Stop()
  186. eventBus := types.NewEventBusWithBufferCapacity(0)
  187. eventBus.SetLogger(log.TestingLogger().With("module", "events"))
  188. cs.SetEventBus(eventBus)
  189. eventBus.Start()
  190. voteCh := subscribe(cs.eventBus, types.EventQueryVote)
  191. propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
  192. newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
  193. startTestRound(cs, height, round)
  194. ensureNewRound(newRoundCh)
  195. // grab proposal
  196. re := <-propCh
  197. propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()
  198. ensureNewVote(voteCh) // wait for prevote
  199. validatePrevote(t, cs, round, vss[0], propBlockHash)
  200. ensureNewVote(voteCh) // wait for precommit
  201. // we're going to roll right into new height
  202. ensureNewRound(newRoundCh)
  203. validateLastPrecommit(t, cs, vss[0], propBlockHash)
  204. }
  205. // nil is proposed, so prevote and precommit nil
  206. func TestStateFullRoundNil(t *testing.T) {
  207. cs, vss := randConsensusState(1)
  208. height, round := cs.Height, cs.Round
  209. voteCh := subscribe(cs.eventBus, types.EventQueryVote)
  210. cs.enterPrevote(height, round)
  211. cs.startRoutines(4)
  212. ensureNewVote(voteCh) // prevote
  213. ensureNewVote(voteCh) // precommit
  214. // should prevote and precommit nil
  215. validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil)
  216. }
// TestStateFullRound2: run through propose, prevote, precommit, commit with
// two validators, where the first validator (cs1) has to wait for votes from
// the second (vs2) at each step before it can advance.
func TestStateFullRound2(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]
	height, round := cs1.Height, cs1.Round

	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)

	// start round and wait for propose and prevote
	startTestRound(cs1, height, round)

	ensureNewVote(voteCh) // prevote

	// we should be stuck in limbo waiting for more prevotes
	rs := cs1.GetRoundState()
	propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()

	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2)
	ensureNewVote(voteCh) // vs2's prevote

	ensureNewVote(voteCh) // our precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash)

	// we should be stuck in limbo waiting for more precommits

	// precommit arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2)
	ensureNewVote(voteCh)

	// wait to finish commit, propose in next height
	ensureNewBlock(newBlockCh)
}
  244. //------------------------------------------------------------------------------------------
  245. // LockSuite
// TestStateLockNoPOL: two validators, 4 rounds.
// The two vals take turns proposing. val1 (cs1) locks on the first block and,
// seeing no proof-of-lock for anything else, precommits nil every round after,
// while remaining locked on the original proposal.
// NOTE: the exact channel-receive ordering below is load-bearing; each
// ensure*/<- corresponds to one expected event from the consensus routines.
func TestStateLockNoPOL(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]
	height := cs1.Height

	partSize := types.BlockPartSizeBytes

	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)

	/*
		Round1 (cs1, B) // B B // B B2
	*/

	// start round and wait for prevote
	cs1.enterNewRound(height, 0)
	cs1.startRoutines(0)

	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	ensureNewVote(voteCh) // prevote

	// we should now be stuck in limbo forever, waiting for more prevotes
	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2)
	ensureNewVote(voteCh) // prevote

	ensureNewVote(voteCh) // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// we should now be stuck in limbo forever, waiting for more precommits
	// lets add one for a different block
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	hash := make([]byte, len(theBlockHash))
	copy(hash, theBlockHash)
	hash[0] = byte((hash[0] + 1) % 255)
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	ensureNewVote(voteCh) // precommit

	// (note we're entering precommit for a second time this round)
	// but with invalid args. then we enterPrecommitWait, and the timeout to new round
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())

	///

	ensureNewRound(newRoundCh)
	t.Log("#### ONTO ROUND 1")
	/*
		Round2 (cs1, B) // B B2
	*/

	incrementRound(vs2)

	// now we're on a new round and not the proposer, so wait for timeout
	re = <-timeoutProposeCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	if rs.ProposalBlock != nil {
		panic("Expected proposal block to be nil")
	}

	// wait to finish prevote
	ensureNewVote(voteCh)

	// we should have prevoted our locked block
	validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())

	// add a conflicting prevote from the other validator
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
	ensureNewVote(voteCh)

	// now we're going to enter prevote again, but with invalid args
	// and then prevote wait, which should timeout. then wait for precommit
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds())

	ensureNewVote(voteCh) // precommit

	// the proposed block should still be locked and our precommit added
	// we should precommit nil and be locked on the proposal
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash)

	// add conflicting precommit from vs2
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
	ensureNewVote(voteCh)

	// (note we're entering precommit for a second time this round, but with invalid args
	// then we enterPrecommitWait and timeout into NewRound
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())

	ensureNewRound(newRoundCh)
	t.Log("#### ONTO ROUND 2")
	/*
		Round3 (vs2, _) // B, B2
	*/

	incrementRound(vs2)

	re = <-proposalCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// now we're on a new round and are the proposer
	if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
		panic(fmt.Sprintf("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
	}

	ensureNewVote(voteCh) // prevote

	validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash())

	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	ensureNewVote(voteCh)

	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds())
	ensureNewVote(voteCh) // precommit

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal

	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	ensureNewVote(voteCh)

	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	if prop == nil || propBlock == nil {
		t.Fatal("Failed to create proposal block with vs2")
	}

	incrementRound(vs2)

	ensureNewRound(newRoundCh)
	t.Log("#### ONTO ROUND 3")
	/*
		Round4 (vs2, C) // B C // B C
	*/

	// now we're on a new round and not the proposer
	// so set the proposal block
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil {
		t.Fatal(err)
	}

	ensureNewProposal(proposalCh)
	ensureNewVote(voteCh) // prevote

	// prevote for locked block (not proposal)
	validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash())

	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	ensureNewVote(voteCh)

	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrevote.Nanoseconds())
	ensureNewVote(voteCh)

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal

	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	ensureNewVote(voteCh)
}
// TestStateLockPOLRelock: 4 vals, one precommits, the other 3 polka at the
// next round, so we unlock and precommit the polka (i.e. relock on the new
// block once we see a proof-of-lock for it).
func TestStateLockPOLRelock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := types.BlockPartSizeBytes

	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B// B nil B nil

		eg. vs2 and vs4 didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)

	ensureNewRound(newRoundCh)
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	ensureNewVote(voteCh) // prevote

	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
	// drain the three prevote events we just caused
	discardFromChan(voteCh, 3)

	ensureNewVote(voteCh) // our precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
	// drain the three precommit events
	discardFromChan(voteCh, 3)

	// before we timeout to the new round set the new proposal
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)
	propBlockHash := propBlock.Hash()

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())

	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	ensureNewRound(newRoundCh)
	t.Log("### ONTO ROUND 1")

	/*
		Round2 (vs2, C) // B C C C // C C C _)

		cs1 changes lock!
	*/

	// now we're on a new round and not the proposer
	// but we should receive the proposal
	select {
	case <-proposalCh:
	case <-timeoutProposeCh:
		<-proposalCh
	}

	// go to prevote, prevote for locked block (not proposal), move on
	ensureNewVote(voteCh)
	validatePrevote(t, cs1, 0, vss[0], theBlockHash)

	// now lets add prevotes from everyone else for the new block
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
	// drain the three prevote events
	discardFromChan(voteCh, 3)

	// now either we go to PrevoteWait or Precommit
	select {
	case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit
		// XXX: there's no guarantee we see the polka, this might be a precommit for nil,
		// in which case the test fails!
		<-voteCh
	case <-voteCh: // we went straight to Precommit
	}

	// we should have unlocked and locked on the new block
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
	discardFromChan(voteCh, 2)

	be := <-newBlockCh
	b := be.(types.EventDataNewBlockHeader)
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	if rs.Height != 2 {
		panic("Expected height to increment")
	}

	if !bytes.Equal(b.Header.Hash(), propBlockHash) {
		panic("Expected new block to be proposal block")
	}
}
// TestStateLockPOLUnlock: 4 vals, one precommits, the other 3 polka nil at
// the next round, so we unlock and precommit nil. (The original header here
// was a stale copy of the Relock description; see the suite index at the top
// of the file.)
func TestStateLockPOLUnlock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	h := cs1.GetRoundState().Height
	r := cs1.GetRoundState().Round

	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
	// voteCh only carries our own validator's votes, so no discarding needed
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B // B nil B nil

		eg. didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
	startTestRound(cs1, h, r)
	ensureNewRound(newRoundCh)
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	ensureVote(voteCh, h, r, types.VoteTypePrevote)

	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)

	ensureVote(voteCh, h, r, types.VoteTypePrecommit)
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, r, 0, vss[0], theBlockHash, theBlockHash)

	rs = cs1.GetRoundState()

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	re = <-timeoutWaitCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	lockedBlockHash := rs.LockedBlock.Hash()

	//XXX: this isnt guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	ensureNewRound(newRoundCh)
	t.Log("#### ONTO ROUND 1")
	/*
		Round2 (vs2, C) // B nil nil nil // nil nil nil _

		cs1 unlocks!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal
	select {
	case <-proposalCh:
	case <-timeoutProposeCh:
		<-proposalCh
	}

	// go to prevote, prevote for locked block (not proposal)
	ensureVote(voteCh, h, r+1, types.VoteTypePrevote)
	validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)

	// now lets add prevotes from everyone else for nil (a polka!)
	signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// the polka makes us unlock and precommit nil
	ensureNewUnlock(unlockCh)
	ensureVote(voteCh, h, r+1, types.VoteTypePrecommit)

	// we should have unlocked and committed nil
	// NOTE: since we don't relock on nil, the lock round is 0
	validatePrecommit(t, cs1, r+1, 0, vss[0], nil, nil)

	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
	ensureNewRound(newRoundCh)
}
// 4 vals.
// A polka happens at round 1 but we miss it; then a polka at round 2 that we
// lock on. Afterwards we see the round-1 prevotes, but must NOT unlock:
// a POL older than our lock round can never justify unlocking.
func TestStateLockPOLSafety1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	h := cs1.GetRoundState().Height
	r := cs1.GetRoundState().Round
	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	ensureNewRound(newRoundCh)
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	propBlock := rs.ProposalBlock
	ensureVote(voteCh, h, r, types.VoteTypePrevote)
	validatePrevote(t, cs1, 0, vss[0], propBlock.Hash())

	// the others sign a polka for the proposal, but we don't see it;
	// the votes are held back and injected at round 2 below.
	prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)

	// before we time out into new round, set next proposer
	// and next proposal block
	//TODO: Should we remove this?
	/*
		_, v1 := cs1.Validators.GetByAddress(vss[0].Address)
		v1.VotingPower = 1
		if updated := cs1.Validators.Update(v1); !updated {
			panic("failed to update validator")
		}*/

	t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash()))

	// we do see them precommit nil
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)
	ensureVote(voteCh, h, r, types.VoteTypePrecommit)
	ensureNewRound(newRoundCh)

	t.Log("### ONTO ROUND 1")
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash := propBlock.Hash()
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(vs2, vs3, vs4)

	//XXX: this isn't guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}
	/*Round2
	// we timeout and prevote our lock
	// a polka happened but we didn't see it!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal (possibly after a propose timeout)
	select {
	case re = <-proposalCh:
	case <-timeoutProposeCh:
		re = <-proposalCh
	}

	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	// we never saw the round-0 polka, so nothing should be locked yet
	if rs.LockedBlock != nil {
		panic("we should not be locked!")
	}
	t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash))

	// go to prevote, prevote for proposal block
	ensureVote(voteCh, h, r+1, types.VoteTypePrevote)
	validatePrevote(t, cs1, 1, vss[0], propBlockHash)

	// now we see the others prevote for it, so we should lock on it
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
	ensureVote(voteCh, h, r+1, types.VoteTypePrecommit)
	// we should have precommitted
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())

	incrementRound(vs2, vs3, vs4)
	ensureNewRound(newRoundCh)

	t.Log("### ONTO ROUND 2")
	/*Round3
	we see the polka from round 1 but we shouldn't unlock!
	*/

	// timeout of propose
	ensureNewTimeout(timeoutProposeCh, cs1.config.TimeoutPropose.Nanoseconds())
	// finish prevote
	ensureVote(voteCh, h, r+2, types.VoteTypePrevote)
	// we should prevote what we're locked on
	validatePrevote(t, cs1, 2, vss[0], propBlockHash)

	newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)
	// add the held-back prevotes from the earlier round; they must not
	// trigger a step change (i.e. no unlock)
	addVotes(cs1, prevotes...)
	t.Log("Done adding prevotes!")
	ensureNoNewStep(newStepCh)
}
// 4 vals.
// polka P0 at R0, P1 at R1, and P2 at R2,
// we lock on P0 at R0, don't see P1, and unlock using P2 at R2
// then we should make sure we don't lock using P1
// What we want:
// dont see P0, lock on P1 at R1, dont unlock using P0 at R2
func TestStateLockPOLSafety2(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	h := cs1.GetRoundState().Height
	r := cs1.GetRoundState().Round
	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// the block for R0: gets polkad but we miss it
	// (even though we signed it, shhh)
	_, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round)
	propBlockHash0 := propBlock0.Hash()
	propBlockParts0 := propBlock0.MakePartSet(partSize)

	// the others sign a polka but we don't see it; injected at round 2 below
	prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)

	// the block for round 1
	prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash1 := propBlock1.Hash()
	propBlockParts1 := propBlock1.MakePartSet(partSize)
	propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()}

	incrementRound(vs2, vs3, vs4)

	cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait)

	t.Log("### ONTO Round 1")
	// jump in at round 1
	startTestRound(cs1, h, r+1)
	ensureNewRound(newRoundCh)

	if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil {
		t.Fatal(err)
	}
	ensureNewProposal(proposalCh)

	ensureVote(voteCh, h, r+1, types.VoteTypePrevote)

	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)
	ensureVote(voteCh, h, r+1, types.VoteTypePrecommit)
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1)

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3)

	incrementRound(vs2, vs3, vs4)

	// timeout of precommit wait to new round
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())

	// in round 2 we see the polkad block from round 0;
	// the proposal carries POLRound 0, signed by the round-2 proposer (vs3)
	newProp := types.NewProposal(h, 2, propBlockParts0.Header(), 0, propBlockID1)
	if err := vs3.SignProposal(config.ChainID(), newProp); err != nil {
		t.Fatal(err)
	}
	if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil {
		t.Fatal(err)
	}

	// Add the pol votes
	addVotes(cs1, prevotes...)

	ensureNewRound(newRoundCh)
	t.Log("### ONTO Round 2")
	/*Round2
	// now we see the polka from round 1, but we shouldnt unlock
	*/

	select {
	case <-timeoutProposeCh:
		<-proposalCh
	case <-proposalCh:
	}

	select {
	case <-unlockCh:
		panic("validator unlocked using an old polka")
	case <-voteCh:
		// prevote our locked block
	}
	validatePrevote(t, cs1, 2, vss[0], propBlockHash1)
}
// 4 vals, 3 Nil Precommits at P0
// What we want:
// P0 waits for timeoutPrecommit before starting next round
func TestWaitingTimeoutOnNilPolka(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)

	// start round
	startTestRound(cs1, cs1.Height, 0)
	ensureNewRound(newRoundCh)

	// everyone else precommits nil: 2/3+ nil precommits must trigger the
	// precommit-wait timeout before the next round starts
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())
	ensureNewRound(newRoundCh)
}
  716. //------------------------------------------------------------------------------------------
  717. // SlashingSuite
  718. // TODO: Slashing
  719. /*
  720. func TestStateSlashingPrevotes(t *testing.T) {
  721. cs1, vss := randConsensusState(2)
  722. vs2 := vss[1]
  723. proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
  724. timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
  725. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  726. voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
  727. // start round and wait for propose and prevote
  728. startTestRound(cs1, cs1.Height, 0)
  729. <-newRoundCh
  730. re := <-proposalCh
  731. <-voteCh // prevote
  732. rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
  733. // we should now be stuck in limbo forever, waiting for more prevotes
  734. // add one for a different block should cause us to go into prevote wait
  735. hash := rs.ProposalBlock.Hash()
  736. hash[0] = byte(hash[0]+1) % 255
  737. signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2)
  738. <-timeoutWaitCh
  739. // NOTE: we have to send the vote for different block first so we don't just go into precommit round right
  740. // away and ignore more prevotes (and thus fail to slash!)
  741. // add the conflicting vote
  742. signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
  743. // XXX: Check for existence of Dupeout info
  744. }
  745. func TestStateSlashingPrecommits(t *testing.T) {
  746. cs1, vss := randConsensusState(2)
  747. vs2 := vss[1]
  748. proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
  749. timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
  750. newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
  751. voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
  752. // start round and wait for propose and prevote
  753. startTestRound(cs1, cs1.Height, 0)
  754. <-newRoundCh
  755. re := <-proposalCh
<-voteCh // prevote
rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
// add prevote from vs2
signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
  759. <-voteCh // precommit
  760. // we should now be stuck in limbo forever, waiting for more prevotes
  761. // add one for a different block should cause us to go into prevote wait
  762. hash := rs.ProposalBlock.Hash()
  763. hash[0] = byte(hash[0]+1) % 255
  764. signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)
  765. // NOTE: we have to send the vote for different block first so we don't just go into precommit round right
  766. // away and ignore more prevotes (and thus fail to slash!)
  767. // add precommit from vs2
  768. signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)
  769. // XXX: Check for existence of Dupeout info
  770. }
  771. */
//------------------------------------------------------------------------------------------
// CatchupSuite

//------------------------------------------------------------------------------------------
// HaltSuite

// 4 vals.
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestStateHalt1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
	h := cs1.GetRoundState().Height
	r := cs1.GetRoundState().Round
	partSize := types.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	ensureNewRound(newRoundCh)
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	propBlock := rs.ProposalBlock
	propBlockParts := propBlock.MakePartSet(partSize)

	ensureVote(voteCh, h, r, types.VoteTypePrevote)

	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
	ensureVote(voteCh, h, r, types.VoteTypePrecommit)
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
	// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
	precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	ensureNewTimeout(timeoutWaitCh, cs1.config.TimeoutPrecommit.Nanoseconds())
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	t.Log("### ONTO ROUND 1")
	/*Round2
	// we timeout and prevote our lock
	// a polka happened but we didn't see it!
	*/

	// go to prevote, prevote for locked block
	ensureVote(voteCh, h, r+1, types.VoteTypePrevote)
	validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash())

	// now we receive the precommit from the previous round
	addVotes(cs1, precommit4)

	// receiving that precommit should take us straight to commit
	ensureNewBlock(newBlockCh)
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// commit must have advanced us to the next height
	if rs.Height != 2 {
		panic("expected height to increment")
	}
}
// TestStateOutputsBlockPartsStats checks that handleMsg forwards a stats
// message to statsMsgQueue only for a new block part at the current height,
// and stays silent for duplicates, other rounds, and other heights.
func TestStateOutputsBlockPartsStats(t *testing.T) {
	// create dummy peer
	cs, _ := randConsensusState(1)
	peer := p2pdummy.NewPeer()

	// 1) new block part
	parts := types.NewPartSetFromData(cmn.RandBytes(100), 10)
	msg := &BlockPartMessage{
		Height: 1,
		Round:  0,
		Part:   parts.GetPart(0),
	}

	cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header())
	cs.handleMsg(msgInfo{msg, peer.ID()})

	statsMessage := <-cs.statsMsgQueue
	require.Equal(t, msg, statsMessage.Msg, "")
	require.Equal(t, peer.ID(), statsMessage.PeerID, "")

	// sending the same part from different peer
	cs.handleMsg(msgInfo{msg, "peer2"})

	// sending the part with the same height, but different round
	msg.Round = 1
	cs.handleMsg(msgInfo{msg, peer.ID()})

	// sending the part from the smaller height
	msg.Height = 0
	cs.handleMsg(msgInfo{msg, peer.ID()})

	// sending the part from the bigger height
	msg.Height = 3
	cs.handleMsg(msgInfo{msg, peer.ID()})

	// none of the above may produce a stats message
	select {
	case <-cs.statsMsgQueue:
		t.Errorf("Should not output stats message after receiving the known block part!")
	case <-time.After(50 * time.Millisecond):
	}
}
// TestStateOutputVoteStats checks that handleMsg forwards a stats message to
// statsMsgQueue only for a fresh vote at the current height, and stays silent
// for duplicate votes and votes from a bigger height.
func TestStateOutputVoteStats(t *testing.T) {
	cs, vss := randConsensusState(2)
	// create dummy peer
	peer := p2pdummy.NewPeer()

	vote := signVote(vss[1], types.VoteTypePrecommit, []byte("test"), types.PartSetHeader{})

	voteMessage := &VoteMessage{vote}
	cs.handleMsg(msgInfo{voteMessage, peer.ID()})

	statsMessage := <-cs.statsMsgQueue
	require.Equal(t, voteMessage, statsMessage.Msg, "")
	require.Equal(t, peer.ID(), statsMessage.PeerID, "")

	// sending the same part from different peer
	cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"})

	// sending the vote for the bigger height
	incrementHeight(vss[1])
	vote = signVote(vss[1], types.VoteTypePrecommit, []byte("test"), types.PartSetHeader{})

	cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()})

	// neither the duplicate nor the future-height vote may produce stats
	select {
	case <-cs.statsMsgQueue:
		t.Errorf("Should not output stats message after receiving the known vote or vote from bigger height")
	case <-time.After(50 * time.Millisecond):
	}
}
  884. // subscribe subscribes test client to the given query and returns a channel with cap = 1.
  885. func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} {
  886. out := make(chan interface{}, 1)
  887. err := eventBus.Subscribe(context.Background(), testSubscriber, q, out)
  888. if err != nil {
  889. panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
  890. }
  891. return out
  892. }
  893. // discardFromChan reads n values from the channel.
  894. func discardFromChan(ch <-chan interface{}, n int) {
  895. for i := 0; i < n; i++ {
  896. <-ch
  897. }
  898. }