new pubsub package

- comment out failing consensus tests for now
- rewrite rpc httpclient to use the new pubsub package
- import pubsub as tmpubsub, query as tmquery
- make event IDs constants; EventKey -> EventTypeKey
- rename EventsPubsub to PubSub
- mempool does not use pubsub
- rename eventsSub to pubsub
- new subscribe API
- fix channel size issues and consensus test bugs
- refactor rpc client
- add missing discardFromChan method
- add mutex
- rename pubsub to eventBus
- remove IsRunning from the WSRPCConnection interface (not needed)
- add a comment in broadcastNewRoundStepsAndVotes
- rename registerEventCallbacks to broadcastNewRoundStepsAndVotes (see https://dave.cheney.net/2014/03/19/channel-axioms)
- stop eventBuses after reactor tests
- remove unnecessary Unsubscribe
- return subscribe helper function
- move discardFromChan to where it is used
- subscribe now returns an err; this gives us the ability to refuse to subscribe if pubsub is at its max capacity (a minimal sketch of this contract follows this list)
- use context for control overflow
- cache queries
- handle err when subscribing in replay_test
- rename testClientID to testSubscriber
- extract var
- set channel buffer capacity to 1 in replay_file
- fix byzantine_test: unsubscribe from a single event, not all events
- refactor httpclient to return events to the appropriate channels
- return failing testReplayCrashBeforeWriteVote test
- fix TestValidatorSetChanges
- refactor code a bit
- fix testReplayCrashBeforeWriteVote
- add comment
- fix TestValidatorSetChanges; fixes from Bucky's review
- update comment [ci skip]
- test TxEventBuffer
- update changelog
- fix TestValidatorSetChanges (2nd attempt)
- only do wg.Done when there are no errors
- benchmark event bus
- create the pubsub server inside NewEventBus; only expose config params (later, if needed)
- set buffer capacity to 0 so we are not testing the cache
- new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ}. This should allow subscribing to all transactions, or to a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'"
- use TimeoutCommit instead of afterPublishEventNewBlockTimeout. TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is that this gives it time to hear more votes from other validators, strengthening the commit it includes in the next block. But it also gives it time to hear about new transactions.
- waitForBlockWithUpdatedVals
- rewrite WAL crash tests. Task: test that we can recover from any WAL crash. Solution: the old tests relied on the event hub being run in the same thread (we were injecting the private validator's last signature). When considering a rewrite, we weighed two possible solutions: write a "fuzzy" testing system where the WAL crashes upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail.
- remove sleep
- no cs.Lock around wal.Save
- test different cases (empty block, non-empty block, ...)
- comments
- add comments
- test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks
- fixes as per Bucky's last review
- reset subscriptions on UnsubscribeAll
- use a simple counter to track the message for which we panicked; also, set a smaller part size for all test cases
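The key behavioral change above is the subscribe contract: a subscriber can now be *refused* (error return) when the server is at max capacity, and callers pass a context to bound the call. The following is only a toy sketch of that contract, not the actual tmpubsub API — `Server`, `ErrOverCapacity`, and the method signatures here are illustrative assumptions:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// ErrOverCapacity is returned when the server refuses a new subscriber.
var ErrOverCapacity = errors.New("pubsub: max subscriber capacity reached")

// Server is a toy pubsub server keyed by subscriber ID.
type Server struct {
	mtx        sync.Mutex
	subs       map[string]chan<- interface{}
	maxClients int
}

func NewServer(maxClients int) *Server {
	return &Server{subs: make(map[string]chan<- interface{}), maxClients: maxClients}
}

// Subscribe registers clientID and returns an error instead of blocking
// when the server is at capacity; ctx lets the caller bound the call.
func (s *Server) Subscribe(ctx context.Context, clientID string, out chan<- interface{}) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}
	s.mtx.Lock()
	defer s.mtx.Unlock()
	if len(s.subs) >= s.maxClients {
		return ErrOverCapacity // refuse rather than queue forever
	}
	s.subs[clientID] = out
	return nil
}

// Publish fans msg out to all subscribers, giving up if ctx expires
// (e.g. when a slow consumer would otherwise block the publisher).
func (s *Server) Publish(ctx context.Context, msg interface{}) error {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	for _, ch := range s.subs {
		select {
		case ch <- msg:
		case <-ctx.Done():
			return ctx.Err()
		}
	}
	return nil
}

func main() {
	s := NewServer(1) // capacity of one subscriber, to demonstrate refusal
	ch := make(chan interface{}, 1)
	if err := s.Subscribe(context.Background(), "testSubscriber", ch); err != nil {
		fmt.Println("subscribe failed:", err)
		return
	}
	// Second subscriber is refused: the server is already at max capacity.
	if err := s.Subscribe(context.Background(), "other", make(chan interface{}, 1)); err != nil {
		fmt.Println("second subscribe refused:", err)
	}
	// In the real event bus a subscriber would also pass a query such as
	// "tm.events.type = Tx and tx.hash = '...'"; this sketch omits queries.
	_ = s.Publish(context.Background(), "EventNewBlock")
	fmt.Println("received:", <-ch)
}
```

The design point: returning an error at subscribe time pushes back-pressure to the caller instead of letting an overloaded bus silently drop or deadlock, and the context gives the caller, not the bus, control over how long to wait.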
fix TestFullRound1 race (Refs #846)

```
==================
WARNING: DATA RACE
Write at 0x00c42d7605f0 by goroutine 844:
  github.com/tendermint/tendermint/consensus.(*ConsensusState).updateToState()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:465 +0x59e
I[11-14|22:37:28.781] Added to prevote vote="Vote{0:646753DCE124 1/02/1(Prevote) E9B19636DCDB {/CAD5FA805E8C.../}}" prevotes="VoteSet{H:1 R:2 T:1 +2/3:<nil> BA{2:X_} map[]}"
  github.com/tendermint/tendermint/consensus.(*ConsensusState).finalizeCommit()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1229 +0x16a9
  github.com/tendermint/tendermint/consensus.(*ConsensusState).tryFinalizeCommit()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1135 +0x721
  github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit.func1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1087 +0x153
  github.com/tendermint/tendermint/consensus.(*ConsensusState).enterCommit()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1114 +0xa34
  github.com/tendermint/tendermint/consensus.(*ConsensusState).addVote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1423 +0xdd6
  github.com/tendermint/tendermint/consensus.(*ConsensusState).tryAddVote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:1317 +0x77
  github.com/tendermint/tendermint/consensus.(*ConsensusState).handleMsg()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:565 +0x7a9
  github.com/tendermint/tendermint/consensus.(*ConsensusState).receiveRoutine()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:523 +0x6d2

Previous read at 0x00c42d7605f0 by goroutine 654:
  github.com/tendermint/tendermint/consensus.validatePrevote()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/common_test.go:149 +0x57
  github.com/tendermint/tendermint/consensus.TestFullRound1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state_test.go:256 +0x3c5
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:746 +0x16c

Goroutine 844 (running) created at:
  github.com/tendermint/tendermint/consensus.(*ConsensusState).startRoutines()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state.go:258 +0x8c
  github.com/tendermint/tendermint/consensus.startTestRound()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/common_test.go:118 +0x63
  github.com/tendermint/tendermint/consensus.TestFullRound1()
      /home/vagrant/go/src/github.com/tendermint/tendermint/consensus/state_test.go:247 +0x1fb
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:746 +0x16c

Goroutine 654 (running) created at:
  testing.(*T).Run()
      /usr/local/go/src/testing/testing.go:789 +0x568
  testing.runTests.func1()
      /usr/local/go/src/testing/testing.go:1004 +0xa7
  testing.tRunner()
      /usr/local/go/src/testing/testing.go:746 +0x16c
  testing.runTests()
      /usr/local/go/src/testing/testing.go:1002 +0x521
  testing.(*M).Run()
      /usr/local/go/src/testing/testing.go:921 +0x206
  main.main()
      github.com/tendermint/tendermint/consensus/_test/_testmain.go:106 +0x1d3
==================
```
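The trace shows the classic shape of this bug: the consensus goroutine (`receiveRoutine` -> `updateToState`) writes state while the test goroutine (`TestFullRound1` -> `validatePrevote`) reads the same memory without synchronization. The commit message doesn't spell out the fix, but the usual remedy is to read shared fields only through an accessor that takes the same lock as the writer. A minimal sketch of that pattern — all names here are illustrative stand-ins, not the actual ConsensusState code:

```go
package main

import (
	"fmt"
	"sync"
)

// consensusState stands in for the shared state flagged by the detector:
// one goroutine writes height/round while another reads the same fields.
type consensusState struct {
	mtx    sync.Mutex
	height int64
	round  int
}

// Writer side (the receiveRoutine path): mutate only under the lock.
func (cs *consensusState) updateToState(height int64, round int) {
	cs.mtx.Lock()
	defer cs.mtx.Unlock()
	cs.height, cs.round = height, round
}

// Reader side for tests: copy the fields under the same lock instead of
// touching cs.height / cs.round directly from another goroutine.
func (cs *consensusState) roundState() (int64, int) {
	cs.mtx.Lock()
	defer cs.mtx.Unlock()
	return cs.height, cs.round
}

func main() {
	cs := &consensusState{}
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // stand-in for receiveRoutine
		defer wg.Done()
		for h := int64(1); h <= 1000; h++ {
			cs.updateToState(h, 0)
		}
	}()
	// Stand-in for the test's concurrent validatePrevote reads.
	for i := 0; i < 1000; i++ {
		if h, _ := cs.roundState(); h > 500 {
			break
		}
	}
	wg.Wait()
	fmt.Println("reads and writes share one lock; clean under go test -race")
}
```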
8 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
new pubsub package comment out failing consensus tests for now rewrite rpc httpclient to use new pubsub package import pubsub as tmpubsub, query as tmquery make event IDs constants EventKey -> EventTypeKey rename EventsPubsub to PubSub mempool does not use pubsub rename eventsSub to pubsub new subscribe API fix channel size issues and consensus tests bugs refactor rpc client add missing discardFromChan method add mutex rename pubsub to eventBus remove IsRunning from WSRPCConnection interface (not needed) add a comment in broadcastNewRoundStepsAndVotes rename registerEventCallbacks to broadcastNewRoundStepsAndVotes See https://dave.cheney.net/2014/03/19/channel-axioms stop eventBuses after reactor tests remove unnecessary Unsubscribe return subscribe helper function move discardFromChan to where it is used subscribe now returns an err this gives us ability to refuse to subscribe if pubsub is at its max capacity. use context for control overflow cache queries handle err when subscribing in replay_test rename testClientID to testSubscriber extract var set channel buffer capacity to 1 in replay_file fix byzantine_test unsubscribe from single event, not all events refactor httpclient to return events to appropriate channels return failing testReplayCrashBeforeWriteVote test fix TestValidatorSetChanges refactor code a bit fix testReplayCrashBeforeWriteVote add comment fix TestValidatorSetChanges fixes from Bucky's review update comment [ci skip] test TxEventBuffer update changelog fix TestValidatorSetChanges (2nd attempt) only do wg.Done when no errors benchmark event bus create pubsub server inside NewEventBus only expose config params (later if needed) set buffer capacity to 0 so we are not testing cache new tx event format: key = "Tx" plus a tag {"tx.hash": XYZ} This should allow to subscribe to all transactions! or a specific one using a query: "tm.events.type = Tx and tx.hash = '013ABF99434...'" use TimeoutCommit instead of afterPublishEventNewBlockTimeout TimeoutCommit is the time a node waits after committing a block, before it goes into the next height. So it will finish everything from the last block, but then wait a bit. The idea is this gives it time to hear more votes from other validators, to strengthen the commit it includes in the next block. But it also gives it time to hear about new transactions. waitForBlockWithUpdatedVals rewrite WAL crash tests Task: test that we can recover from any WAL crash. Solution: the old tests were relying on event hub being run in the same thread (we were injecting the private validator's last signature). when considering a rewrite, we considered two possible solutions: write a "fuzzy" testing system where WAL is crashing upon receiving a new message, or inject failures and trigger them in tests using something like https://github.com/coreos/gofail. remove sleep no cs.Lock around wal.Save test different cases (empty block, non-empty block, ...) comments add comments test 4 cases: empty block, non-empty block, non-empty block with smaller part size, many blocks fixes as per Bucky's last review reset subscriptions on UnsubscribeAll use a simple counter to track message for which we panicked also, set a smaller part size for all test cases
8 years ago
package consensus

import (
	"bytes"
	"context"
	"fmt"
	"testing"
	"time"

	cstypes "github.com/tendermint/tendermint/consensus/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
	"github.com/tendermint/tendermint/types"
)

func init() {
	config = ResetConfig("consensus_state_test")
}

func ensureProposeTimeout(timeoutPropose int) time.Duration {
	return time.Duration(timeoutPropose*2) * time.Millisecond
}
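// NOTE: the factor of 2 above gives the state machine headroom: the guard
// tickers in the EnterPropose tests below should only fire if the expected
// TimeoutPropose event has genuinely failed to arrive within twice the
// configured propose timeout (interpreted in milliseconds).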
/*
ProposeSuite
x * TestProposerSelection0 - round robin ordering, round 0
x * TestProposerSelection2 - round robin ordering, round 2++
x * TestEnterProposeNoValidator - timeout into prevote round
x * TestEnterPropose - finish propose without timing out (we have the proposal)
x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil
FullRoundSuite
x * TestFullRound1 - 1 val, full successful round
x * TestFullRoundNil - 1 val, full round of nil
x * TestFullRound2 - 2 vals, both required for full round
LockSuite
x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
* TestNetworkLock - once +1/3 precommits, network should be locked
* TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed
SlashingSuite
x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed
x * TestSlashingPrecommits - a validator precommitting twice in a round gets slashed
CatchupSuite
* TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote
HaltSuite
x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we should still commit
*/
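// All tests below follow the same event-driven pattern: subscribe to the
// consensus state's event bus for the queries of interest (new round,
// complete proposal, votes, timeouts), drive the state machine by starting a
// round and injecting signed votes from the simulated validators, then block
// on the subscription channels to assert that events arrive in the expected
// order. The subscribe and discardFromChan helpers at the bottom of this file
// provide the plumbing.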
//----------------------------------------------------------------------------------------------------
// ProposeSuite

func TestStateProposerSelection0(t *testing.T) {
	cs1, vss := randConsensusState(4)
	height, round := cs1.Height, cs1.Round

	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)

	startTestRound(cs1, height, round)

	// wait for new round so proposer is set
	<-newRoundCh

	// let's commit a block and ensure the proposer for the next height is correct
	prop := cs1.GetRoundState().Validators.GetProposer()
	if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
		t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
	}

	// wait for complete proposal
	<-proposalCh

	rs := cs1.GetRoundState()
	signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)

	// wait for new round so next validator is set
	<-newRoundCh

	prop = cs1.GetRoundState().Validators.GetProposer()
	if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
		panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
	}
}
// Now let's do it all again, but starting from round 2 instead of 0
func TestStateProposerSelection2(t *testing.T) {
	cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)

	// this time we jump in at round 2
	incrementRound(vss[1:]...)
	incrementRound(vss[1:]...)
	startTestRound(cs1, cs1.Height, 2)

	<-newRoundCh // wait for the new round

	// everyone just votes nil. we get a new proposer each round
	for i := 0; i < len(vss); i++ {
		prop := cs1.GetRoundState().Validators.GetProposer()
		correctProposer := vss[(i+2)%len(vss)].GetAddress()
		if !bytes.Equal(prop.Address, correctProposer) {
			panic(cmn.Fmt("expected RoundState.Validators.GetProposer() to be validator %d. Got %X", (i+2)%len(vss), prop.Address))
		}

		rs := cs1.GetRoundState()
		signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...)
		<-newRoundCh // wait for the new round event each round
		incrementRound(vss[1:]...)
	}
}
// a non-validator should timeout into the prevote round
func TestStateEnterProposeNoPrivValidator(t *testing.T) {
	cs, _ := randConsensusState(1)
	cs.SetPrivValidator(nil)
	height, round := cs.Height, cs.Round

	// Listen for propose timeout event
	timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)

	startTestRound(cs, height, round)

	// if we're not a validator, EnterPropose should timeout
	ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))
	select {
	case <-timeoutCh:
	case <-ticker.C:
		panic("Expected EnterPropose to timeout")
	}

	if cs.GetRoundState().Proposal != nil {
		t.Error("Expected to make no proposal, since no privValidator")
	}
}
// a validator should not time out into the prevote round (TODO: unless the block is really big!)
func TestStateEnterProposeYesPrivValidator(t *testing.T) {
	cs, _ := randConsensusState(1)
	height, round := cs.Height, cs.Round

	// Listen for propose timeout event
	timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
	proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)

	cs.enterNewRound(height, round)
	cs.startRoutines(3)

	<-proposalCh

	// Check that Proposal, ProposalBlock, ProposalBlockParts are set.
	rs := cs.GetRoundState()
	if rs.Proposal == nil {
		t.Error("rs.Proposal should be set")
	}
	if rs.ProposalBlock == nil {
		t.Error("rs.ProposalBlock should be set")
	}
	if rs.ProposalBlockParts.Total() == 0 {
		t.Error("rs.ProposalBlockParts should be set")
	}

	// if we're a validator, enterPropose should not timeout
	ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose))
	select {
	case <-timeoutCh:
		panic("Expected EnterPropose not to timeout")
	case <-ticker.C:
	}
}
func TestStateBadProposal(t *testing.T) {
	cs1, vss := randConsensusState(2)
	height, round := cs1.Height, cs1.Round
	vs2 := vss[1]

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)

	propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2)

	// make the second validator the proposer by incrementing round
	round = round + 1
	incrementRound(vss[1:]...)

	// make the block bad by tampering with the state hash
	stateHash := propBlock.AppHash
	if len(stateHash) == 0 {
		stateHash = make([]byte, 32)
	}
	stateHash[0] = byte((stateHash[0] + 1) % 255)
	propBlock.AppHash = stateHash
	propBlockParts := propBlock.MakePartSet(partSize)
	proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{})
	if err := vs2.SignProposal(config.ChainID(), proposal); err != nil {
		t.Fatal("failed to sign bad proposal", err)
	}

	// set the proposal block
	if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	// start the machine
	startTestRound(cs1, height, round)

	// wait for proposal
	<-proposalCh

	// wait for prevote
	<-voteCh
	validatePrevote(t, cs1, round, vss[0], nil)

	// add bad prevote from vs2 and wait for it
	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// wait for precommit
	<-voteCh
	validatePrecommit(t, cs1, round, 0, vss[0], nil, nil)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
}
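// cs1 prevotes nil above because the tampered AppHash makes the proposed
// block fail validation, and a correct validator never prevotes a block it
// cannot validate.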
//----------------------------------------------------------------------------------------------------
// FullRoundSuite

// propose, prevote, and precommit a block
func TestStateFullRound1(t *testing.T) {
	cs, vss := randConsensusState(1)
	height, round := cs.Height, cs.Round

	// NOTE: buffer capacity of 0 ensures we can validate prevote and last commit
	// before consensus can move to the next height (and cause a race condition)
	cs.eventBus.Stop()
	eventBus := types.NewEventBusWithBufferCapacity(0)
	eventBus.SetLogger(log.TestingLogger().With("module", "events"))
	cs.SetEventBus(eventBus)
	eventBus.Start()
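	// With buffer capacity 0, each publish on the bus presumably blocks until
	// this test reads the event from its subscription, keeping the state
	// machine in lock-step with the assertions below.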
	voteCh := subscribe(cs.eventBus, types.EventQueryVote)
	propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
	newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)

	startTestRound(cs, height, round)

	<-newRoundCh

	// grab proposal
	re := <-propCh
	propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash()

	<-voteCh // wait for prevote
	validatePrevote(t, cs, round, vss[0], propBlockHash)

	<-voteCh // wait for precommit

	// we're going to roll right into new height
	<-newRoundCh

	validateLastPrecommit(t, cs, vss[0], propBlockHash)
}
// nil is proposed, so prevote and precommit nil
func TestStateFullRoundNil(t *testing.T) {
	cs, vss := randConsensusState(1)
	height, round := cs.Height, cs.Round

	voteCh := subscribe(cs.eventBus, types.EventQueryVote)

	cs.enterPrevote(height, round)
	cs.startRoutines(4)

	<-voteCh // prevote
	<-voteCh // precommit

	// should prevote and precommit nil
	validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil)
}
// run through propose, prevote, precommit commit with two validators
// where the first validator has to wait for votes from the second
func TestStateFullRound2(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]
	height, round := cs1.Height, cs1.Round

	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)

	// start round and wait for propose and prevote
	startTestRound(cs1, height, round)

	<-voteCh // prevote

	// we should be stuck in limbo waiting for more prevotes
	rs := cs1.GetRoundState()
	propBlockHash, propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header()

	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2)
	<-voteCh

	<-voteCh // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash)

	// we should be stuck in limbo waiting for more precommits

	// precommit arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2)
	<-voteCh

	// wait to finish commit, propose in next height
	<-newBlockCh
}
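// With two equal-weight validators, the total voting power is 2 and +2/3 of 2
// is more than 1, so no step above can advance on a single vote: each one
// needs both cs1's own vote and the injected vote from vs2.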
//------------------------------------------------------------------------------------------
// LockSuite

// two validators, 4 rounds.
// two vals take turns proposing. val1 locks on first one, precommits nil on everything else
func TestStateLockNoPOL(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]
	height := cs1.Height

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)

	/*
		Round1 (cs1, B) // B B // B B2
	*/

	// start round and wait for prevote
	cs1.enterNewRound(height, 0)
	cs1.startRoutines(0)

	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	<-voteCh // prevote

	// we should now be stuck in limbo forever, waiting for more prevotes
	// prevote arrives from vs2:
	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2)
	<-voteCh // prevote

	<-voteCh // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// we should now be stuck in limbo forever, waiting for more precommits
	// let's add one for a different block
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	hash := make([]byte, len(theBlockHash))
	copy(hash, theBlockHash)
	hash[0] = byte((hash[0] + 1) % 255)
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh // precommit

	// (note we're entering precommit for a second time this round, but with invalid args;
	// then we enterPrecommitWait, and timeout into the new round)
	<-timeoutWaitCh

	<-newRoundCh
	t.Log("#### ONTO ROUND 1")
	/*
		Round2 (cs1, B) // B B2
	*/

	incrementRound(vs2)

	// now we're on a new round and not the proposer, so wait for timeout
	re = <-timeoutProposeCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	if rs.ProposalBlock != nil {
		panic("Expected proposal block to be nil")
	}

	// wait to finish prevote
	<-voteCh

	// we should have prevoted our locked block
	validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash())

	// add a conflicting prevote from the other validator
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// now we're going to enter prevote again, but with invalid args,
	// and then prevote wait, which should timeout. then wait for precommit
	<-timeoutWaitCh

	<-voteCh // precommit

	// the proposed block should still be locked and our precommit added;
	// we should precommit nil and be locked on the proposal
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash)

	// add conflicting precommit from vs2
	// NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	// (note we're entering precommit for a second time this round, but with invalid args;
	// then we enterPrecommitWait and timeout into NewRound)
	<-timeoutWaitCh

	<-newRoundCh
	t.Log("#### ONTO ROUND 2")
	/*
		Round3 (vs2, _) // B, B2
	*/

	incrementRound(vs2)

	re = <-proposalCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// now we're on a new round and are the proposer
	if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) {
		panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock))
	}

	<-voteCh // prevote

	validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash())

	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	<-timeoutWaitCh // prevote wait
	<-voteCh        // precommit

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal

	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	<-voteCh

	<-timeoutWaitCh

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	if prop == nil || propBlock == nil {
		t.Fatal("Failed to create proposal block with vs2")
	}

	incrementRound(vs2)

	<-newRoundCh
	t.Log("#### ONTO ROUND 3")
	/*
		Round4 (vs2, C) // B C // B C
	*/

	// now we're on a new round and not the proposer,
	// so set the proposal block
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil {
		t.Fatal(err)
	}

	<-proposalCh
	<-voteCh // prevote

	// prevote for locked block (not proposal)
	validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash())

	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2)
	<-voteCh

	<-timeoutWaitCh
	<-voteCh

	validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal

	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height
	<-voteCh
}
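// Across all four rounds above, cs1 never gives up the lock it acquired in
// round 0: lacking a newer polka, it keeps prevoting the locked block and
// precommitting nil in every subsequent round.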
// 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
func TestStateLockPOLRelock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B// B nil B nil
		e.g. vs2 and vs4 didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)

	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
	// prevotes
	discardFromChan(voteCh, 3)

	<-voteCh // our precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
	// precommits
	discardFromChan(voteCh, 3)

	// before we timeout to the new round, set the new proposal
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)
	propBlockHash := propBlock.Hash()

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	<-timeoutWaitCh

	// XXX: this isn't guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	<-newRoundCh
	t.Log("### ONTO ROUND 1")

	/*
		Round2 (vs2, C) // B C C C // C C C _)

		cs1 changes lock!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal
	select {
	case <-proposalCh:
	case <-timeoutProposeCh:
		<-proposalCh
	}

	// go to prevote, prevote for locked block (not proposal), move on
	<-voteCh
	validatePrevote(t, cs1, 0, vss[0], theBlockHash)

	// now let's add prevotes from everyone else for the new block
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
	// prevotes
	discardFromChan(voteCh, 3)

	// now either we go to PrevoteWait or Precommit
	select {
	case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit
		// XXX: there's no guarantee we see the polka, this might be a precommit for nil,
		// in which case the test fails!
		<-voteCh
	case <-voteCh: // we went straight to Precommit
	}

	// we should have unlocked and locked on the new block
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
	discardFromChan(voteCh, 2)

	be := <-newBlockCh
	b := be.(types.EventDataNewBlockHeader)
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	if rs.Height != 2 {
		panic("Expected height to increment")
	}
	if !bytes.Equal(b.Header.Hash(), propBlockHash) {
		panic("Expected new block to be proposal block")
	}
}
// 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
func TestStateLockPOLUnlock(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// everything done from perspective of cs1

	/*
		Round1 (cs1, B) // B B B B // B nil B nil
		e.g. didn't see the 2/3 prevotes
	*/

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	theBlockHash := rs.ProposalBlock.Hash()

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)

	<-voteCh // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)

	rs = cs1.GetRoundState()

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)

	// before we time out into new round, set next proposal block
	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	re = <-timeoutWaitCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	lockedBlockHash := rs.LockedBlock.Hash()

	// XXX: this isn't guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	<-newRoundCh
	t.Log("#### ONTO ROUND 1")
	/*
		Round2 (vs2, C) // B nil nil nil // nil nil nil _

		cs1 unlocks!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal
	select {
	case <-proposalCh:
	case <-timeoutProposeCh:
		<-proposalCh
	}

	// go to prevote, prevote for locked block (not proposal)
	<-voteCh
	validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)

	// now let's add prevotes from everyone else for nil (a polka!)
	signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	// the polka makes us unlock and precommit nil
	<-unlockCh
	<-voteCh // precommit

	// we should have unlocked and committed nil
	// NOTE: since we don't relock on nil, the lock round is 0
	validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil)

	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)
	<-newRoundCh
}
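// The unlock rule exercised above: a lock may only be released by a polka
// (+2/3 prevotes) from a round later than the lock round. Here the nil polka
// in round 1 outranks the round-0 lock, so cs1 emits an Unlock event and
// precommits nil instead of its previously locked block.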
// 4 vals
// a polka at round 1 but we miss it
// then a polka at round 2 that we lock on
// then we see the polka from round 1 but shouldn't unlock
func TestStateLockPOLSafety1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	propBlock := rs.ProposalBlock

	<-voteCh // prevote

	validatePrevote(t, cs1, 0, vss[0], propBlock.Hash())

	// the others sign a polka but we don't see it
	prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4)

	// before we time out into new round, set next proposer
	// and next proposal block
	/*
		_, v1 := cs1.Validators.GetByAddress(vss[0].Address)
		v1.VotingPower = 1
		if updated := cs1.Validators.Update(v1); !updated {
			panic("failed to update validator")
		}
	*/

	t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash()))

	// we do see them precommit nil
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4)

	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash := propBlock.Hash()
	propBlockParts := propBlock.MakePartSet(partSize)

	incrementRound(vs2, vs3, vs4)

	// XXX: this isn't guaranteed to get there before the timeoutPropose ...
	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
		t.Fatal(err)
	}

	<-newRoundCh
	t.Log("### ONTO ROUND 1")
	/*
		Round2
		// we timeout and prevote our lock
		// a polka happened but we didn't see it!
	*/

	// now we're on a new round and not the proposer,
	// but we should receive the proposal
	select {
	case re = <-proposalCh:
	case <-timeoutProposeCh:
		re = <-proposalCh
	}

	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	if rs.LockedBlock != nil {
		panic("we should not be locked!")
	}
	t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash))

	// go to prevote, prevote for proposal block
	<-voteCh
	validatePrevote(t, cs1, 1, vss[0], propBlockHash)

	// now we see the others prevote for it, so we should lock on it
	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)

	<-voteCh // precommit

	// we should have precommitted
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)

	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3)

	<-timeoutWaitCh

	incrementRound(vs2, vs3, vs4)

	<-newRoundCh

	t.Log("### ONTO ROUND 2")
	/*
		Round3
		we see the polka from round 1 but we shouldn't unlock!
	*/

	// timeout of propose
	<-timeoutProposeCh

	// finish prevote
	<-voteCh

	// we should prevote what we're locked on
	validatePrevote(t, cs1, 2, vss[0], propBlockHash)

	newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)

	// add prevotes from the earlier round
	addVotes(cs1, prevotes...)

	t.Log("Done adding prevotes!")

	ensureNoNewStep(newStepCh)
}
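// ensureNoNewStep asserts that replaying the round-0 prevotes does not move
// the state machine: a polka from a round earlier than our lock round must
// have no effect on the lock.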
// 4 vals.
// polka P0 at R0, P1 at R1, and P2 at R2,
// we lock on P0 at R0, don't see P1, and unlock using P2 at R2
// then we should make sure we don't lock using P1
// What we want:
// don't see P0, lock on P1 at R1, don't unlock using P0 at R2
func TestStateLockPOLSafety2(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// the block for R0: gets polka'd but we miss it
	// (even though we signed it, shhh)
	_, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round)
	propBlockHash0 := propBlock0.Hash()
	propBlockParts0 := propBlock0.MakePartSet(partSize)

	// the others sign a polka but we don't see it
	prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)

	// the block for round 1
	prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
	propBlockHash1 := propBlock1.Hash()
	propBlockParts1 := propBlock1.MakePartSet(partSize)
	propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()}

	incrementRound(vs2, vs3, vs4)

	cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait)

	t.Log("### ONTO Round 1")
	// jump in at round 1
	height := cs1.Height
	startTestRound(cs1, height, 1)
	<-newRoundCh

	if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil {
		t.Fatal(err)
	}
	<-proposalCh

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4)

	<-voteCh // precommit
	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1)

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3)

	incrementRound(vs2, vs3, vs4)

	// timeout of precommit wait to new round
	<-timeoutWaitCh

	// in round 2 we see the polka'd block from round 0
	newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1)
	if err := vs3.SignProposal(config.ChainID(), newProp); err != nil {
		t.Fatal(err)
	}
	if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil {
		t.Fatal(err)
	}

	// Add the pol votes
	addVotes(cs1, prevotes...)

	<-newRoundCh
	t.Log("### ONTO Round 2")
	/*
		Round2
		// now we see the polka from round 0, but we shouldn't unlock
	*/

	select {
	case <-timeoutProposeCh:
		<-proposalCh
	case <-proposalCh:
	}

	select {
	case <-unlockCh:
		panic("validator unlocked using an old polka")
	case <-voteCh:
		// prevote our locked block
	}
	validatePrevote(t, cs1, 2, vss[0], propBlockHash1)
}
//------------------------------------------------------------------------------------------
// SlashingSuite
// TODO: Slashing

/*
func TestStateSlashingPrevotes(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	<-voteCh // prevote

	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// we should now be stuck in limbo forever, waiting for more prevotes.
	// adding one for a different block should cause us to go into prevote wait
	hash := rs.ProposalBlock.Hash()
	hash[0] = byte(hash[0]+1) % 255
	signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2)

	<-timeoutWaitCh

	// NOTE: we have to send the vote for a different block first so we don't just go into precommit round right
	// away and ignore more prevotes (and thus fail to slash!)

	// add the conflicting vote
	signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)

	// XXX: Check for existence of Dupeout info
}

func TestStateSlashingPrecommits(t *testing.T) {
	cs1, vss := randConsensusState(2)
	vs2 := vss[1]

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	<-voteCh // prevote

	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	// add prevote from vs2
	signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)

	<-voteCh // precommit

	// we should now be stuck in limbo forever, waiting for more precommits.
	// adding one for a different block should cause us to go into precommit wait
	hash := rs.ProposalBlock.Hash()
	hash[0] = byte(hash[0]+1) % 255
	signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2)

	// NOTE: we have to send the vote for a different block first so we don't just go into precommit round right
	// away and ignore more precommits (and thus fail to slash!)

	// add precommit from vs2
	signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2)

	// XXX: Check for existence of Dupeout info
}
*/
//------------------------------------------------------------------------------------------
// CatchupSuite

//------------------------------------------------------------------------------------------
// HaltSuite

// 4 vals.
// we receive a final precommit after going into next round, but others might have gone to commit already!
func TestStateHalt1(t *testing.T) {
	cs1, vss := randConsensusState(4)
	vs2, vs3, vs4 := vss[1], vss[2], vss[3]

	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes

	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
	newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())

	// start round and wait for propose and prevote
	startTestRound(cs1, cs1.Height, 0)
	<-newRoundCh
	re := <-proposalCh
	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
	propBlock := rs.ProposalBlock
	propBlockParts := propBlock.MakePartSet(partSize)

	<-voteCh // prevote

	signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4)
	<-voteCh // precommit

	// the proposed block should now be locked and our precommit added
	validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash())

	// add precommits from the rest
	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didn't receive proposal
	signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3)
	// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
	precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header())

	incrementRound(vs2, vs3, vs4)

	// timeout to new round
	<-timeoutWaitCh
	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	t.Log("### ONTO ROUND 1")
	/*
		Round2
		// we timeout and prevote our lock
		// a polka happened but we didn't see it!
	*/

	// go to prevote, prevote for locked block
	<-voteCh // prevote
	validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash())

	// now we receive the precommit from the previous round
	addVotes(cs1, precommit4)

	// receiving that precommit should take us straight to commit
	<-newBlockCh

	re = <-newRoundCh
	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)

	if rs.Height != 2 {
		panic("expected height to increment")
	}
}
// subscribe subscribes test client to the given query and returns a channel with cap = 1.
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} {
	out := make(chan interface{}, 1)
	err := eventBus.Subscribe(context.Background(), testSubscriber, q, out)
	if err != nil {
		panic(fmt.Sprintf("failed to subscribe %s to %v: %v", testSubscriber, q, err))
	}
	return out
}
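// The cap-1 buffer lets the event bus hand off one event without blocking,
// which is presumably enough because every test reads each subscription in
// lock-step with the state machine.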
// discardFromChan reads and drops n values from the channel.
func discardFromChan(ch <-chan interface{}, n int) {
	for i := 0; i < n; i++ {
		<-ch
	}
}
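// discardFromChanTimeout is a hypothetical variant of discardFromChan (not
// used by the tests above): it drains n values but fails the test instead of
// hanging forever when an expected event never arrives. A minimal sketch,
// assuming a *testing.T is in scope at the call site.
func discardFromChanTimeout(t *testing.T, ch <-chan interface{}, n int, timeout time.Duration) {
	for i := 0; i < n; i++ {
		select {
		case <-ch:
		case <-time.After(timeout):
			t.Fatalf("timed out waiting for value %d of %d", i+1, n)
		}
	}
}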