
997 lines, 30 KiB (only the beginning of the file is reproduced below)

Last commit to this file (5 years ago): blockchain: Reorg reactor (#3561). Squashed commit message:

* go routines in blockchain reactor
* Added reference to the go routine diagram
* Initial commit
* cleanup
* Undo testing_logger change, committed by mistake
* Fix the test loggers
* pulled some fsm code into pool.go
* added pool tests
* changes to the design: added block requests under peer; moved the request trigger into the reactor poolRoutine, now triggered by a ticker; in general, moved everything required for making block requests smarter into the poolRoutine; added a simple map of heights to keep track of what will need to be requested next; added a few more tests
* send errors to the FSM on a different channel than blocks: send errors (RemovePeer) from the switch on a different channel than the one receiving blocks; renamed channels; added more pool tests
* more pool tests
* lint errors
* more tests
* more tests
* switch fast sync to new implementation
* fixed data race in tests
* cleanup
* finished fsm tests
* address golangci comments :)
* address golangci comments :)
* Added timeout on next block needed to advance
* updating docs and cleanup
* fix issue in test from previous cleanup
* cleanup
* Added termination scenarios, tests and more cleanup
* small fixes to adr, comments and cleanup
* Fix bug in sendRequest() (a sketch of this pattern follows the commit message): if we tried to send a request to a peer not present in the switch, a missing continue statement caused the request to be blackholed at a peer that was removed and never retried. While this bug was manifesting, the reactor kept asking for other blocks that would be stored and never consumed. Added the number of unconsumed blocks to the math for requesting blocks ahead of the current processing height, so eventually no more blocks are requested until the already received ones are consumed.
* remove bpPeer's didTimeout field
* Use distinct err codes for peer timeout and FSM timeouts
* Don't allow peers to update with lower height
* review comments from Ethan and Zarko
* some cleanup, renaming, comments
* Move block execution into a separate goroutine
* Remove pool's numPending
* review comments
* fix lint, remove old blockchain reactor and duplicates in fsm tests
* small reorg around peer after review comments
* add the reactor spec
* verify block only once
* review comments
* change to int for max number of pending requests
* cleanup and godoc
* Add configuration flag for fast sync version
* golangci fixes
* fix config template
* move both reactor versions under blockchain
* cleanup, golint, renaming stuff
* updated documentation, fixed more golint warnings
* integrate with behavior package
* sync with master
* gofmt
* add changelog_pending entry
* move to improvements
* suggestion to changelog entry
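The sendRequest() fix called out in the commit message above is easier to picture with a small sketch. The snippet below is not the tendermint implementation, and the names (sendRequests, peerInSwitch, send, markUnsent) are hypothetical; it only illustrates the shape of the bug the commit describes: without the continue, a request aimed at a peer that has already been removed from the switch falls through, is treated as sent, and is never retried.

// Hypothetical sketch of the missing-continue bug described in the commit
// message above. Names are made up; this is not the tendermint code.
func sendRequests(pending map[int64]string, peerInSwitch func(string) bool,
    send func(peerID string, height int64), markUnsent func(height int64)) {
    for height, peerID := range pending {
        if !peerInSwitch(peerID) {
            // The peer is gone: put the height back so it is requested again
            // from someone else. Without this continue, execution fell through
            // to send() and the request was "blackholed", counted as sent to a
            // removed peer and never retried.
            markUnsent(height)
            continue
        }
        send(peerID, height)
    }
}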
package v1

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/tendermint/tendermint/libs/log"
    tmmath "github.com/tendermint/tendermint/libs/math"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)
type lastBlockRequestT struct {
    peerID p2p.ID
    height int64
}

type lastPeerErrorT struct {
    peerID p2p.ID
    err    error
}

// reactor for FSM testing
type testReactor struct {
    logger            log.Logger
    fsm               *BcReactorFSM
    numStatusRequests int
    numBlockRequests  int
    lastBlockRequest  lastBlockRequestT
    lastPeerError     lastPeerErrorT
    stateTimerStarts  map[string]int
}
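// Note (editorial, not in the original file): judging by its fields, testReactor
// stands in for the real blockchain reactor and records what the FSM asks it to
// do: how many status and block requests were issued, the last block request and
// peer error, and how often each state's timer was started, so tests can assert
// on that bookkeeping. The callback methods that update these fields are
// presumably defined later in the file, past the portion shown here.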
func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
    return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}

type fsmStepTestValues struct {
    currentState string
    event        bReactorEvent
    data         bReactorEventData

    wantErr           error
    wantState         string
    wantStatusReqSent bool
    wantReqIncreased  bool
    wantNewBlocks     []int64
    wantRemovedPeers  []p2p.ID
}
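// Note (editorial, not in the original file): each fsmStepTestValues value
// describes one test step. currentState is the state the FSM should be in
// before the step, event and data are what gets fed to the FSM (via
// sendEventToFSM), and the want* fields are the expected outcome: the returned
// error, the resulting state, whether a status request was sent, whether the
// number of block requests increased, the heights the pool should now hold,
// and the peers that should have been removed.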
// ---------------------------------------------------------------------------
// helper functions that build test steps for the different FSM events, the
// starting state, and the expected behavior

func sStopFSMEv(current, expected string) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        stopFSMEv,
        wantState:    expected,
        wantErr:      errNoErrorFinished}
}

func sUnknownFSMEv(current string) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        1234,
        wantState:    current,
        wantErr:      errInvalidEvent}
}

func sStartFSMEv() fsmStepTestValues {
    return fsmStepTestValues{
        currentState:      "unknown",
        event:             startFSMEv,
        wantState:         "waitForPeer",
        wantStatusReqSent: true}
}

func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        stateTimeoutEv,
        data: bReactorEventData{
            stateName: timedoutState,
        },
        wantState: expected,
        wantErr:   wantErr,
    }
}

func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        processedBlockEv,
        data: bReactorEventData{
            err: reactorError,
        },
        wantState: expected,
        wantErr:   reactorError,
    }
}

func sNoBlockResponseEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        noBlockResponseEv,
        data: bReactorEventData{
            peerID: peerID,
            height: height,
        },
        wantState: expected,
        wantErr:   err,
    }
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        statusResponseEv,
        data:         bReactorEventData{peerID: peerID, height: height},
        wantState:    expected,
        wantErr:      err}
}

func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
    return fsmStepTestValues{
        currentState:     current,
        event:            makeRequestsEv,
        data:             bReactorEventData{maxNumRequests: maxPendingRequests},
        wantState:        expected,
        wantReqIncreased: true,
    }
}

func sMakeRequestsEvErrored(current, expected string,
    maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
    return fsmStepTestValues{
        currentState:     current,
        event:            makeRequestsEv,
        data:             bReactorEventData{maxNumRequests: maxPendingRequests},
        wantState:        expected,
        wantErr:          err,
        wantRemovedPeers: peersRemoved,
        wantReqIncreased: true,
    }
}

func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
    txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
    return fsmStepTestValues{
        currentState: current,
        event:        blockResponseEv,
        data: bReactorEventData{
            peerID: peerID,
            height: height,
            block:  types.MakeBlock(height, txs, nil, nil),
            length: 100},
        wantState:     expected,
        wantNewBlocks: append(prevBlocks, height),
    }
}

func sBlockRespEvErrored(current, expected string,
    peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
    txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
    return fsmStepTestValues{
        currentState: current,
        event:        blockResponseEv,
        data: bReactorEventData{
            peerID: peerID,
            height: height,
            block:  types.MakeBlock(height, txs, nil, nil),
            length: 100},
        wantState:        expected,
        wantErr:          wantErr,
        wantRemovedPeers: peersRemoved,
        wantNewBlocks:    prevBlocks,
    }
}

func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        peerRemoveEv,
        data: bReactorEventData{
            peerID: peerID,
            err:    err,
        },
        wantState:        expected,
        wantRemovedPeers: peersRemoved,
    }
}
  174. // --------------------------------------------
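// newTestReactor creates a testReactor with a fresh FSM starting at the given height.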
func newTestReactor(height int64) *testReactor {
	testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
	testBcR.fsm = NewFSM(height, testBcR)
	testBcR.fsm.SetLogger(testBcR.logger)
	return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
	// There is currently no good way to know to which peer a block request was sent.
	// So, in cases where it does not matter, before simulating a block response we
	// cheat and look up in the pool which peer the block is expected from.
	if step.event == blockResponseEv {
		height := step.data.height
		peerID, ok := testBcR.fsm.pool.blocks[height]
		if ok {
			step.data.peerID = peerID
		}
	}
}
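
// testFields describes a complete FSM test case: its name, the starting
// height, the per-peer and total request limits, and the scripted steps to execute.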
type testFields struct {
	name               string
	startingHeight     int64
	maxRequestsPerPeer int
	maxPendingRequests int
	steps              []fsmStepTestValues
}
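
// executeFSMTests runs each test case step by step: it asserts that the FSM is
// in the expected state before the event, sends the event, and then checks the
// returned error, the request counters, the blocks stored in the pool, and the
// resulting FSM state.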
func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				var heightBefore int64
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightBefore = testBcR.fsm.pool.Height
				}
				oldNumStatusRequests := testBcR.numStatusRequests
				oldNumBlockRequests := testBcR.numBlockRequests
				if matchRespToReq {
					fixBlockResponseEvStep(&step, testBcR)
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				if step.wantReqIncreased {
					assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
				} else {
					assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
				}

				for _, height := range step.wantNewBlocks {
					_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
					assert.Nil(t, err)
				}
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightAfter := testBcR.fsm.pool.Height
					assert.Equal(t, heightBefore, heightAfter)
					firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
					secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
					assert.NotNil(t, err1)
					assert.NotNil(t, err2)
					assert.Nil(t, firstAfter)
					assert.Nil(t, secondAfter)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)

				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}
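
// Note: the TSx labels in the test names presumably refer to the termination
// scenarios described in the v1 blockchain reactor specification.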
func TestFSMBasic(t *testing.T) {
	tests := []testFields{
		{
			name:               "one block, one peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "multi block, multi peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),

				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMBlockVerificationFailure(t *testing.T) {
	tests := []testFields{
		{
			name:               "block verification failure - TS2 variant",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// process block failure, should remove P1 and all blocks
				sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMNoBlockResponse(t *testing.T) {
	tests := []testFields{
		{
			name:               "no block response",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				// no block responses for heights 1-3 from P1; P1 and all its blocks should be removed
				sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 1, nil),
				sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 2, nil),
				sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 3, nil),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBadBlockFromPeer(t *testing.T) {
	tests := []testFields{
		{
			name:               "block we haven't asked for",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and ask for blocks 1-3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// blockResponseEv for height 100 should cause an error
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
			},
		},
		{
			name:               "block we already have",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),

				// Get block 1 again. Since peer is removed together with block 1,
				// the blocks present in the pool should be {}
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "block from unknown peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),

				// get block 1 from unknown peer P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "block from wrong peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1, make requests for blocks 1-3 to P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// receive block 1 from P2
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
	tests := []testFields{
		{
			name:               "block at current height undelivered - TS5",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1, get blocks 1 and 2, process block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// timeout on block 3, P1 should be removed
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),

				// make requests and finish by receiving blocks 2 and 3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1, request blocks 1-3 from P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2 (tallest)
				sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// receive blocks 1-3 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// process blocks at heights 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// timeout on block at height 4
				sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMPeerRelatedEvents(t *testing.T) {
	tests := []testFields{
		{
			name:           "peer remove event with no blocks",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1, P2, P3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),

				// switch removes P2
				sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:           "only peer removed while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),

				// switch removes P1
				sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1 and process block at height 100
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				// switch removes P2 (the highest peer); the node should be finished
				sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
				// process the block at height 100
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// P2 becomes short
				sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
			},
		},
		{
			name:           "new short peer while in waitForPeer state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
			},
		},
		{
			name:           "new short peer while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
			},
		},
		{
			name:           "only peer updated with low height while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
			},
		},
		{
			name:               "peer does not exist in the switch",
			startingHeight:     9999999,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),

				// send request for block 9999999
				// Note: For this block request the "switch missing the peer" error is simulated,
				// see implementation of bcReactor interface, sendBlockRequest(), in this file.
				sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
					maxNumRequests, nil, []p2p.ID{"P1"}),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMStopFSM(t *testing.T) {
	tests := []testFields{
		{
			name: "stopFSMEv in unknown",
			steps: []fsmStepTestValues{
				sStopFSMEv("unknown", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForPeer",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStopFSMEv("waitForPeer", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStopFSMEv("waitForBlock", "finished"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMUnknownElements(t *testing.T) {
	tests := []testFields{
		{
			name: "unknown event for state unknown",
			steps: []fsmStepTestValues{
				sUnknownFSMEv("unknown"),
			},
		},
		{
			name: "unknown event for state waitForPeer",
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sUnknownFSMEv("waitForPeer"),
			},
		},
		{
			name:           "unknown event for state waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sUnknownFSMEv("waitForBlock"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMPeerStateTimeoutEvent(t *testing.T) {
	tests := []testFields{
		{
			name:               "timeout event for state waitForPeer while in state waitForPeer - TS1",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
			},
		},
		{
			name:               "timeout event for state waitForPeer while in a state != waitForPeer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in state waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in a state != waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock with multiple peers",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
	}

	executeFSMTests(t, tests, false)
}
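
// makeCorrectTransitionSequence builds a happy-path test case: numPeers peers
// report their status, blocks from startingHeight up to numBlocks are requested
// and received from P0, and processedBlockEv steps are interleaved until the
// FSM reaches the "finished" state.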
func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
	maxRequestsPerPeer int, maxPendingRequests int) testFields {

	// Generate numPeers peers; their heights are either random or all equal to
	// numBlocks, depending on the randomPeerHeights flag.
	peerHeights := make([]int64, numPeers)
	for i := 0; i < numPeers; i++ {
		if i == 0 {
			peerHeights[0] = numBlocks
			continue
		}
		if randomPeerHeights {
			peerHeights[i] = int64(tmmath.MaxInt(tmrand.Intn(int(numBlocks)), int(startingHeight)+1))
		} else {
			peerHeights[i] = numBlocks
		}
	}

	// Approximate the slice capacity to save time for appends.
	testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))

	testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
		numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)

	// Add the startFSMEv step.
	testSteps = append(testSteps, sStartFSMEv())

	// For each peer, add a statusResponseEv step.
	for i := 0; i < numPeers; i++ {
		peerName := fmt.Sprintf("P%d", i)
		if i == 0 {
			testSteps = append(
				testSteps,
				sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		} else {
			testSteps = append(testSteps,
				sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		}
	}

	height := startingHeight
	numBlocksReceived := 0
	prevBlocks := make([]int64, 0, maxPendingRequests)

forLoop:
	for i := 0; i < int(numBlocks); i++ {

		// Add the makeRequestsEv step periodically.
		if i%maxRequestsPerPeer == 0 {
			testSteps = append(
				testSteps,
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
			)
		}

		// Add the blockResponseEv step.
		testSteps = append(
			testSteps,
			sBlockRespEv("waitForBlock", "waitForBlock",
				"P0", height, prevBlocks))
		prevBlocks = append(prevBlocks, height)
		height++
		numBlocksReceived++

		// Add the processedBlockEv steps periodically.
		if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
			for j := int(height) - numBlocksReceived; j < int(height); j++ {
				if j >= int(numBlocks) {
					// This is the last block that is processed; we should be in the "finished" state.
					testSteps = append(
						testSteps,
						sProcessedBlockEv("waitForBlock", "finished", nil))
					break forLoop
				}
				testSteps = append(
					testSteps,
					sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
			}
			numBlocksReceived = 0
			prevBlocks = make([]int64, 0, maxPendingRequests)
		}
	}

	return testFields{
		name:               testName,
		startingHeight:     startingHeight,
		maxRequestsPerPeer: maxRequestsPerPeer,
		maxPendingRequests: maxPendingRequests,
		steps:              testSteps,
	}
}
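
// Bounds used when generating random parameters for the correct-transition-sequence test.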
const (
	maxStartingHeightTest       = 100
	maxRequestsPerPeerTest      = 20
	maxTotalPendingRequestsTest = 600
	maxNumPeersTest             = 1000
	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)

func makeCorrectTransitionSequenceWithRandomParameters() testFields {
	// Generate a starting height for fast sync.
	startingHeight := int64(tmrand.Intn(maxStartingHeightTest) + 1)

	// Generate the number of requests per peer.
	maxRequestsPerPeer := tmrand.Intn(maxRequestsPerPeerTest) + 1

	// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
	maxPendingRequests := tmrand.Intn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer

	// Generate the number of blocks to be synced.
	numBlocks := int64(tmrand.Intn(maxNumBlocksInChainTest)) + startingHeight

	// Generate a number of peers.
	numPeers := tmrand.Intn(maxNumPeersTest) + 1

	return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}
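
// shouldApplyProcessedBlockEvStep reports whether a processedBlockEv step can
// be applied, i.e. the pool holds blocks at both the current height and the
// next one.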
func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
	if step.event == processedBlockEv {
		_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
		if err == errMissingBlock {
			return false
		}
		_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
		if err == errMissingBlock {
			return false
		}
	}
	return true
}

func TestFSMCorrectTransitionSequences(t *testing.T) {
	tests := []testFields{
		makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
		makeCorrectTransitionSequenceWithRandomParameters(),
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				oldNumStatusRequests := testBcR.numStatusRequests
				fixBlockResponseEvStep(&step, testBcR)
				if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
					continue
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)
				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}

// ----------------------------------------
// implements the bcRNotifier
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
	testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
	testR.lastPeerError.peerID = peerID
	testR.lastPeerError.err = err
}

func (testR *testReactor) sendStatusRequest() {
	testR.logger.Info("Reactor received sendStatusRequest call from FSM")
	testR.numStatusRequests++
}

func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
	testR.numBlockRequests++
	testR.lastBlockRequest.peerID = peerID
	testR.lastBlockRequest.height = height
	if height == 9999999 {
		// simulate switch does not have peer
		return errNilPeerForBlockRequest
	}
	return nil
}

func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
	if _, ok := testR.stateTimerStarts[name]; !ok {
		testR.stateTimerStarts[name] = 1
	} else {
		testR.stateTimerStarts[name]++
	}
}
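
// switchToConsensus is a no-op for these tests.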
func (testR *testReactor) switchToConsensus() {
}

// ----------------------------------------