Normalize priorities to not exceed total voting power (#3049)

* more proposer priority tests:
  - test that we don't reset to zero when updating / adding
  - test that same-power validators alternate
* add another test to track / simulate behaviour similar to #2960
* address some of Chris' review comments
* address some more of Chris' review comments
* temporarily pushing branch with the following changes: the total power might change if a validator is added, removed, or updated, so decrement the accums (of all validators) directly after any of these events (by the inverse of the change)
* fix #2960 by re-normalizing / scaling priorities to be in bounds of total power; additionally:
  - remove heap where it doesn't make sense
  - average only at the end of IncrementProposerPriority instead of on each iteration
  - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect the above changes
* fix tests
* add comment
* update changelog pending & some minor changes
* comment that division will floor the result & fix typo
* update TestLargeGenesisValidator: remove TODO and increase the large genesis validator's voting power accordingly
* move changelog entry to P2P Protocol
* ceil instead of floor when dividing & update test
* quickly fix failing TestProposerPriorityDoesNotGetResetToZero: divide by Ceil((maxPriority - minPriority) / (2*totalVotingPower))
* fix typo: rename getValWitMostPriority -> getValWithMostPriority
* test proposer frequencies
* return absolute value for diff; keep testing
* use for loop for div
* cleanup, more tests
* spellcheck
* get rid of floats: manually ceil where necessary
* remove float, simplify, fix tests to match Chris's proof (#3157)
6 years ago
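The squashed commits above describe replacing float math with manual integer ceiling division when scaling priorities back into the bounds of the total power. Below is a minimal Go sketch of that idea, assuming illustrative names (`rescalePriorities`, `ceilDiv`) and the `Ceil((maxPriority - minPriority) / (2*totalVotingPower))` divisor quoted in the message; the actual implementation in types/validator_set.go differs in detail.

```go
package main

import "fmt"

// Validator is a simplified stand-in for the real type; everything here
// is illustrative, not Tendermint's API.
type Validator struct {
	Address          string
	VotingPower      int64
	ProposerPriority int64
}

// ceilDiv computes ceil(a/b) for a >= 0, b > 0 using integer arithmetic
// only - the "manually ceil where necessary" step that replaced floats.
func ceilDiv(a, b int64) int64 {
	return (a + b - 1) / b
}

// rescalePriorities divides all priorities by the same integer factor so
// that (maxPriority - minPriority) fits within 2*totalVotingPower, the
// bound the commit message divides by.
func rescalePriorities(vals []*Validator, totalVotingPower int64) {
	diffMax := 2 * totalVotingPower
	if len(vals) == 0 || diffMax <= 0 {
		return
	}
	min, max := vals[0].ProposerPriority, vals[0].ProposerPriority
	for _, v := range vals[1:] {
		if v.ProposerPriority < min {
			min = v.ProposerPriority
		}
		if v.ProposerPriority > max {
			max = v.ProposerPriority
		}
	}
	if diff := max - min; diff > diffMax {
		ratio := ceilDiv(diff, diffMax) // ratio >= 2 here, so priorities shrink
		for _, v := range vals {
			v.ProposerPriority /= ratio
		}
	}
}

func main() {
	vals := []*Validator{
		{Address: "a", VotingPower: 10, ProposerPriority: 50000},
		{Address: "b", VotingPower: 10, ProposerPriority: -49990},
	}
	rescalePriorities(vals, 20)                // diffMax = 40, ratio = 2500
	for _, v := range vals {
		fmt.Println(v.Address, v.ProposerPriority) // a 20, b -19
	}
}
```

Dividing every priority by the same ceiling ratio preserves their relative order while bringing the max-min spread to at or below the bound, which is why a single integer division suffices here.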
types: verify commit fully

Since the light client work introduced in v0.33, it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703

This means proposers can propose blocks that contain valid +2/3 signatures and then fill the rest of the signatures with whatever they want. They can claim that all the other validators signed simply by including a CommitSig with arbitrary signature data. While this doesn't seem to impact the safety of Tendermint per se, it means that Commits may contain a lot of invalid data.

This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows the txs are invalid and can punish the proposer. Since applications don't verify commit signatures directly (they trust Tendermint to do that), they won't be able to detect it.

This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this.

In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node cannot. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance, light clients can also skip verifying votes for nil, while full nodes cannot).

See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
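For concreteness, here is a hedged sketch of the two verification disciplines the commit message contrasts. `CommitSig`, `Validator`, and the `Verify` callback are simplified stand-ins, not Tendermint's real types; the point is only the control flow: the light path may return once +2/3 is tallied, while the full path inspects every signature first.

```go
package main

import (
	"errors"
	"fmt"
)

// CommitSig is a simplified stand-in; only the signature bytes matter here.
type CommitSig struct{ Signature []byte }

// Validator pairs voting power with a verify callback so the sketch stays
// self-contained; real code would check against the validator's pubkey.
type Validator struct {
	VotingPower int64
	Verify      func(msg, sig []byte) bool
}

// verifyCommitLight is the light-client shortcut: return as soon as +2/3 of
// the total power has signed, leaving later signatures unchecked.
func verifyCommitLight(vals []Validator, msg []byte, sigs []CommitSig, totalPower int64) error {
	var tallied int64
	for i, sig := range sigs {
		if !vals[i].Verify(msg, sig.Signature) {
			return fmt.Errorf("invalid signature at index %d", i)
		}
		tallied += vals[i].VotingPower
		if tallied*3 > totalPower*2 {
			return nil // +2/3 reached: remaining sigs are never inspected
		}
	}
	return errors.New("insufficient voting power")
}

// verifyCommitFull is what the issue argues a full node must do: check every
// signature, then check the tally, so garbage in the final -1/3 of the
// commit is rejected instead of forwarded to the app in LastCommitInfo.
func verifyCommitFull(vals []Validator, msg []byte, sigs []CommitSig, totalPower int64) error {
	var tallied int64
	for i, sig := range sigs {
		if !vals[i].Verify(msg, sig.Signature) {
			return fmt.Errorf("invalid signature at index %d", i)
		}
		tallied += vals[i].VotingPower
	}
	if tallied*3 <= totalPower*2 {
		return errors.New("insufficient voting power")
	}
	return nil
}

func main() {
	ok := func(msg, sig []byte) bool { return true }
	bad := func(msg, sig []byte) bool { return false }
	vals := []Validator{{2, ok}, {2, ok}, {2, ok}, {1, bad}} // last sig is garbage
	sigs := make([]CommitSig, 4)
	msg := []byte("block")
	fmt.Println(verifyCommitLight(vals, msg, sigs, 7)) // <nil>: garbage never checked
	fmt.Println(verifyCommitFull(vals, msg, sigs, 7))  // invalid signature at index 3
}
```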
types: prevent spurious validator power overflow warnings when changing the validator set (#4183)

Fix for #4164.

The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. The root cause is that in verifyUpdates(), updates are verified with respect to the total voting power in order of validator address. It is then possible for a low-address validator to increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios that remove and add/update validators with high voting power in the same update operation trigger the same false warning, and the updates are not applied.

The main changes to fix this are in verifyUpdates(), which now performs the verification starting with the decreases in power. It also takes into account the removals that are part of the update.

## Commits:

* tests for overflow detection and prevention
* test fix
* more tests
* fix the false overflow warnings and golint
* scopelint warning fix
* review comments
* variant using sort by amount of change in power
* compute separately the number of new validators in the update
* types: use a switch in processChanges
* more review comments
* types: use HasAddress in numNewValidators
* types: refactor verifyUpdates - copy updates, sort them by delta, and use the resulting slice to calculate tvpAfterUpdatesBeforeRemovals
* remove unused structs
* review comments
* update changelog
5 years ago
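A toy model of the reordering fix, under stated assumptions: `powerDelta`, the `verifyUpdates` signature, and the tiny cap are all illustrative, and real removals/updates carry more state than a single signed delta. It shows only why applying decreases first avoids the transient overflow.

```go
package main

import (
	"errors"
	"fmt"
	"sort"
)

// powerDelta is an illustrative stand-in: the signed change a single update
// makes to the set's total voting power (a removal contributes the full
// negative power of the removed validator).
type powerDelta struct {
	address string
	delta   int64
}

const maxTotalVotingPower = 100 // small illustrative cap, not the real MaxTotalVotingPower

// verifyUpdates checks that a batch of changes never pushes the running
// total over the cap. Sorting by delta (decreases and removals first)
// mirrors the fix described in the commit: verifying in address order can
// overflow transiently even when the final total is legal.
func verifyUpdates(total int64, updates []powerDelta) (int64, error) {
	sorted := make([]powerDelta, len(updates))
	copy(sorted, updates) // keep the caller's address-ordered slice intact
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].delta < sorted[j].delta })

	for _, u := range sorted {
		total += u.delta
		if total > maxTotalVotingPower {
			return 0, errors.New("total voting power would exceed maximum")
		}
	}
	return total, nil
}

func main() {
	// Address-ordered, "aaa" (+60) sorts before "zzz" (-60): naive in-order
	// verification would see a transient total of 150 > 100 and reject,
	// even though the final total is a legal 90.
	updates := []powerDelta{{"aaa", 60}, {"zzz", -60}}
	fmt.Println(verifyUpdates(90, updates)) // 90 <nil>
}
```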
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact the safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they are invalid and can punish the proposer. But since applications don't verify commit signatures directly (they trust Tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node cannot. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance, light clients can also skip verifying votes for nil while full nodes cannot). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
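To make the distinction concrete, here is a hedged sketch (invented types and helpers, not Tendermint's actual VerifyCommit/VerifyCommitLight API): the light variant may return as soon as more than 2/3 of the voting power is tallied, so garbage signatures after that point are never inspected, while the full-node variant must check every signature before tallying:

```go
package main

import (
	"errors"
	"fmt"
)

// sig stands in for a CommitSig: its voting power and whether its
// signature would pass real cryptographic verification.
type sig struct {
	power int64
	valid bool
}

// verifyLight stops as soon as more than 2/3 of the total power is tallied,
// mirroring what a light client is allowed to do.
func verifyLight(sigs []sig, totalPower int64) error {
	talliedPower := int64(0)
	for _, s := range sigs {
		if !s.valid {
			return errors.New("invalid signature")
		}
		talliedPower += s.power
		if talliedPower > totalPower*2/3 {
			return nil // later signatures are never looked at
		}
	}
	return errors.New("insufficient voting power")
}

// verifyFull checks every signature before tallying the power, which is
// what a fully verifying node must do.
func verifyFull(sigs []sig, totalPower int64) error {
	talliedPower := int64(0)
	for _, s := range sigs {
		if !s.valid { // every signature is checked, even past +2/3
			return errors.New("invalid signature")
		}
		talliedPower += s.power
	}
	if talliedPower*3 <= totalPower*2 {
		return errors.New("insufficient voting power")
	}
	return nil
}

func main() {
	// Three honest signatures already reach +2/3; the fourth is garbage.
	commit := []sig{{25, true}, {25, true}, {25, true}, {25, false}}
	fmt.Println(verifyLight(commit, 100)) // <nil>: stopped after 75/100
	fmt.Println(verifyFull(commit, 100))  // invalid signature
}
```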
package types

import (
	"bytes"
	"context"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"strings"
	"testing"
	"testing/quick"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
func TestValidatorSetBasic(t *testing.T) {
	// empty or nil validator lists are allowed,
	// but attempting to IncrementProposerPriority on them will panic.
	vset := NewValidatorSet([]*Validator{})
	assert.Panics(t, func() { vset.IncrementProposerPriority(1) })

	vset = NewValidatorSet(nil)
	assert.Panics(t, func() { vset.IncrementProposerPriority(1) })

	assert.EqualValues(t, vset, vset.Copy())
	assert.False(t, vset.HasAddress([]byte("some val")))
	idx, val := vset.GetByAddress([]byte("some val"))
	assert.EqualValues(t, -1, idx)
	assert.Nil(t, val)
	addr, val := vset.GetByIndex(-100)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	addr, val = vset.GetByIndex(0)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	addr, val = vset.GetByIndex(100)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	assert.Zero(t, vset.Size())
	assert.Equal(t, int64(0), vset.TotalVotingPower())
	assert.Nil(t, vset.GetProposer())
	assert.Equal(t, []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4,
		0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95,
		0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, vset.Hash())

	// add
	val = randModuloValidator(vset.TotalVotingPower())
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	assert.True(t, vset.HasAddress(val.Address))
	idx, _ = vset.GetByAddress(val.Address)
	assert.EqualValues(t, 0, idx)
	addr, _ = vset.GetByIndex(0)
	assert.Equal(t, []byte(val.Address), addr)
	assert.Equal(t, 1, vset.Size())
	assert.Equal(t, val.VotingPower, vset.TotalVotingPower())
	assert.NotNil(t, vset.Hash())
	assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) })
	assert.Equal(t, val.Address, vset.GetProposer().Address)

	// update
	val = randModuloValidator(vset.TotalVotingPower())
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	_, val = vset.GetByAddress(val.Address)
	val.VotingPower += 100
	proposerPriority := val.ProposerPriority
	val.ProposerPriority = 0
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	_, val = vset.GetByAddress(val.Address)
	assert.Equal(t, proposerPriority, val.ProposerPriority)
}
func TestValidatorSetValidateBasic(t *testing.T) {
	val, _ := randValidator(false, 1)
	badVal := &Validator{}

	testCases := []struct {
		vals ValidatorSet
		err  bool
		msg  string
	}{
		{
			vals: ValidatorSet{},
			err:  true,
			msg:  "validator set is nil or empty",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{},
			},
			err: true,
			msg: "validator set is nil or empty",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{val},
			},
			err: true,
			msg: "proposer failed validate basic, error: nil validator",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{badVal},
			},
			err: true,
			msg: "invalid validator #0: validator does not have a public key",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{val},
				Proposer:   val,
			},
			err: false,
			msg: "",
		},
	}

	for _, tc := range testCases {
		err := tc.vals.ValidateBasic()
		if tc.err {
			if assert.Error(t, err) {
				assert.Equal(t, tc.msg, err.Error())
			}
		} else {
			assert.NoError(t, err)
		}
	}
}
func TestCopy(t *testing.T) {
	vset := randModuloValidatorSet(10)
	vsetHash := vset.Hash()
	if len(vsetHash) == 0 {
		t.Fatalf("ValidatorSet had unexpected zero hash")
	}

	vsetCopy := vset.Copy()
	vsetCopyHash := vsetCopy.Hash()
	if !bytes.Equal(vsetHash, vsetCopyHash) {
		t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash)
	}
}
// Test that IncrementProposerPriority requires positive times.
func TestIncrementProposerPriorityPositiveTimes(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("foo"), 1000),
		newValidator([]byte("bar"), 300),
		newValidator([]byte("baz"), 330),
	})

	assert.Panics(t, func() { vset.IncrementProposerPriority(-1) })
	assert.Panics(t, func() { vset.IncrementProposerPriority(0) })
	vset.IncrementProposerPriority(1)
}
func BenchmarkValidatorSetCopy(b *testing.B) {
	b.StopTimer()
	vset := NewValidatorSet([]*Validator{})
	for i := 0; i < 1000; i++ {
		privKey := ed25519.GenPrivKey()
		pubKey := privKey.PubKey()
		val := NewValidator(pubKey, 10)
		err := vset.UpdateWithChangeSet([]*Validator{val})
		if err != nil {
			panic("Failed to add validator")
		}
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		vset.Copy()
	}
}
//-------------------------------------------------------------------

func TestProposerSelection1(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("foo"), 1000),
		newValidator([]byte("bar"), 300),
		newValidator([]byte("baz"), 330),
	})
	var proposers []string
	for i := 0; i < 99; i++ {
		val := vset.GetProposer()
		proposers = append(proposers, string(val.Address))
		vset.IncrementProposerPriority(1)
	}
	expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
		` foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
		` foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz` +
		` foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo` +
		` foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo`
	if expected != strings.Join(proposers, " ") {
		t.Errorf("expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " "))
	}
}
func TestProposerSelection2(t *testing.T) {
	addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
	addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}

	// when all voting power is the same, we go in order of addresses
	val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100)
	valList := []*Validator{val0, val1, val2}
	vals := NewValidatorSet(valList)
	for i := 0; i < len(valList)*5; i++ {
		ii := i % len(valList)
		prop := vals.GetProposer()
		if !bytes.Equal(prop.Address, valList[ii].Address) {
			t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address)
		}
		vals.IncrementProposerPriority(1)
	}

	// One validator has more than the others, but not enough to propose twice in a row
	*val2 = *newValidator(addr2, 400)
	vals = NewValidatorSet(valList)
	// vals.IncrementProposerPriority(1)
	prop := vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr0) {
		t.Fatalf("Expected smallest address to be the next proposer. Got %X", prop.Address)
	}

	// One validator has more than the others, and enough to be proposer twice in a row
	*val2 = *newValidator(addr2, 401)
	vals = NewValidatorSet(valList)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr0) {
		t.Fatalf("Expected smallest address to be the next proposer. Got %X", prop.Address)
	}

	// each validator should be the proposer a proportional number of times
	val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3)
	valList = []*Validator{val0, val1, val2}
	propCount := make([]int, 3)
	vals = NewValidatorSet(valList)
	N := 1
	for i := 0; i < 120*N; i++ {
		prop := vals.GetProposer()
		ii := prop.Address[19]
		propCount[ii]++
		vals.IncrementProposerPriority(1)
	}

	if propCount[0] != 40*N {
		t.Fatalf(
			"Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d",
			40*N,
			120*N,
			propCount[0],
			120*N,
		)
	}
	if propCount[1] != 50*N {
		t.Fatalf(
			"Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d",
			50*N,
			120*N,
			propCount[1],
			120*N,
		)
	}
	if propCount[2] != 30*N {
		t.Fatalf(
			"Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d",
			30*N,
			120*N,
			propCount[2],
			120*N,
		)
	}
}
func TestProposerSelection3(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("avalidator_address12"), 1),
		newValidator([]byte("bvalidator_address12"), 1),
		newValidator([]byte("cvalidator_address12"), 1),
		newValidator([]byte("dvalidator_address12"), 1),
	})

	proposerOrder := make([]*Validator, 4)
	for i := 0; i < 4; i++ {
		// all validators need public keys
		pk := ed25519.GenPrivKey().PubKey()
		vset.Validators[i].PubKey = pk
		proposerOrder[i] = vset.GetProposer()
		vset.IncrementProposerPriority(1)
	}

	// i for the loop
	// j for the times
	// we should go in order forever, despite some IncrementProposerPriority with times > 1
	var (
		i int
		j int32
	)
	for ; i < 10000; i++ {
		got := vset.GetProposer().Address
		expected := proposerOrder[j%4].Address
		if !bytes.Equal(got, expected) {
			t.Fatalf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)
		}

		// serialize, deserialize, check proposer
		b := vset.toBytes()
		vset = vset.fromBytes(b)

		computed := vset.GetProposer() // findGetProposer()
		if i != 0 {
			if !bytes.Equal(got, computed.Address) {
				t.Fatalf(
					"vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)",
					got,
					computed.Address,
					i,
					j,
				)
			}
		}

		// times is usually 1
		times := int32(1)
		mod := (rand.Int() % 5) + 1
		if rand.Int()%mod > 0 {
			// sometimes it's anywhere from 1 up to 4
			times = (rand.Int31() % 4) + 1
		}
		vset.IncrementProposerPriority(times)
		j += times
	}
}
func newValidator(address []byte, power int64) *Validator {
	return &Validator{Address: address, VotingPower: power}
}

func randPubKey() crypto.PubKey {
	pubKey := make(ed25519.PubKey, ed25519.PubKeySize)
	copy(pubKey, tmrand.Bytes(32))
	return pubKey
}
func randModuloValidator(totalVotingPower int64) *Validator {
	// this modulo limits the ProposerPriority/VotingPower to stay in the
	// bounds of MaxTotalVotingPower minus the already existing voting power:
	val := NewValidator(randPubKey(), int64(rand.Uint64()%uint64(MaxTotalVotingPower-totalVotingPower)))
	val.ProposerPriority = rand.Int63() % (MaxTotalVotingPower - totalVotingPower)
	return val
}
func randValidator(randPower bool, minPower int64) (*Validator, PrivValidator) {
	privVal := NewMockPV()
	votePower := minPower
	if randPower {
		votePower += int64(rand.Uint32())
	}
	pubKey, err := privVal.GetPubKey(context.Background())
	if err != nil {
		panic(fmt.Errorf("could not retrieve pubkey: %w", err))
	}
	val := NewValidator(pubKey, votePower)
	return val, privVal
}
func randModuloValidatorSet(numValidators int) *ValidatorSet {
	validators := make([]*Validator, numValidators)
	totalVotingPower := int64(0)
	for i := 0; i < numValidators; i++ {
		validators[i] = randModuloValidator(totalVotingPower)
		totalVotingPower += validators[i].VotingPower
	}
	return NewValidatorSet(validators)
}
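
// The round-trip helpers below serialize a ValidatorSet to its protobuf
// representation and back; TestProposerSelection3 uses them to check that
// the computed proposer survives (de)serialization.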
func (vals *ValidatorSet) toBytes() []byte {
	pbvs, err := vals.ToProto()
	if err != nil {
		panic(err)
	}
	bz, err := pbvs.Marshal()
	if err != nil {
		panic(err)
	}
	return bz
}

func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet {
	pbvs := new(tmproto.ValidatorSet)
	err := pbvs.Unmarshal(b)
	if err != nil {
		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
		panic(err)
	}
	vs, err := ValidatorSetFromProto(pbvs)
	if err != nil {
		panic(err)
	}
	return vs
}
//-------------------------------------------------------------------

func TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) {
	// NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower()
	// which should panic on overflows:
	shouldPanic := func() {
		NewValidatorSet([]*Validator{
			{Address: []byte("a"), VotingPower: math.MaxInt64, ProposerPriority: 0},
			{Address: []byte("b"), VotingPower: math.MaxInt64, ProposerPriority: 0},
			{Address: []byte("c"), VotingPower: math.MaxInt64, ProposerPriority: 0},
		})
	}

	assert.Panics(t, shouldPanic)
}
func TestAvgProposerPriority(t *testing.T) {
	// Create Validator set without calling IncrementProposerPriority:
	tcs := []struct {
		vs   ValidatorSet
		want int64
	}{
		0: {ValidatorSet{Validators: []*Validator{{ProposerPriority: 0}, {ProposerPriority: 0}, {ProposerPriority: 0}}}, 0},
		1: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}, {ProposerPriority: 0}},
			}, math.MaxInt64 / 3,
		},
		2: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}},
			}, math.MaxInt64 / 2,
		},
		3: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: math.MaxInt64}},
			}, math.MaxInt64,
		},
		4: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MinInt64}, {ProposerPriority: math.MinInt64}},
			}, math.MinInt64,
		},
	}
	for i, tc := range tcs {
		got := tc.vs.computeAvgProposerPriority()
		assert.Equal(t, tc.want, got, "test case: %v", i)
	}
}
func TestAveragingInIncrementProposerPriority(t *testing.T) {
	// Test that the averaging works as expected inside of IncrementProposerPriority.
	// Each validator comes with zero voting power which simplifies reasoning about
	// the expected ProposerPriority.
	tcs := []struct {
		vs    ValidatorSet
		times int32
		avg   int64
	}{
		0: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 1},
				{Address: []byte("b"), ProposerPriority: 2},
				{Address: []byte("c"), ProposerPriority: 3}}},
			1, 2},
		1: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 10},
				{Address: []byte("b"), ProposerPriority: -10},
				{Address: []byte("c"), ProposerPriority: 1}}},
			// this should average twice but the average should be 0 after the first iteration
			// (voting power is 0 -> no changes);
			// note that the integer division 1 / 3 floors to 0
			11, 1 / 3},
		2: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 100},
				{Address: []byte("b"), ProposerPriority: -10},
				{Address: []byte("c"), ProposerPriority: 1}}},
			1, 91 / 3},
	}
	for i, tc := range tcs {
		// work on a copy to keep the old ProposerPriorities around:
		newVset := tc.vs.CopyIncrementProposerPriority(tc.times)
		for _, val := range tc.vs.Validators {
			_, updatedVal := newVset.GetByAddress(val.Address)
			assert.Equal(t, updatedVal.ProposerPriority, val.ProposerPriority-tc.avg, "test case: %v", i)
		}
	}
}
func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) {
	// Unlike TestAveragingInIncrementProposerPriority, this is a more complete test showing
	// how each ProposerPriority changes in relation to the validator's voting power.
	// The average is zero in each round:
	vp0 := int64(10)
	vp1 := int64(1)
	vp2 := int64(1)
	total := vp0 + vp1 + vp2
	avg := (vp0 + vp1 + vp2 - total) / 3
	vals := ValidatorSet{Validators: []*Validator{
		{Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0},
		{Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1},
		{Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}}
	tcs := []struct {
		vals                   *ValidatorSet
		wantProposerPriorities []int64
		times                  int32
		wantProposer           *Validator
	}{
		0: {
			vals.Copy(),
			[]int64{
				// Accum + VotingPower - Avg:
				0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12)
				0 + vp1,
				0 + vp2},
			1,
			vals.Validators[0]},
		1: {
			vals.Copy(),
			[]int64{
				(0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too
				(0 + vp1) + vp1,
				(0 + vp2) + vp2},
			2,
			vals.Validators[0]}, // increment twice -> expect average to be subtracted twice
		2: {
			vals.Copy(),
			[]int64{
				0 + 3*(vp0-total) - avg, // still mostest
				0 + 3*vp1,
				0 + 3*vp2},
			3,
			vals.Validators[0]},
		3: {
			vals.Copy(),
			[]int64{
				0 + 4*(vp0-total), // still mostest
				0 + 4*vp1,
				0 + 4*vp2},
			4,
			vals.Validators[0]},
		4: {
			vals.Copy(),
			[]int64{
				0 + 4*(vp0-total) + vp0, // 4 iters was mostest
				0 + 5*vp1 - total,       // now this val is mostest for the 1st time (hence -12==totalVotingPower)
				0 + 5*vp2},
			5,
			vals.Validators[1]},
		5: {
			vals.Copy(),
			[]int64{
				0 + 6*vp0 - 5*total, // mostest again
				0 + 6*vp1 - total,   // mostest once up to here
				0 + 6*vp2},
			6,
			vals.Validators[0]},
		6: {
			vals.Copy(),
			[]int64{
				0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times
				0 + 7*vp1 - total,   // in 7 iters this val is mostest 1 time
				0 + 7*vp2},
			7,
			vals.Validators[0]},
		7: {
			vals.Copy(),
			[]int64{
				0 + 8*vp0 - 7*total, // mostest again
				0 + 8*vp1 - total,
				0 + 8*vp2},
			8,
			vals.Validators[0]},
		8: {
			vals.Copy(),
			[]int64{
				0 + 9*vp0 - 7*total,
				0 + 9*vp1 - total,
				0 + 9*vp2 - total}, // mostest
			9,
			vals.Validators[2]},
		9: {
			vals.Copy(),
			[]int64{
				0 + 10*vp0 - 8*total, // after 10 iters this is mostest again
				0 + 10*vp1 - total,   // after 6 iters this val is "mostest" once and not in between
				0 + 10*vp2 - total},  // in between 10 iters this val is "mostest" once
			10,
			vals.Validators[0]},
		10: {
			vals.Copy(),
			[]int64{
				0 + 11*vp0 - 9*total,
				0 + 11*vp1 - total,  // after 6 iters this val is "mostest" once and not in between
				0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once
			11,
			vals.Validators[0]},
	}
	for i, tc := range tcs {
		tc.vals.IncrementProposerPriority(tc.times)

		assert.Equal(t, tc.wantProposer.Address, tc.vals.GetProposer().Address,
			"test case: %v",
			i)

		for valIdx, val := range tc.vals.Validators {
			assert.Equal(t,
				tc.wantProposerPriorities[valIdx],
				val.ProposerPriority,
				"test case: %v, validator: %v",
				i,
				valIdx)
		}
	}
}
func TestSafeAdd(t *testing.T) {
	f := func(a, b int64) bool {
		c, overflow := safeAdd(a, b)
		return overflow || c == a+b
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

func TestSafeAddClip(t *testing.T) {
	assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
	assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
	assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10))
}

func TestSafeSubClip(t *testing.T) {
	assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10))
	assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64))
	assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64))
	assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10))
}
//-------------------------------------------------------------------

// Check VerifyCommit, VerifyCommitLight and VerifyCommitLightTrusting basic
// verification.
func TestValidatorSet_VerifyCommit_All(t *testing.T) {
	var (
		privKey = ed25519.GenPrivKey()
		pubKey  = privKey.PubKey()
		v1      = NewValidator(pubKey, 1000)
		vset    = NewValidatorSet([]*Validator{v1})

		chainID = "Lalande21185"
	)

	vote := examplePrecommit()
	vote.ValidatorAddress = pubKey.Address()
	v := vote.ToProto()
	sig, err := privKey.Sign(VoteSignBytes(chainID, v))
	require.NoError(t, err)
	vote.Signature = sig

	commit := NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote.CommitSig()})

	vote2 := *vote
	sig2, err := privKey.Sign(VoteSignBytes("EpsilonEridani", v))
	require.NoError(t, err)
	vote2.Signature = sig2

	testCases := []struct {
		description string
		chainID     string
		blockID     BlockID
		height      int64
		commit      *Commit
		expErr      bool
	}{
		{"good", chainID, vote.BlockID, vote.Height, commit, false},

		{"wrong signature (#0)", "EpsilonEridani", vote.BlockID, vote.Height, commit, true},
		{"wrong block ID", chainID, makeBlockIDRandom(), vote.Height, commit, true},
		{"wrong height", chainID, vote.BlockID, vote.Height - 1, commit, true},

		{"wrong set size: 1 vs 0", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{}), true},
		{"wrong set size: 1 vs 2", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID,
				[]CommitSig{vote.CommitSig(), {BlockIDFlag: BlockIDFlagAbsent}}), true},

		{"insufficient voting power: got 0, needed more than 666", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}), true},

		{"wrong signature (#0)", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote2.CommitSig()}), true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			err := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit)
			if tc.expErr {
				if assert.Error(t, err, "VerifyCommit") {
					assert.Contains(t, err.Error(), tc.description, "VerifyCommit")
				}
			} else {
				assert.NoError(t, err, "VerifyCommit")
			}

			err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit)
			if tc.expErr {
				if assert.Error(t, err, "VerifyCommitLight") {
					assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight")
				}
			} else {
				assert.NoError(t, err, "VerifyCommitLight")
			}
		})
	}
}
func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) {
	var (
		chainID = "test_chain_id"
		h       = int64(3)
		blockID = makeBlockIDRandom()
	)

	voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
	commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now())
	require.NoError(t, err)

	// malleate 4th signature
	vote := voteSet.GetByIndex(3)
	v := vote.ToProto()
	err = vals[3].SignVote(context.Background(), "CentaurusA", v)
	require.NoError(t, err)
	vote.Signature = v.Signature
	commit.Signatures[3] = vote.CommitSig()

	err = valSet.VerifyCommit(chainID, blockID, h, commit)
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "wrong signature (#3)")
	}
}
func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) {
	var (
		chainID = "test_chain_id"
		h       = int64(3)
		blockID = makeBlockIDRandom()
	)

	voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
	commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now())
	require.NoError(t, err)

	// malleate 4th signature (3 signatures are enough for 2/3+)
	vote := voteSet.GetByIndex(3)
	v := vote.ToProto()
	err = vals[3].SignVote(context.Background(), "CentaurusA", v)
	require.NoError(t, err)
	vote.Signature = v.Signature
	commit.Signatures[3] = vote.CommitSig()

	err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
	assert.NoError(t, err)
}
func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) {
	var (
		chainID = "test_chain_id"
		h       = int64(3)
		blockID = makeBlockIDRandom()
	)

	voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
	commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now())
	require.NoError(t, err)

	// malleate 3rd signature (2 signatures are enough for 1/3+ trust level)
	vote := voteSet.GetByIndex(2)
	v := vote.ToProto()
	err = vals[2].SignVote(context.Background(), "CentaurusA", v)
	require.NoError(t, err)
	vote.Signature = v.Signature
	commit.Signatures[2] = vote.CommitSig()

	err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
	assert.NoError(t, err)
}
func TestEmptySet(t *testing.T) {
	var valList []*Validator
	valSet := NewValidatorSet(valList)
	assert.Panics(t, func() { valSet.IncrementProposerPriority(1) })
	assert.Panics(t, func() { valSet.RescalePriorities(100) })
	assert.Panics(t, func() { valSet.shiftByAvgProposerPriority() })
	assert.Panics(t, func() { assert.Zero(t, computeMaxMinPriorityDiff(valSet)) })
	valSet.GetProposer()

	// Add to empty set
	v1 := newValidator([]byte("v1"), 100)
	v2 := newValidator([]byte("v2"), 100)
	valList = []*Validator{v1, v2}
	assert.NoError(t, valSet.UpdateWithChangeSet(valList))
	verifyValidatorSet(t, valSet)

	// Delete all validators from set
	v1 = newValidator([]byte("v1"), 0)
	v2 = newValidator([]byte("v2"), 0)
	delList := []*Validator{v1, v2}
	assert.Error(t, valSet.UpdateWithChangeSet(delList))

	// Attempt delete from empty set
	assert.Error(t, valSet.UpdateWithChangeSet(delList))
}
func TestUpdatesForNewValidatorSet(t *testing.T) {
	v1 := newValidator([]byte("v1"), 100)
	v2 := newValidator([]byte("v2"), 100)
	valList := []*Validator{v1, v2}
	valSet := NewValidatorSet(valList)
	verifyValidatorSet(t, valSet)

	// Verify duplicates are caught in NewValidatorSet() and it panics
	v111 := newValidator([]byte("v1"), 100)
	v112 := newValidator([]byte("v1"), 123)
	v113 := newValidator([]byte("v1"), 234)
	valList = []*Validator{v111, v112, v113}
	assert.Panics(t, func() { NewValidatorSet(valList) })

	// Verify set including validator with voting power 0 cannot be created
	v1 = newValidator([]byte("v1"), 0)
	v2 = newValidator([]byte("v2"), 22)
	v3 := newValidator([]byte("v3"), 33)
	valList = []*Validator{v1, v2, v3}
	assert.Panics(t, func() { NewValidatorSet(valList) })

	// Verify set including validator with negative voting power cannot be created
	v1 = newValidator([]byte("v1"), 10)
	v2 = newValidator([]byte("v2"), -20)
	v3 = newValidator([]byte("v3"), 30)
	valList = []*Validator{v1, v2, v3}
	assert.Panics(t, func() { NewValidatorSet(valList) })
}
type testVal struct {
	name  string
	power int64
}
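
// permutation returns valList in a random order; the order-independence
// tests use it to shuffle their update lists.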
func permutation(valList []testVal) []testVal {
	if len(valList) == 0 {
		return nil
	}
	permList := make([]testVal, len(valList))
	perm := rand.Perm(len(valList))
	for i, v := range perm {
		permList[v] = valList[i]
	}
	return permList
}

func createNewValidatorList(testValList []testVal) []*Validator {
	valList := make([]*Validator, 0, len(testValList))
	for _, val := range testValList {
		valList = append(valList, newValidator([]byte(val.name), val.power))
	}
	return valList
}

func createNewValidatorSet(testValList []testVal) *ValidatorSet {
	return NewValidatorSet(createNewValidatorList(testValList))
}
func valSetTotalProposerPriority(valSet *ValidatorSet) int64 {
	sum := int64(0)
	for _, val := range valSet.Validators {
		// mind overflow
		sum = safeAddClip(sum, val.ProposerPriority)
	}
	return sum
}
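
// verifyValidatorSet checks the set's invariants after an update: the cached
// total voting power matches a recomputation, the proposer priorities sum to
// (near) zero, and the max-min priority spread stays within
// PriorityWindowSizeFactor * totalVotingPower.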
func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) {
	// verify that the capacity and length of validators is the same
	assert.Equal(t, len(valSet.Validators), cap(valSet.Validators))

	// verify that the set's total voting power has been updated
	tvp := valSet.totalVotingPower
	valSet.updateTotalVotingPower()
	expectedTvp := valSet.TotalVotingPower()
	assert.Equal(t, expectedTvp, tvp,
		"expected TVP %d. Got %d, valSet=%s", expectedTvp, tvp, valSet)

	// verify that validator priorities are centered
	valsCount := int64(len(valSet.Validators))
	tpp := valSetTotalProposerPriority(valSet)
	assert.True(t, tpp < valsCount && tpp > -valsCount,
		"expected total priority in (-%d, %d). Got %d", valsCount, valsCount, tpp)

	// verify that priorities are scaled
	dist := computeMaxMinPriorityDiff(valSet)
	assert.True(t, dist <= PriorityWindowSizeFactor*tvp,
		"expected priority distance <= %d. Got %d", PriorityWindowSizeFactor*tvp, dist)
}
func toTestValList(valList []*Validator) []testVal {
	testList := make([]testVal, len(valList))
	for i, val := range valList {
		testList[i].name = string(val.Address)
		testList[i].power = val.VotingPower
	}
	return testList
}

func testValSet(nVals int, power int64) []testVal {
	vals := make([]testVal, nVals)
	for i := 0; i < nVals; i++ {
		vals[i] = testVal{fmt.Sprintf("v%d", i+1), power}
	}
	return vals
}
type valSetErrTestCase struct {
	startVals  []testVal
	updateVals []testVal
}
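
// executeValSetErrTestCase applies an update that is expected to fail and
// asserts that neither the validator set nor the caller's update list is
// mutated in the process.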
func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) {
	// create a new set and apply updates, keeping copies for the checks
	valSet := createNewValidatorSet(tt.startVals)
	valSetCopy := valSet.Copy()
	valList := createNewValidatorList(tt.updateVals)
	valListCopy := validatorListCopy(valList)
	err := valSet.UpdateWithChangeSet(valList)

	// for errors check the validator set has not been changed
	assert.Error(t, err, "test %d", idx)
	assert.Equal(t, valSet, valSetCopy, "test %v", idx)

	// check the parameter list has not changed
	assert.Equal(t, valList, valListCopy, "test %v", idx)
}
func TestValSetUpdatesDuplicateEntries(t *testing.T) {
	testCases := []valSetErrTestCase{
		// Duplicate entries in changes
		{ // first entry is duplicated change
			testValSet(2, 10),
			[]testVal{{"v1", 11}, {"v1", 22}},
		},
		{ // second entry is duplicated change
			testValSet(2, 10),
			[]testVal{{"v2", 11}, {"v2", 22}},
		},
		{ // change duplicates are separated by a valid change
			testValSet(2, 10),
			[]testVal{{"v1", 11}, {"v2", 22}, {"v1", 12}},
		},
		{ // change duplicates are separated by a valid change
			testValSet(3, 10),
			[]testVal{{"v1", 11}, {"v3", 22}, {"v1", 12}},
		},

		// Duplicate entries in remove
		{ // first entry is duplicated remove
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v1", 0}},
		},
		{ // second entry is duplicated remove
			testValSet(2, 10),
			[]testVal{{"v2", 0}, {"v2", 0}},
		},
		{ // remove duplicates are separated by a valid remove
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v2", 0}, {"v1", 0}},
		},
		{ // remove duplicates are separated by a valid remove
			testValSet(3, 10),
			[]testVal{{"v1", 0}, {"v3", 0}, {"v1", 0}},
		},

		{ // remove and update same val
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v2", 20}, {"v1", 30}},
		},
		{ // duplicate entries in removes + changes
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
		},
		{ // duplicate entries in removes + changes
			testValSet(3, 10),
			[]testVal{{"v1", 0}, {"v3", 5}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
		},
	}

	for i, tt := range testCases {
		executeValSetErrTestCase(t, i, tt)
	}
}
func TestValSetUpdatesOverflows(t *testing.T) {
	maxVP := MaxTotalVotingPower
	testCases := []valSetErrTestCase{
		{ // single update leading to overflow
			testValSet(2, 10),
			[]testVal{{"v1", math.MaxInt64}},
		},
		{ // single update leading to overflow
			testValSet(2, 10),
			[]testVal{{"v2", math.MaxInt64}},
		},
		{ // add validator leading to overflow
			testValSet(1, maxVP),
			[]testVal{{"v2", math.MaxInt64}},
		},
		{ // add validator leading to exceed Max
			testValSet(1, maxVP-1),
			[]testVal{{"v2", 5}},
		},
		{ // add validator leading to exceed Max
			testValSet(2, maxVP/3),
			[]testVal{{"v3", maxVP / 2}},
		},
		{ // add validator leading to exceed Max
			testValSet(1, maxVP),
			[]testVal{{"v2", maxVP}},
		},
	}

	for i, tt := range testCases {
		executeValSetErrTestCase(t, i, tt)
	}
}
func TestValSetUpdatesOtherErrors(t *testing.T) {
	testCases := []valSetErrTestCase{
		{ // update with negative voting power
			testValSet(2, 10),
			[]testVal{{"v1", -123}},
		},
		{ // update with negative voting power
			testValSet(2, 10),
			[]testVal{{"v2", -123}},
		},
		{ // remove non-existing validator
			testValSet(2, 10),
			[]testVal{{"v3", 0}},
		},
		{ // delete all validators
			[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
			[]testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
		},
	}

	for i, tt := range testCases {
		executeValSetErrTestCase(t, i, tt)
	}
}
func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
	valSetUpdatesBasicTests := []struct {
		startVals    []testVal
		updateVals   []testVal
		expectedVals []testVal
	}{
		{ // no changes
			testValSet(2, 10),
			[]testVal{},
			testValSet(2, 10),
		},
		{ // voting power changes
			testValSet(2, 10),
			[]testVal{{"v2", 22}, {"v1", 11}},
			[]testVal{{"v2", 22}, {"v1", 11}},
		},
		{ // add new validators
			[]testVal{{"v2", 20}, {"v1", 10}},
			[]testVal{{"v4", 40}, {"v3", 30}},
			[]testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
		},
		{ // add new validator to middle
			[]testVal{{"v3", 20}, {"v1", 10}},
			[]testVal{{"v2", 30}},
			[]testVal{{"v2", 30}, {"v3", 20}, {"v1", 10}},
		},
		{ // add new validator to beginning
			[]testVal{{"v3", 20}, {"v2", 10}},
			[]testVal{{"v1", 30}},
			[]testVal{{"v1", 30}, {"v3", 20}, {"v2", 10}},
		},
		{ // delete validators
			[]testVal{{"v3", 30}, {"v2", 20}, {"v1", 10}},
			[]testVal{{"v2", 0}},
			[]testVal{{"v3", 30}, {"v1", 10}},
		},
	}

	for i, tt := range valSetUpdatesBasicTests {
		// create a new set and apply updates, keeping copies for the checks
		valSet := createNewValidatorSet(tt.startVals)
		valList := createNewValidatorList(tt.updateVals)
		err := valSet.UpdateWithChangeSet(valList)
		assert.NoError(t, err, "test %d", i)

		valListCopy := validatorListCopy(valSet.Validators)
		// check that the voting power in the set's validators does not change when the
		// voting power is mutated in the list previously passed to UpdateWithChangeSet;
		// this verifies that UpdateWithChangeSet makes copies of the validators.
		if len(valList) > 0 {
			valList[0].VotingPower++
			assert.Equal(t, toTestValList(valListCopy), toTestValList(valSet.Validators), "test %v", i)
		}

		// check the final validator list is as expected and the set is properly scaled and centered.
		assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators), "test %v", i)
		verifyValidatorSet(t, valSet)
	}
}

// Test that different permutations of an update give the same result.
func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) {
    // startVals - initial validators to create the set with
    // updateVals - a sequence of updates to be applied to the set.
    // updateVals is shuffled a number of times during testing to check that the
    // resulting validator set is always the same.
    valSetUpdatesOrderTests := []struct {
        startVals  []testVal
        updateVals []testVal
    }{
        0: { // order of changes should not matter, the final validator sets should be the same
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}},
            []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}},
        1: { // order of additions should not matter
            []testVal{{"v2", 20}, {"v1", 10}},
            []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}},
        2: { // order of removals should not matter
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
            []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}},
        3: { // order of mixed operations should not matter
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
            []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}},
    }

    for i, tt := range valSetUpdatesOrderTests {
        // create a new set and apply updates
        valSet := createNewValidatorSet(tt.startVals)
        valSetCopy := valSet.Copy()
        valList := createNewValidatorList(tt.updateVals)
        assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList))

        // save the result as expected for next updates
        valSetExp := valSetCopy.Copy()

        // perform at most 20 permutations on the updates and call UpdateWithChangeSet()
        n := len(tt.updateVals)
        maxNumPerms := tmmath.MinInt(20, n*n)
        for j := 0; j < maxNumPerms; j++ {
            // create a copy of original set and apply a random permutation of updates
            valSetCopy := valSet.Copy()
            valList := createNewValidatorList(permutation(tt.updateVals))

            // check there was no error and the set is properly scaled and centered.
            assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList),
                "test %v failed for permutation %v", i, valList)
            verifyValidatorSet(t, valSetCopy)

            // verify the resulting set is the same as the expected one
            assert.Equal(t, valSetCopy, valSetExp,
                "test %v failed for permutation %v", i, valList)
        }
    }
}

// This tests the private validator_set.go:applyUpdates() function, which is
// used only for additions and changes. It should perform a proper merge of
// updatedVals and startVals.
func TestValSetApplyUpdatesTestsExecute(t *testing.T) {
    valSetUpdatesBasicTests := []struct {
        startVals    []testVal
        updateVals   []testVal
        expectedVals []testVal
    }{
        // additions
        0: { // prepend
            []testVal{{"v4", 44}, {"v5", 55}},
            []testVal{{"v1", 11}},
            []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}},
        1: { // append
            []testVal{{"v4", 44}, {"v5", 55}},
            []testVal{{"v6", 66}},
            []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
        2: { // insert
            []testVal{{"v4", 44}, {"v6", 66}},
            []testVal{{"v5", 55}},
            []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
        3: { // insert multi
            []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}},
            []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}},
            []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}},
        // changes
        4: { // head
            []testVal{{"v1", 111}, {"v2", 22}},
            []testVal{{"v1", 11}},
            []testVal{{"v1", 11}, {"v2", 22}}},
        5: { // tail
            []testVal{{"v1", 11}, {"v2", 222}},
            []testVal{{"v2", 22}},
            []testVal{{"v1", 11}, {"v2", 22}}},
        6: { // middle
            []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}},
            []testVal{{"v2", 22}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
        7: { // multi
            []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
        // additions and changes
        8: {
            []testVal{{"v1", 111}, {"v2", 22}},
            []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}},
    }

    for i, tt := range valSetUpdatesBasicTests {
        // create a new validator set with the start values
        valSet := createNewValidatorSet(tt.startVals)

        // applyUpdates() with the update values
        valList := createNewValidatorList(tt.updateVals)
        valSet.applyUpdates(valList)

        // check the new list of validators for proper merge
        assert.Equal(t, toTestValList(valSet.Validators), tt.expectedVals, "test %v", i)
    }
}
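
// testVSetCfg describes one validator-set mutation scenario: the validators
// to start from, the deletes/updates/adds to apply, the expected resulting
// validators, and the error (if any) expected from UpdateWithChangeSet.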
type testVSetCfg struct {
    name         string
    startVals    []testVal
    deletedVals  []testVal
    updatedVals  []testVal
    addedVals    []testVal
    expectedVals []testVal
    expErr       error
}
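
// randTestVSetCfg generates a random configuration: it keeps a random subset
// of the nBase starting validators unchanged, updates some and deletes the
// rest, then adds between 1 and nAddMax new validators (when nAddMax > 0),
// and records the expected resulting validator list.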
func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg {
    if nBase <= 0 || nAddMax < 0 {
        panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax))
    }

    const maxPower = 1000
    var nOld, nDel, nChanged, nAdd int

    nOld = int(uint(rand.Int())%uint(nBase)) + 1
    if nBase-nOld > 0 {
        nDel = int(uint(rand.Int()) % uint(nBase-nOld))
    }
    nChanged = nBase - nOld - nDel

    if nAddMax > 0 {
        nAdd = rand.Int()%nAddMax + 1
    }

    cfg := testVSetCfg{}

    cfg.startVals = make([]testVal, nBase)
    cfg.deletedVals = make([]testVal, nDel)
    cfg.addedVals = make([]testVal, nAdd)
    cfg.updatedVals = make([]testVal, nChanged)
    cfg.expectedVals = make([]testVal, nBase-nDel+nAdd)

    for i := 0; i < nBase; i++ {
        cfg.startVals[i] = testVal{fmt.Sprintf("v%d", i), int64(uint(rand.Int())%maxPower + 1)}
        if i < nOld {
            cfg.expectedVals[i] = cfg.startVals[i]
        }
        if i >= nOld && i < nOld+nChanged {
            cfg.updatedVals[i-nOld] = testVal{fmt.Sprintf("v%d", i), int64(uint(rand.Int())%maxPower + 1)}
            cfg.expectedVals[i] = cfg.updatedVals[i-nOld]
        }
        if i >= nOld+nChanged {
            cfg.deletedVals[i-nOld-nChanged] = testVal{fmt.Sprintf("v%d", i), 0}
        }
    }

    for i := nBase; i < nBase+nAdd; i++ {
        cfg.addedVals[i-nBase] = testVal{fmt.Sprintf("v%d", i), int64(uint(rand.Int())%maxPower + 1)}
        cfg.expectedVals[i-nDel] = cfg.addedVals[i-nBase]
    }

    sort.Sort(testValsByVotingPower(cfg.startVals))
    sort.Sort(testValsByVotingPower(cfg.deletedVals))
    sort.Sort(testValsByVotingPower(cfg.updatedVals))
    sort.Sort(testValsByVotingPower(cfg.addedVals))
    sort.Sort(testValsByVotingPower(cfg.expectedVals))

    return cfg
}
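
// applyChangesToValSet merges the given lists of changes into a single change
// set, applies it to valSet via UpdateWithChangeSet, and asserts that the
// returned error matches expErr (or that there is no error if expErr is nil).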
func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) {
    changes := make([]testVal, 0)
    for _, valsList := range valsLists {
        changes = append(changes, valsList...)
    }
    valList := createNewValidatorList(changes)
    err := valSet.UpdateWithChangeSet(valList)
    if expErr != nil {
        assert.Equal(t, expErr, err)
    } else {
        assert.NoError(t, err)
    }
}

func TestValSetUpdatePriorityOrderTests(t *testing.T) {
    const nMaxElections int32 = 5000

    testCases := []testVSetCfg{
        0: { // remove high power validator, keep old equal low power validators
            startVals:    []testVal{{"v3", 1000}, {"v1", 1}, {"v2", 1}},
            deletedVals:  []testVal{{"v3", 0}},
            updatedVals:  []testVal{},
            addedVals:    []testVal{},
            expectedVals: []testVal{{"v1", 1}, {"v2", 1}},
        },
        1: { // remove high power validator, keep old validators with different powers
            startVals:    []testVal{{"v3", 1000}, {"v2", 10}, {"v1", 1}},
            deletedVals:  []testVal{{"v3", 0}},
            updatedVals:  []testVal{},
            addedVals:    []testVal{},
            expectedVals: []testVal{{"v2", 10}, {"v1", 1}},
        },
        2: { // remove high power validator, add new lower power validators, keep old low power validators
            startVals:    []testVal{{"v3", 1000}, {"v2", 2}, {"v1", 1}},
            deletedVals:  []testVal{{"v3", 0}},
            updatedVals:  []testVal{{"v2", 1}},
            addedVals:    []testVal{{"v5", 50}, {"v4", 40}},
            expectedVals: []testVal{{"v5", 50}, {"v4", 40}, {"v1", 1}, {"v2", 1}},
        },

        // generate a configuration with 100 validators,
        // randomly select validators for updates and deletes, and
        // generate between 1 and 10 new validators to be added
        3: randTestVSetCfg(t, 100, 10),

        4: randTestVSetCfg(t, 1000, 100),

        5: randTestVSetCfg(t, 10, 100),

        6: randTestVSetCfg(t, 100, 1000),

        7: randTestVSetCfg(t, 1000, 1000),
    }

    for _, cfg := range testCases {
        // create a new validator set
        valSet := createNewValidatorSet(cfg.startVals)
        verifyValidatorSet(t, valSet)

        // run election up to nMaxElections times, apply changes and verify that the priority order is correct
        verifyValSetUpdatePriorityOrder(t, valSet, cfg, nMaxElections)
    }
}
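
// verifyValSetUpdatePriorityOrder runs a random number of elections, applies
// cfg's changes, and checks that newly added validators all enter the set
// with the same, smallest proposer priority.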
func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int32) {
    // run the election for a random number of rounds (up to nMaxElections)
    valSet.IncrementProposerPriority(rand.Int31()%nMaxElections + 1)

    // apply the changes to get the updated validator set
    applyChangesToValSet(t, nil, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals)

    // basic checks
    assert.Equal(t, cfg.expectedVals, toTestValList(valSet.Validators))
    verifyValidatorSet(t, valSet)

    // verify that the added validators have the smallest priority:
    //  - they should be at the beginning of updatedValsPriSorted since it is
    //  sorted by priority
    if len(cfg.addedVals) > 0 {
        updatedValsPriSorted := validatorListCopy(valSet.Validators)
        sort.Sort(validatorsByPriority(updatedValsPriSorted))

        addedValsPriSlice := updatedValsPriSorted[:len(cfg.addedVals)]
        sort.Sort(ValidatorsByVotingPower(addedValsPriSlice))
        assert.Equal(t, cfg.addedVals, toTestValList(addedValsPriSlice))

        //  - and should all have the same priority
        expectedPri := addedValsPriSlice[0].ProposerPriority
        for _, val := range addedValsPriSlice[1:] {
            assert.Equal(t, expectedPri, val.ProposerPriority)
        }
    }
}
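
// TestNewValidatorSetFromExistingValidators checks that
// ValidatorSetFromExistingValidators preserves the proposer priorities of the
// given validators, whereas NewValidatorSet does not.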
func TestNewValidatorSetFromExistingValidators(t *testing.T) {
    size := 5
    vals := make([]*Validator, size)
    for i := 0; i < size; i++ {
        pv := NewMockPV()
        vals[i] = pv.ExtractIntoValidator(int64(i + 1))
    }
    valSet := NewValidatorSet(vals)
    valSet.IncrementProposerPriority(5)

    newValSet := NewValidatorSet(valSet.Validators)
    assert.NotEqual(t, valSet, newValSet)

    existingValSet, err := ValidatorSetFromExistingValidators(valSet.Validators)
    assert.NoError(t, err)
    assert.Equal(t, valSet, existingValSet)
    assert.Equal(t, valSet.CopyIncrementProposerPriority(3), existingValSet.CopyIncrementProposerPriority(3))
}

func TestValSetUpdateOverflowRelated(t *testing.T) {
    testCases := []testVSetCfg{
        {
            name:         "1 no false overflow error messages for updates",
            startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
            updatedVals:  []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
            expectedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
            expErr:       nil,
        },
        {
            // this test shows that it is important to apply the updates in order of the
            // change in power, i.e. to apply the decreases in power first (the v2 change
            // in this case).
            name:         "2 no false overflow error messages for updates",
            startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
            updatedVals:  []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}},
            expectedVals: []testVal{{"v2", MaxTotalVotingPower / 2}, {"v1", MaxTotalVotingPower/2 - 1}},
            expErr:       nil,
        },
        {
            name:         "3 no false overflow error messages for deletes",
            startVals:    []testVal{{"v1", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
            deletedVals:  []testVal{{"v1", 0}},
            addedVals:    []testVal{{"v4", MaxTotalVotingPower - 2}},
            expectedVals: []testVal{{"v4", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
            expErr:       nil,
        },
        {
            name: "4 no false overflow error messages for adds, updates and deletes",
            startVals: []testVal{
                {"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4},
                {"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}},
            deletedVals: []testVal{{"v2", 0}},
            updatedVals: []testVal{
                {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}},
            addedVals: []testVal{{"v5", 3}},
            expectedVals: []testVal{
                {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}},
            expErr: nil,
        },
        {
            name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8",
            startVals: []testVal{
                {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
                {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
            updatedVals: []testVal{
                {"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower},
                {"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower},
                {"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}},
            expectedVals: []testVal{
                {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
                {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
            expErr: ErrTotalVotingPowerOverflow,
        },
    }

    for _, tt := range testCases {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            valSet := createNewValidatorSet(tt.startVals)
            verifyValidatorSet(t, valSet)

            // execute update and verify returned error is as expected
            applyChangesToValSet(t, tt.expErr, valSet, tt.addedVals, tt.updatedVals, tt.deletedVals)

            // verify updated validator set is as expected
            assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators))
            verifyValidatorSet(t, valSet)
        })
    }
}
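
// TestValidatorSet_VerifyCommitLightTrusting checks that a commit is accepted
// exactly when validators holding at least the given trust level (here 1/3)
// of the set's total voting power signed it.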
func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) {
    var (
        blockID                       = makeBlockIDRandom()
        voteSet, originalValset, vals = randVoteSet(1, 1, tmproto.PrecommitType, 6, 1)
        commit, err                   = makeCommit(blockID, 1, 1, voteSet, vals, time.Now())
        newValSet, _                  = randValidatorPrivValSet(2, 1)
    )
    require.NoError(t, err)

    testCases := []struct {
        valSet *ValidatorSet
        err    bool
    }{
        // good
        0: {
            valSet: originalValset,
            err:    false,
        },
        // bad - no overlap between validator sets
        1: {
            valSet: newValSet,
            err:    true,
        },
        // good - first two are different but the rest are the same -> >1/3
        2: {
            valSet: NewValidatorSet(append(newValSet.Validators, originalValset.Validators...)),
            err:    false,
        },
    }

    for _, tc := range testCases {
        err = tc.valSet.VerifyCommitLightTrusting("test_chain_id", commit,
            tmmath.Fraction{Numerator: 1, Denominator: 3})
        if tc.err {
            assert.Error(t, err)
        } else {
            assert.NoError(t, err)
        }
    }
}
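
// TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow ensures that the
// trust-level threshold computation reports an int64 overflow instead of
// panicking when the total voting power is at its maximum.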
func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) {
    var (
        blockID               = makeBlockIDRandom()
        voteSet, valSet, vals = randVoteSet(1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower)
        commit, err           = makeCommit(blockID, 1, 1, voteSet, vals, time.Now())
    )
    require.NoError(t, err)

    err = valSet.VerifyCommitLightTrusting("test_chain_id", commit,
        tmmath.Fraction{Numerator: 25, Denominator: 55})
    if assert.Error(t, err) {
        assert.Contains(t, err.Error(), "int64 overflow")
    }
}

func TestSafeMul(t *testing.T) {
    testCases := []struct {
        a        int64
        b        int64
        c        int64
        overflow bool
    }{
        0: {0, 0, 0, false},
        1: {1, 0, 0, false},
        2: {2, 3, 6, false},
        3: {2, -3, -6, false},
        4: {-2, -3, 6, false},
        5: {-2, 3, -6, false},
        6: {math.MaxInt64, 1, math.MaxInt64, false},
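        // math.MaxInt64 is odd, so math.MaxInt64/2 truncates: doubling it
        // yields math.MaxInt64-1 (no overflow), while tripling it overflows.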
        7: {math.MaxInt64 / 2, 2, math.MaxInt64 - 1, false},
        8: {math.MaxInt64 / 2, 3, 0, true},
        9: {math.MaxInt64, 2, 0, true},
    }

    for i, tc := range testCases {
        c, overflow := safeMul(tc.a, tc.b)
        assert.Equal(t, tc.c, c, "#%d", i)
        assert.Equal(t, tc.overflow, overflow, "#%d", i)
    }
}

func TestValidatorSetProtoBuf(t *testing.T) {
    valset, _ := randValidatorPrivValSet(10, 100)
    valset2, _ := randValidatorPrivValSet(10, 100)
    valset2.Validators[0] = &Validator{}

    valset3, _ := randValidatorPrivValSet(10, 100)
    valset3.Proposer = nil

    valset4, _ := randValidatorPrivValSet(10, 100)
    valset4.Proposer = &Validator{}

    testCases := []struct {
        msg      string
        v1       *ValidatorSet
        expPass1 bool
        expPass2 bool
    }{
        {"success", valset, true, true},
        {"fail valSet2, pubkey empty", valset2, false, false},
        {"fail nil Proposer", valset3, false, false},
        {"fail empty Proposer", valset4, false, false},
        {"fail empty valSet", &ValidatorSet{}, true, false},
        {"fail nil valSet", nil, true, false},
    }
    for _, tc := range testCases {
        protoValSet, err := tc.v1.ToProto()
        if tc.expPass1 {
            require.NoError(t, err, tc.msg)
        } else {
            require.Error(t, err, tc.msg)
        }

        valSet, err := ValidatorSetFromProto(protoValSet)
        if tc.expPass2 {
            require.NoError(t, err, tc.msg)
            require.EqualValues(t, tc.v1, valSet, tc.msg)
        } else {
            require.Error(t, err, tc.msg)
        }
    }
}

//---------------------
// Sort validators by priority and address

type validatorsByPriority []*Validator

func (valz validatorsByPriority) Len() int {
    return len(valz)
}

func (valz validatorsByPriority) Less(i, j int) bool {
    if valz[i].ProposerPriority < valz[j].ProposerPriority {
        return true
    }
    if valz[i].ProposerPriority > valz[j].ProposerPriority {
        return false
    }
    return bytes.Compare(valz[i].Address, valz[j].Address) < 0
}

func (valz validatorsByPriority) Swap(i, j int) {
    valz[i], valz[j] = valz[j], valz[i]
}

//-------------------------------------

type testValsByVotingPower []testVal

func (tvals testValsByVotingPower) Len() int {
    return len(tvals)
}

func (tvals testValsByVotingPower) Less(i, j int) bool {
    if tvals[i].power == tvals[j].power {
        return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1
    }
    return tvals[i].power > tvals[j].power
}

func (tvals testValsByVotingPower) Swap(i, j int) {
    tvals[i], tvals[j] = tvals[j], tvals[i]
}

//-------------------------------------
// Benchmark tests
//
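
// BenchmarkUpdates measures the cost of adding m new validators to an
// existing set of n validators via UpdateWithChangeSet.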
func BenchmarkUpdates(b *testing.B) {
    const (
        n = 100
        m = 2000
    )
    // Init with n validators
    vs := make([]*Validator, n)
    for j := 0; j < n; j++ {
        vs[j] = newValidator([]byte(fmt.Sprintf("v%d", j)), 100)
    }
    valSet := NewValidatorSet(vs)
    l := len(valSet.Validators)

    // Make m new validators
    newValList := make([]*Validator, m)
    for j := 0; j < m; j++ {
        newValList[j] = newValidator([]byte(fmt.Sprintf("v%d", j+l)), 1000)
    }
    b.ResetTimer()

    for i := 0; i < b.N; i++ {
        // Add m validators to valSetCopy
        valSetCopy := valSet.Copy()
        assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList))
    }
}
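
// The three VerifyCommit* benchmarks below run b.N/n iterations each, to
// compensate for the fact that a single call verifies up to n signatures;
// this keeps the work per reported op roughly comparable across set sizes.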
func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) {
    for _, n := range []int{1, 8, 64, 1024} {
        n := n
        var (
            chainID = "test_chain_id"
            h       = int64(3)
            blockID = makeBlockIDRandom()
        )
        b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) {
            b.ReportAllocs()
            // generate n validators
            voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5))
            // create a commit with n validators
            commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now())
            require.NoError(b, err)
            for i := 0; i < b.N/n; i++ {
                err = valSet.VerifyCommit(chainID, blockID, h, commit)
                assert.NoError(b, err)
            }
        })
    }
}

func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) {
    for _, n := range []int{1, 8, 64, 1024} {
        n := n
        var (
            chainID = "test_chain_id"
            h       = int64(3)
            blockID = makeBlockIDRandom()
        )
        b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) {
            b.ReportAllocs()
            // generate n validators
            voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5))
            // create a commit with n validators
            commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now())
            require.NoError(b, err)
            for i := 0; i < b.N/n; i++ {
                err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
                assert.NoError(b, err)
            }
        })
    }
}

func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) {
    for _, n := range []int{1, 8, 64, 1024} {
        n := n
        var (
            chainID = "test_chain_id"
            h       = int64(3)
            blockID = makeBlockIDRandom()
        )
        b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) {
            b.ReportAllocs()
            // generate n validators
            voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5))
            // create a commit with n validators
            commit, err := makeCommit(blockID, h, 0, voteSet, vals, time.Now())
            require.NoError(b, err)
            for i := 0; i < b.N/n; i++ {
                err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
                assert.NoError(b, err)
            }
        })
    }
}