types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164. The general problem is that, under certain conditions, an overflow warning is issued when attempting to update a validator set even though the final set's total voting power does not exceed the maximum allowed. The root cause is that verifyUpdates() checks the updates against the total voting power in order of validator address. A validator with a low address may therefore increase its power such that the running total temporarily exceeds MaxTotalVotingPower. Removing and adding/updating validators with high voting power in the same update operation triggers the same false warning, and the updates are not applied. The main fix is in verifyUpdates(), which now performs the verification starting with the decreases in power and also takes into account the removals that are part of the update; a sketch of the idea follows this entry. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant using sort by amount of change in power * compute separately the number of new validators in the update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates: copy updates, sort them by delta, and use the resulting slice to calculate tvpAfterUpdatesBeforeRemovals * remove unused structs * review comments * update changelog
5 years ago
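The ordering problem and its fix can be illustrated in isolation. The following Go sketch is a minimal reconstruction, not Tendermint's actual verifyUpdates(): the Update type, its field names, and the verifySorted helper are hypothetical, while MaxTotalVotingPower mirrors Tendermint's MaxInt64 / 8 cap. Walking the updates sorted by power delta (decreases and removals first) keeps the running total from spuriously crossing the cap when the final total is legal.

```go
package main

import (
	"fmt"
	"math"
	"sort"
)

// Update is a hypothetical stand-in for one validator power change:
// OldPower is 0 for a brand-new validator, NewPower is 0 for a removal.
type Update struct {
	Address  string
	OldPower int64
	NewPower int64
}

// Mirrors Tendermint's cap on total voting power (MaxInt64 / 8).
const MaxTotalVotingPower = int64(math.MaxInt64) / 8

// verifySorted mimics the fixed verifyUpdates(): instead of walking the
// updates in address order, walk them sorted by delta, so decreases and
// removals are applied first and the running total never spuriously
// crosses the cap when the final total is in fact legal.
func verifySorted(currentTotal int64, updates []Update) error {
	sorted := make([]Update, len(updates))
	copy(sorted, updates)
	sort.Slice(sorted, func(i, j int) bool {
		return sorted[i].NewPower-sorted[i].OldPower <
			sorted[j].NewPower-sorted[j].OldPower
	})
	running := currentTotal
	for _, u := range sorted {
		running += u.NewPower - u.OldPower
		if running > MaxTotalVotingPower {
			return fmt.Errorf("total voting power %d overflows max %d",
				running, MaxTotalVotingPower)
		}
	}
	return nil
}

func main() {
	// One batch that removes a big validator and boosts a small one.
	// In address order ("aa" before "zz") the running total would reach
	// cap+90 and raise a false overflow; in delta order it never does.
	err := verifySorted(MaxTotalVotingPower, []Update{
		{Address: "aa", OldPower: 10, NewPower: 100}, // +90
		{Address: "zz", OldPower: 100, NewPower: 0},  // -100 (removal)
	})
	fmt.Println(err) // <nil>
}
```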
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that validators with the same power alternate * add another test to track / simulate behaviour similar to #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing a branch with the following changes: the total power might change if a validator is added, removed, or updated; decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix #2960 by re-normalizing / scaling priorities to be within the bounds of total power; additionally: - remove the heap where it doesn't make sense - average only at the end of IncrementProposerPriority instead of on each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect the above changes * fix tests * add comment * update changelog pending & some minor changes * comment that division will floor the result & fix typo * update TestLargeGenesisValidator: remove TODO and increase the large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of floor when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: divide by Ceil((maxPriority - minPriority) / (2*totalVotingPower)) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for the diff; keep testing * use a for loop for div * cleanup, more tests * spellcheck * get rid of floats: manually ceil where necessary * Remove float, simplify, fix tests to match Chris's proof (#3157) A sketch of the rescaling step follows this entry.
6 years ago
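To see how the re-normalization works without floats, here is a minimal Go sketch under stated assumptions: the Validator struct, rescalePriorities, and ceilDiv are hypothetical names, and this illustrates the scaling idea rather than Tendermint's exact RescalePriorities code. When the spread between the highest and lowest priority exceeds the bound (2 * total voting power in the fix), every priority is divided by the ceiling of spread / bound, which preserves relative order while bringing the spread back within bounds.

```go
package main

import "fmt"

// A hypothetical minimal validator record; only the field the
// rescaling step needs.
type Validator struct {
	Address          string
	ProposerPriority int64
}

// ceilDiv does ceiling integer division without floats, mirroring the
// "manually ceil where necessary" commits above. Assumes a, b > 0.
func ceilDiv(a, b int64) int64 {
	return (a + b - 1) / b
}

// rescalePriorities scales all priorities down so that the spread
// (max - min) stays within diffMax. A sketch of the idea, not
// Tendermint's exact implementation.
func rescalePriorities(vals []*Validator, diffMax int64) {
	min, max := vals[0].ProposerPriority, vals[0].ProposerPriority
	for _, v := range vals[1:] {
		if v.ProposerPriority < min {
			min = v.ProposerPriority
		}
		if v.ProposerPriority > max {
			max = v.ProposerPriority
		}
	}
	diff := max - min
	if diff <= diffMax {
		return
	}
	// Dividing every priority by ceil(diff / diffMax) brings the
	// spread within bounds while preserving relative order.
	ratio := ceilDiv(diff, diffMax)
	for _, v := range vals {
		v.ProposerPriority /= ratio
	}
}

func main() {
	vals := []*Validator{
		{"v1", 1_000_000}, {"v2", -999_999}, {"v3", 0},
	}
	// Total voting power 100 => bound 2*100; priorities end up at
	// 100, -99, 0, a spread of 199 <= 200.
	rescalePriorities(vals, 2*100)
	for _, v := range vals {
		fmt.Println(v.Address, v.ProposerPriority)
	}
}
```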
types: verify commit fully Since the light client work introduced in v0.33, it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then fill in the rest of the signatures with whatever they want: they can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact the safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they are invalid and can punish the proposer. Since applications don't verify commit signatures directly (they trust Tendermint to do that), they won't be able to detect this. It can affect incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which records which validators signed. For instance, Gaia pays proposers a bonus for including more than +2/3 of the signatures, but a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node cannot. Thus the light client and the full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance, light clients can also skip verifying votes for nil while full nodes cannot). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward the data to the app, so the app will think the second validator actually signed when it clearly did not. A sketch of the full-vs-light distinction follows this entry.
4 years ago
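The difference between full and light verification can be sketched as follows. This is an illustrative Go example, not Tendermint's VerifyCommit: CommitSig here is a simplified stand-in and the actual signature check is reduced to a precomputed boolean. The point is where the loop stops: the light variant exits as soon as +2/3 of the power has verified, so garbage signatures after the quorum are never examined, while the full variant checks every signature.

```go
package main

import (
	"errors"
	"fmt"
)

// CommitSig is a simplified stand-in for Tendermint's CommitSig: the
// signature bytes are replaced by a precomputed validity flag.
type CommitSig struct {
	ValidatorIdx int
	VotedPower   int64
	SigValid     bool
}

var ErrInvalidSig = errors.New("invalid commit signature")

// verifyCommitFull is what a full node should do: verify every
// signature, even after the +2/3 quorum has been reached.
func verifyCommitFull(sigs []CommitSig, totalPower int64) error {
	var tallied int64
	for _, s := range sigs {
		if !s.SigValid {
			return ErrInvalidSig
		}
		tallied += s.VotedPower
	}
	if tallied*3 <= totalPower*2 {
		return errors.New("insufficient voting power for +2/3")
	}
	return nil
}

// verifyCommitLight is the light-client shortcut: stop as soon as +2/3
// of the power has verified, never examining the remaining signatures.
func verifyCommitLight(sigs []CommitSig, totalPower int64) error {
	var tallied int64
	for _, s := range sigs {
		if !s.SigValid {
			return ErrInvalidSig
		}
		tallied += s.VotedPower
		if tallied*3 > totalPower*2 { // early exit after +2/3
			return nil
		}
	}
	return errors.New("insufficient voting power for +2/3")
}

func main() {
	// The last signature is garbage, but quorum is reached before it
	// is examined: the light check passes, the full check rejects.
	sigs := []CommitSig{
		{ValidatorIdx: 0, VotedPower: 40, SigValid: true},
		{ValidatorIdx: 1, VotedPower: 40, SigValid: true},
		{ValidatorIdx: 2, VotedPower: 20, SigValid: false},
	}
	fmt.Println(verifyCommitLight(sigs, 100)) // <nil>
	fmt.Println(verifyCommitFull(sigs, 100))  // invalid commit signature
}
```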
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
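Below is a minimal, self-contained Go sketch of the idea behind the fix: copy the updates, sort them by power delta so decreases and removals are applied first, and only then count increases, so the running total can exceed the maximum only if the final set's total does. The update type, verifySorted helper, and maxTotal constant are illustrative stand-ins, not the actual tendermint code.

package main

import (
    "fmt"
    "sort"
)

const maxTotal = 100 // stand-in for MaxTotalVotingPower

// update records one validator's old and new voting power; newPower == 0
// means the validator is removed.
type update struct {
    name     string
    oldPower int64
    newPower int64
}

// verifySorted applies the power deltas smallest-first, so decreases and
// removals free up room before increases are counted; the running total
// then only exceeds maxTotal when the final set's total does.
func verifySorted(total int64, updates []update) error {
    sorted := make([]update, len(updates))
    copy(sorted, updates)
    sort.Slice(sorted, func(i, j int) bool {
        return sorted[i].newPower-sorted[i].oldPower < sorted[j].newPower-sorted[j].oldPower
    })
    for _, u := range sorted {
        total += u.newPower - u.oldPower
        if total > maxTotal {
            return fmt.Errorf("updated total voting power %d exceeds max %d", total, maxTotal)
        }
    }
    return nil
}

func main() {
    // Verifying in address order would bump "a" first and push the running
    // total to 150 > 100, a spurious overflow; removing "z" in the same
    // update brings the final total to a legal 60.
    updates := []update{
        {name: "a", oldPower: 10, newPower: 60}, // low address, large increase
        {name: "z", oldPower: 90, newPower: 0},  // removal that makes room
    }
    fmt.Println(verifySorted(100, updates)) // <nil>
}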
types: verify commit fully Since the light client work introduced in v0.33, it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures, and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact the safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they are invalid and can punish the proposer. But since applications don't verify commit signatures directly (they trust Tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node cannot. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance, light clients can also skip verifying votes for nil while full nodes cannot). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
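A minimal sketch of the distinction argued for above: a full node tallies and checks every signature, while a light-client-style check may stop as soon as +2/3 of the voting power has verifiably signed. The commitSig type and both verify functions below are simplified stand-ins, not tendermint's real VerifyCommit/VerifyCommitLight.

package main

import (
    "errors"
    "fmt"
)

// commitSig is a toy stand-in for a commit signature: its voting power and
// whether its signature verifies against the block.
type commitSig struct {
    power    int64
    sigValid bool
}

// verifyFull checks every signature, as a full node must, so a proposer
// cannot pad the commit with garbage for the final -1/3 of validators.
func verifyFull(sigs []commitSig, totalPower int64) error {
    var tallied int64
    for i, s := range sigs {
        if !s.sigValid {
            return fmt.Errorf("wrong signature (#%d)", i)
        }
        tallied += s.power
    }
    if tallied*3 <= totalPower*2 {
        return errors.New("insufficient voting power")
    }
    return nil
}

// verifyLight stops as soon as +2/3 of the voting power has verifiably
// signed; signatures after that point are never inspected.
func verifyLight(sigs []commitSig, totalPower int64) error {
    var tallied int64
    for i, s := range sigs {
        if !s.sigValid {
            return fmt.Errorf("wrong signature (#%d)", i)
        }
        tallied += s.power
        if tallied*3 > totalPower*2 {
            return nil
        }
    }
    return errors.New("insufficient voting power")
}

func main() {
    // Three valid signatures of power 10 already exceed 2/3 of 40, so the
    // light check returns before ever looking at the forged fourth one.
    sigs := []commitSig{{10, true}, {10, true}, {10, true}, {10, false}}
    fmt.Println(verifyLight(sigs, 40)) // <nil>: forged signature goes unnoticed
    fmt.Println(verifyFull(sigs, 40))  // wrong signature (#3): full node catches it
}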
package types

import (
    "bytes"
    "fmt"
    "math"
    "sort"
    "strings"
    "testing"
    "testing/quick"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/crypto/ed25519"
    tmmath "github.com/tendermint/tendermint/libs/math"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)
func TestValidatorSetBasic(t *testing.T) {
    // empty or nil validator lists are allowed,
    // but attempting to IncrementProposerPriority on them will panic.
    vset := NewValidatorSet([]*Validator{})
    assert.Panics(t, func() { vset.IncrementProposerPriority(1) })

    vset = NewValidatorSet(nil)
    assert.Panics(t, func() { vset.IncrementProposerPriority(1) })

    assert.EqualValues(t, vset, vset.Copy())
    assert.False(t, vset.HasAddress([]byte("some val")))
    idx, val := vset.GetByAddress([]byte("some val"))
    assert.EqualValues(t, -1, idx)
    assert.Nil(t, val)
    addr, val := vset.GetByIndex(-100)
    assert.Nil(t, addr)
    assert.Nil(t, val)
    addr, val = vset.GetByIndex(0)
    assert.Nil(t, addr)
    assert.Nil(t, val)
    addr, val = vset.GetByIndex(100)
    assert.Nil(t, addr)
    assert.Nil(t, val)
    assert.Zero(t, vset.Size())
    assert.Equal(t, int64(0), vset.TotalVotingPower())
    assert.Nil(t, vset.GetProposer())
    assert.Nil(t, vset.Hash())

    // add
    val = randValidator(vset.TotalVotingPower())
    assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))

    assert.True(t, vset.HasAddress(val.Address))
    idx, _ = vset.GetByAddress(val.Address)
    assert.EqualValues(t, 0, idx)
    addr, _ = vset.GetByIndex(0)
    assert.Equal(t, []byte(val.Address), addr)
    assert.Equal(t, 1, vset.Size())
    assert.Equal(t, val.VotingPower, vset.TotalVotingPower())
    assert.NotNil(t, vset.Hash())
    assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) })
    assert.Equal(t, val.Address, vset.GetProposer().Address)

    // update
    val = randValidator(vset.TotalVotingPower())
    assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
    _, val = vset.GetByAddress(val.Address)
    val.VotingPower += 100
    proposerPriority := val.ProposerPriority

    val.ProposerPriority = 0
    assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
    _, val = vset.GetByAddress(val.Address)
    assert.Equal(t, proposerPriority, val.ProposerPriority)
}
func TestValidatorSetValidateBasic(t *testing.T) {
    val, _ := RandValidator(false, 1)
    badVal := &Validator{}

    testCases := []struct {
        vals ValidatorSet
        err  bool
        msg  string
    }{
        {
            vals: ValidatorSet{},
            err:  true,
            msg:  "validator set is nil or empty",
        },
        {
            vals: ValidatorSet{
                Validators: []*Validator{},
            },
            err: true,
            msg: "validator set is nil or empty",
        },
        {
            vals: ValidatorSet{
                Validators: []*Validator{val},
            },
            err: true,
            msg: "proposer failed validate basic, error: nil validator",
        },
        {
            vals: ValidatorSet{
                Validators: []*Validator{badVal},
            },
            err: true,
            msg: "invalid validator #0: validator does not have a public key",
        },
        {
            vals: ValidatorSet{
                Validators: []*Validator{val},
                Proposer:   val,
            },
            err: false,
            msg: "",
        },
    }

    for _, tc := range testCases {
        err := tc.vals.ValidateBasic()
        if tc.err {
            if assert.Error(t, err) {
                assert.Equal(t, tc.msg, err.Error())
            }
        } else {
            assert.NoError(t, err)
        }
    }
}
func TestCopy(t *testing.T) {
    vset := randValidatorSet(10)
    vsetHash := vset.Hash()
    if len(vsetHash) == 0 {
        t.Fatalf("ValidatorSet had unexpected zero hash")
    }

    vsetCopy := vset.Copy()
    vsetCopyHash := vsetCopy.Hash()
    if !bytes.Equal(vsetHash, vsetCopyHash) {
        t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash)
    }
}

// Test that IncrementProposerPriority requires positive times.
func TestIncrementProposerPriorityPositiveTimes(t *testing.T) {
    vset := NewValidatorSet([]*Validator{
        newValidator([]byte("foo"), 1000),
        newValidator([]byte("bar"), 300),
        newValidator([]byte("baz"), 330),
    })

    assert.Panics(t, func() { vset.IncrementProposerPriority(-1) })
    assert.Panics(t, func() { vset.IncrementProposerPriority(0) })
    vset.IncrementProposerPriority(1)
}

func BenchmarkValidatorSetCopy(b *testing.B) {
    b.StopTimer()
    vset := NewValidatorSet([]*Validator{})
    for i := 0; i < 1000; i++ {
        privKey := ed25519.GenPrivKey()
        pubKey := privKey.PubKey()
        val := NewValidator(pubKey, 10)
        err := vset.UpdateWithChangeSet([]*Validator{val})
        if err != nil {
            panic("Failed to add validator")
        }
    }
    b.StartTimer()

    for i := 0; i < b.N; i++ {
        vset.Copy()
    }
}
//-------------------------------------------------------------------

func TestProposerSelection1(t *testing.T) {
    vset := NewValidatorSet([]*Validator{
        newValidator([]byte("foo"), 1000),
        newValidator([]byte("bar"), 300),
        newValidator([]byte("baz"), 330),
    })
    var proposers []string
    for i := 0; i < 99; i++ {
        val := vset.GetProposer()
        proposers = append(proposers, string(val.Address))
        vset.IncrementProposerPriority(1)
    }
    expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
        ` foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
        ` foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz` +
        ` foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo` +
        ` foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo`
    if expected != strings.Join(proposers, " ") {
        t.Errorf("expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " "))
    }
}
func TestProposerSelection2(t *testing.T) {
    addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
    addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
    addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}

    // when all voting power is the same, we go in order of addresses
    val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100)
    valList := []*Validator{val0, val1, val2}
    vals := NewValidatorSet(valList)
    for i := 0; i < len(valList)*5; i++ {
        ii := i % len(valList)
        prop := vals.GetProposer()
        if !bytes.Equal(prop.Address, valList[ii].Address) {
            t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address)
        }
        vals.IncrementProposerPriority(1)
    }

    // One validator has more than the others, but not enough to propose twice in a row
    *val2 = *newValidator(addr2, 400)
    vals = NewValidatorSet(valList)
    // vals.IncrementProposerPriority(1)
    prop := vals.GetProposer()
    if !bytes.Equal(prop.Address, addr2) {
        t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
    }
    vals.IncrementProposerPriority(1)
    prop = vals.GetProposer()
    if !bytes.Equal(prop.Address, addr0) {
        t.Fatalf("Expected smallest address to be proposer. Got %X", prop.Address)
    }

    // One validator has more than the others, and enough to be proposer twice in a row
    *val2 = *newValidator(addr2, 401)
    vals = NewValidatorSet(valList)
    prop = vals.GetProposer()
    if !bytes.Equal(prop.Address, addr2) {
        t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
    }
    vals.IncrementProposerPriority(1)
    prop = vals.GetProposer()
    if !bytes.Equal(prop.Address, addr2) {
        t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address)
    }
    vals.IncrementProposerPriority(1)
    prop = vals.GetProposer()
    if !bytes.Equal(prop.Address, addr0) {
        t.Fatalf("Expected smallest address to be proposer. Got %X", prop.Address)
    }

    // each validator should be the proposer a proportional number of times
    val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3)
    valList = []*Validator{val0, val1, val2}
    propCount := make([]int, 3)
    vals = NewValidatorSet(valList)
    N := 1
    for i := 0; i < 120*N; i++ {
        prop := vals.GetProposer()
        ii := prop.Address[19]
        propCount[ii]++
        vals.IncrementProposerPriority(1)
    }

    if propCount[0] != 40*N {
        t.Fatalf(
            "Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d",
            40*N,
            120*N,
            propCount[0],
            120*N,
        )
    }
    if propCount[1] != 50*N {
        t.Fatalf(
            "Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d",
            50*N,
            120*N,
            propCount[1],
            120*N,
        )
    }
    if propCount[2] != 30*N {
        t.Fatalf(
            "Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d",
            30*N,
            120*N,
            propCount[2],
            120*N,
        )
    }
}
func TestProposerSelection3(t *testing.T) {
    vset := NewValidatorSet([]*Validator{
        newValidator([]byte("avalidator_address12"), 1),
        newValidator([]byte("bvalidator_address12"), 1),
        newValidator([]byte("cvalidator_address12"), 1),
        newValidator([]byte("dvalidator_address12"), 1),
    })

    proposerOrder := make([]*Validator, 4)
    for i := 0; i < 4; i++ {
        // need to give all validators keys
        pk := ed25519.GenPrivKey().PubKey()
        vset.Validators[i].PubKey = pk
        proposerOrder[i] = vset.GetProposer()
        vset.IncrementProposerPriority(1)
    }

    // i for the loop
    // j for the times
    // we should go in order forever, despite some IncrementProposerPriority with times > 1
    var (
        i int
        j int32
    )
    for ; i < 10000; i++ {
        got := vset.GetProposer().Address
        expected := proposerOrder[j%4].Address
        if !bytes.Equal(got, expected) {
            t.Fatalf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)
        }

        // serialize, deserialize, check proposer
        b := vset.toBytes()
        vset = vset.fromBytes(b)

        computed := vset.GetProposer() // findGetProposer()
        if i != 0 {
            if !bytes.Equal(got, computed.Address) {
                t.Fatalf(
                    "vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)",
                    got,
                    computed.Address,
                    i,
                    j,
                )
            }
        }

        // times is usually 1
        times := int32(1)
        mod := (tmrand.Int() % 5) + 1
        if tmrand.Int()%mod > 0 {
            // sometimes it's more (up to 4)
            times = (tmrand.Int31() % 4) + 1
        }
        vset.IncrementProposerPriority(times)

        j += times
    }
}
func newValidator(address []byte, power int64) *Validator {
    return &Validator{Address: address, VotingPower: power}
}
func randPubKey() crypto.PubKey {
    // fill an ed25519-sized key with random bytes and return it
    pubKey := make(ed25519.PubKey, ed25519.PubKeySize)
    copy(pubKey, tmrand.Bytes(32))
    return pubKey
}
func randValidator(totalVotingPower int64) *Validator {
    // this modulo limits the ProposerPriority/VotingPower to stay in the
    // bounds of MaxTotalVotingPower minus the already existing voting power:
    val := NewValidator(randPubKey(), int64(tmrand.Uint64()%uint64(MaxTotalVotingPower-totalVotingPower)))
    val.ProposerPriority = tmrand.Int64() % (MaxTotalVotingPower - totalVotingPower)
    return val
}

func randValidatorSet(numValidators int) *ValidatorSet {
    validators := make([]*Validator, numValidators)
    totalVotingPower := int64(0)
    for i := 0; i < numValidators; i++ {
        validators[i] = randValidator(totalVotingPower)
        totalVotingPower += validators[i].VotingPower
    }
    return NewValidatorSet(validators)
}

func (vals *ValidatorSet) toBytes() []byte {
    pbvs, err := vals.ToProto()
    if err != nil {
        panic(err)
    }
    bz, err := pbvs.Marshal()
    if err != nil {
        panic(err)
    }
    return bz
}

func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet {
    pbvs := new(tmproto.ValidatorSet)
    err := pbvs.Unmarshal(b)
    if err != nil {
        // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
        panic(err)
    }
    vs, err := ValidatorSetFromProto(pbvs)
    if err != nil {
        panic(err)
    }
    return vs
}
//-------------------------------------------------------------------

func TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) {
    // NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower()
    // which should panic on overflows:
    shouldPanic := func() {
        NewValidatorSet([]*Validator{
            {Address: []byte("a"), VotingPower: math.MaxInt64, ProposerPriority: 0},
            {Address: []byte("b"), VotingPower: math.MaxInt64, ProposerPriority: 0},
            {Address: []byte("c"), VotingPower: math.MaxInt64, ProposerPriority: 0},
        })
    }

    assert.Panics(t, shouldPanic)
}

func TestAvgProposerPriority(t *testing.T) {
    // Create Validator set without calling IncrementProposerPriority:
    tcs := []struct {
        vs   ValidatorSet
        want int64
    }{
        0: {ValidatorSet{Validators: []*Validator{{ProposerPriority: 0}, {ProposerPriority: 0}, {ProposerPriority: 0}}}, 0},
        1: {
            ValidatorSet{
                Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}, {ProposerPriority: 0}},
            }, math.MaxInt64 / 3,
        },
        2: {
            ValidatorSet{
                Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}},
            }, math.MaxInt64 / 2,
        },
        3: {
            ValidatorSet{
                Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: math.MaxInt64}},
            }, math.MaxInt64,
        },
        4: {
            ValidatorSet{
                Validators: []*Validator{{ProposerPriority: math.MinInt64}, {ProposerPriority: math.MinInt64}},
            }, math.MinInt64,
        },
    }
    for i, tc := range tcs {
        got := tc.vs.computeAvgProposerPriority()
        assert.Equal(t, tc.want, got, "test case: %v", i)
    }
}

func TestAveragingInIncrementProposerPriority(t *testing.T) {
    // Test that the averaging works as expected inside of IncrementProposerPriority.
    // Each validator comes with zero voting power which simplifies reasoning about
    // the expected ProposerPriority.
    tcs := []struct {
        vs    ValidatorSet
        times int32
        avg   int64
    }{
        0: {ValidatorSet{
            Validators: []*Validator{
                {Address: []byte("a"), ProposerPriority: 1},
                {Address: []byte("b"), ProposerPriority: 2},
                {Address: []byte("c"), ProposerPriority: 3}}},
            1, 2},
        1: {ValidatorSet{
            Validators: []*Validator{
                {Address: []byte("a"), ProposerPriority: 10},
                {Address: []byte("b"), ProposerPriority: -10},
                {Address: []byte("c"), ProposerPriority: 1}}},
            // this should average twice but the average should be 0 after the first iteration
            // (voting power is 0 -> no changes)
            11, 1 / 3},
        2: {ValidatorSet{
            Validators: []*Validator{
                {Address: []byte("a"), ProposerPriority: 100},
                {Address: []byte("b"), ProposerPriority: -10},
                {Address: []byte("c"), ProposerPriority: 1}}},
            1, 91 / 3},
    }
    for i, tc := range tcs {
        // work on copy to have the old ProposerPriorities:
        newVset := tc.vs.CopyIncrementProposerPriority(tc.times)
        for _, val := range tc.vs.Validators {
            _, updatedVal := newVset.GetByAddress(val.Address)
            assert.Equal(t, updatedVal.ProposerPriority, val.ProposerPriority-tc.avg, "test case: %v", i)
        }
    }
}
func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) {
    // Unlike TestAveragingInIncrementProposerPriority, this is a more complete test showing
    // how each ProposerPriority changes in relation to the validator's voting power.
    // The average is zero in each round:
    vp0 := int64(10)
    vp1 := int64(1)
    vp2 := int64(1)
    total := vp0 + vp1 + vp2
    avg := (vp0 + vp1 + vp2 - total) / 3
    vals := ValidatorSet{Validators: []*Validator{
        {Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0},
        {Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1},
        {Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}}
    tcs := []struct {
        vals                   *ValidatorSet
        wantProposerPriorities []int64
        times                  int32
        wantProposer           *Validator
    }{
        0: {
            vals.Copy(),
            []int64{
                // Accum+VotingPower-Avg:
                0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12)
                0 + vp1,
                0 + vp2},
            1,
            vals.Validators[0]},
        1: {
            vals.Copy(),
            []int64{
                (0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too
                (0 + vp1) + vp1,
                (0 + vp2) + vp2},
            2,
            vals.Validators[0]}, // increment twice -> expect average to be subtracted twice
        2: {
            vals.Copy(),
            []int64{
                0 + 3*(vp0-total) - avg, // still mostest
                0 + 3*vp1,
                0 + 3*vp2},
            3,
            vals.Validators[0]},
        3: {
            vals.Copy(),
            []int64{
                0 + 4*(vp0-total), // still mostest
                0 + 4*vp1,
                0 + 4*vp2},
            4,
            vals.Validators[0]},
        4: {
            vals.Copy(),
            []int64{
                0 + 4*(vp0-total) + vp0, // 4 iters was mostest
                0 + 5*vp1 - total,       // now this val is mostest for the 1st time (hence -12==totalVotingPower)
                0 + 5*vp2},
            5,
            vals.Validators[1]},
        5: {
            vals.Copy(),
            []int64{
                0 + 6*vp0 - 5*total, // mostest again
                0 + 6*vp1 - total,   // mostest once up to here
                0 + 6*vp2},
            6,
            vals.Validators[0]},
        6: {
            vals.Copy(),
            []int64{
                0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times
                0 + 7*vp1 - total,   // in 7 iters this val is mostest 1 time
                0 + 7*vp2},
            7,
            vals.Validators[0]},
        7: {
            vals.Copy(),
            []int64{
                0 + 8*vp0 - 7*total, // mostest again
                0 + 8*vp1 - total,
                0 + 8*vp2},
            8,
            vals.Validators[0]},
        8: {
            vals.Copy(),
            []int64{
                0 + 9*vp0 - 7*total,
                0 + 9*vp1 - total,
                0 + 9*vp2 - total}, // mostest
            9,
            vals.Validators[2]},
        9: {
            vals.Copy(),
            []int64{
                0 + 10*vp0 - 8*total, // after 10 iters this is mostest again
                0 + 10*vp1 - total,   // after 6 iters this val is "mostest" once and not in between
                0 + 10*vp2 - total},  // in between 10 iters this val is "mostest" once
            10,
            vals.Validators[0]},
        10: {
            vals.Copy(),
            []int64{
                0 + 11*vp0 - 9*total,
                0 + 11*vp1 - total,  // after 6 iters this val is "mostest" once and not in between
                0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once
            11,
            vals.Validators[0]},
    }
    for i, tc := range tcs {
        tc.vals.IncrementProposerPriority(tc.times)

        assert.Equal(t, tc.wantProposer.Address, tc.vals.GetProposer().Address,
            "test case: %v",
            i)

        for valIdx, val := range tc.vals.Validators {
            assert.Equal(t,
                tc.wantProposerPriorities[valIdx],
                val.ProposerPriority,
                "test case: %v, validator: %v",
                i,
                valIdx)
        }
    }
}
func TestSafeAdd(t *testing.T) {
    f := func(a, b int64) bool {
        c, overflow := safeAdd(a, b)
        return overflow || c == a+b
    }
    if err := quick.Check(f, nil); err != nil {
        t.Error(err)
    }
}

func TestSafeAddClip(t *testing.T) {
    assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
    assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
    assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10))
}

func TestSafeSubClip(t *testing.T) {
    assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10))
    assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64))
    assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64))
    assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10))
}
//-------------------------------------------------------------------

// Check VerifyCommit, VerifyCommitLight and VerifyCommitLightTrusting basic
// verification.
func TestValidatorSet_VerifyCommit_All(t *testing.T) {
    var (
        privKey = ed25519.GenPrivKey()
        pubKey  = privKey.PubKey()
        v1      = NewValidator(pubKey, 1000)
        vset    = NewValidatorSet([]*Validator{v1})

        chainID = "Lalande21185"
    )

    vote := examplePrecommit()
    vote.ValidatorAddress = pubKey.Address()
    v := vote.ToProto()
    sig, err := privKey.Sign(VoteSignBytes(chainID, v))
    require.NoError(t, err)
    vote.Signature = sig

    commit := NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote.CommitSig()})

    vote2 := *vote
    sig2, err := privKey.Sign(VoteSignBytes("EpsilonEridani", v))
    require.NoError(t, err)
    vote2.Signature = sig2

    testCases := []struct {
        description string
        chainID     string
        blockID     BlockID
        height      int64
        commit      *Commit
        expErr      bool
    }{
        {"good", chainID, vote.BlockID, vote.Height, commit, false},

        {"wrong signature (#0)", "EpsilonEridani", vote.BlockID, vote.Height, commit, true},
        {"wrong block ID", chainID, makeBlockIDRandom(), vote.Height, commit, true},
        {"wrong height", chainID, vote.BlockID, vote.Height - 1, commit, true},

        {"wrong set size: 1 vs 0", chainID, vote.BlockID, vote.Height,
            NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{}), true},

        {"wrong set size: 1 vs 2", chainID, vote.BlockID, vote.Height,
            NewCommit(vote.Height, vote.Round, vote.BlockID,
                []CommitSig{vote.CommitSig(), {BlockIDFlag: BlockIDFlagAbsent}}), true},

        {"insufficient voting power: got 0, needed more than 666", chainID, vote.BlockID, vote.Height,
            NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}), true},

        {"wrong signature (#0)", chainID, vote.BlockID, vote.Height,
            NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote2.CommitSig()}), true},
    }

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.description, func(t *testing.T) {
            err := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit)
            if tc.expErr {
                if assert.Error(t, err, "VerifyCommit") {
                    assert.Contains(t, err.Error(), tc.description, "VerifyCommit")
                }
            } else {
                assert.NoError(t, err, "VerifyCommit")
            }

            err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit)
            if tc.expErr {
                if assert.Error(t, err, "VerifyCommitLight") {
                    assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight")
                }
            } else {
                assert.NoError(t, err, "VerifyCommitLight")
            }
        })
    }
}
func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) {
    var (
        chainID = "test_chain_id"
        h       = int64(3)
        blockID = makeBlockIDRandom()
    )

    voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
    commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
    require.NoError(t, err)

    // malleate 4th signature
    vote := voteSet.GetByIndex(3)
    v := vote.ToProto()
    err = vals[3].SignVote("CentaurusA", v)
    require.NoError(t, err)
    vote.Signature = v.Signature
    commit.Signatures[3] = vote.CommitSig()

    err = valSet.VerifyCommit(chainID, blockID, h, commit)
    if assert.Error(t, err) {
        assert.Contains(t, err.Error(), "wrong signature (#3)")
    }
}

func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) {
    var (
        chainID = "test_chain_id"
        h       = int64(3)
        blockID = makeBlockIDRandom()
    )

    voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
    commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
    require.NoError(t, err)

    // malleate 4th signature (3 signatures are enough for 2/3+)
    vote := voteSet.GetByIndex(3)
    v := vote.ToProto()
    err = vals[3].SignVote("CentaurusA", v)
    require.NoError(t, err)
    vote.Signature = v.Signature
    commit.Signatures[3] = vote.CommitSig()

    err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
    assert.NoError(t, err)
}

func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) {
    var (
        chainID = "test_chain_id"
        h       = int64(3)
        blockID = makeBlockIDRandom()
    )

    voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
    commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
    require.NoError(t, err)

    // malleate 3rd signature (2 signatures are enough for 1/3+ trust level)
    vote := voteSet.GetByIndex(2)
    v := vote.ToProto()
    err = vals[2].SignVote("CentaurusA", v)
    require.NoError(t, err)
    vote.Signature = v.Signature
    commit.Signatures[2] = vote.CommitSig()

    err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
    assert.NoError(t, err)
}
func TestEmptySet(t *testing.T) {
    var valList []*Validator
    valSet := NewValidatorSet(valList)
    assert.Panics(t, func() { valSet.IncrementProposerPriority(1) })
    assert.Panics(t, func() { valSet.RescalePriorities(100) })
    assert.Panics(t, func() { valSet.shiftByAvgProposerPriority() })
    assert.Panics(t, func() { assert.Zero(t, computeMaxMinPriorityDiff(valSet)) })
    valSet.GetProposer()

    // Add to empty set
    v1 := newValidator([]byte("v1"), 100)
    v2 := newValidator([]byte("v2"), 100)
    valList = []*Validator{v1, v2}
    assert.NoError(t, valSet.UpdateWithChangeSet(valList))
    verifyValidatorSet(t, valSet)

    // Delete all validators from set
    v1 = newValidator([]byte("v1"), 0)
    v2 = newValidator([]byte("v2"), 0)
    delList := []*Validator{v1, v2}
    assert.Error(t, valSet.UpdateWithChangeSet(delList))

    // Attempt delete from empty set
    assert.Error(t, valSet.UpdateWithChangeSet(delList))
}

func TestUpdatesForNewValidatorSet(t *testing.T) {
    v1 := newValidator([]byte("v1"), 100)
    v2 := newValidator([]byte("v2"), 100)
    valList := []*Validator{v1, v2}
    valSet := NewValidatorSet(valList)
    verifyValidatorSet(t, valSet)

    // Verify duplicates are caught in NewValidatorSet() and it panics
    v111 := newValidator([]byte("v1"), 100)
    v112 := newValidator([]byte("v1"), 123)
    v113 := newValidator([]byte("v1"), 234)
    valList = []*Validator{v111, v112, v113}
    assert.Panics(t, func() { NewValidatorSet(valList) })

    // Verify set including validator with voting power 0 cannot be created
    v1 = newValidator([]byte("v1"), 0)
    v2 = newValidator([]byte("v2"), 22)
    v3 := newValidator([]byte("v3"), 33)
    valList = []*Validator{v1, v2, v3}
    assert.Panics(t, func() { NewValidatorSet(valList) })

    // Verify set including validator with negative voting power cannot be created
    v1 = newValidator([]byte("v1"), 10)
    v2 = newValidator([]byte("v2"), -20)
    v3 = newValidator([]byte("v3"), 30)
    valList = []*Validator{v1, v2, v3}
    assert.Panics(t, func() { NewValidatorSet(valList) })
}
type testVal struct {
    name  string
    power int64
}

func permutation(valList []testVal) []testVal {
    if len(valList) == 0 {
        return nil
    }
    permList := make([]testVal, len(valList))
    perm := tmrand.Perm(len(valList))
    for i, v := range perm {
        permList[v] = valList[i]
    }
    return permList
}

func createNewValidatorList(testValList []testVal) []*Validator {
    valList := make([]*Validator, 0, len(testValList))
    for _, val := range testValList {
        valList = append(valList, newValidator([]byte(val.name), val.power))
    }
    return valList
}

func createNewValidatorSet(testValList []testVal) *ValidatorSet {
    return NewValidatorSet(createNewValidatorList(testValList))
}

func valSetTotalProposerPriority(valSet *ValidatorSet) int64 {
    sum := int64(0)
    for _, val := range valSet.Validators {
        // mind overflow
        sum = safeAddClip(sum, val.ProposerPriority)
    }
    return sum
}

func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) {
    // verify that the capacity and length of validators is the same
    assert.Equal(t, len(valSet.Validators), cap(valSet.Validators))

    // verify that the set's total voting power has been updated
    tvp := valSet.totalVotingPower
    valSet.updateTotalVotingPower()
    expectedTvp := valSet.TotalVotingPower()
    assert.Equal(t, expectedTvp, tvp,
        "expected TVP %d. Got %d, valSet=%s", expectedTvp, tvp, valSet)

    // verify that validator priorities are centered
    valsCount := int64(len(valSet.Validators))
    tpp := valSetTotalProposerPriority(valSet)
    assert.True(t, tpp < valsCount && tpp > -valsCount,
        "expected total priority in (-%d, %d). Got %d", valsCount, valsCount, tpp)

    // verify that priorities are scaled
    dist := computeMaxMinPriorityDiff(valSet)
    assert.True(t, dist <= PriorityWindowSizeFactor*tvp,
        "expected priority distance < %d. Got %d", PriorityWindowSizeFactor*tvp, dist)
}

func toTestValList(valList []*Validator) []testVal {
    testList := make([]testVal, len(valList))
    for i, val := range valList {
        testList[i].name = string(val.Address)
        testList[i].power = val.VotingPower
    }
    return testList
}

func testValSet(nVals int, power int64) []testVal {
    vals := make([]testVal, nVals)
    for i := 0; i < nVals; i++ {
        vals[i] = testVal{fmt.Sprintf("v%d", i+1), power}
    }
    return vals
}

type valSetErrTestCase struct {
    startVals  []testVal
    updateVals []testVal
}

func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) {
    // create a new set and apply updates, keeping copies for the checks
    valSet := createNewValidatorSet(tt.startVals)
    valSetCopy := valSet.Copy()
    valList := createNewValidatorList(tt.updateVals)
    valListCopy := validatorListCopy(valList)
    err := valSet.UpdateWithChangeSet(valList)

    // for errors check the validator set has not been changed
    assert.Error(t, err, "test %d", idx)
    assert.Equal(t, valSet, valSetCopy, "test %v", idx)

    // check the parameter list has not changed
    assert.Equal(t, valList, valListCopy, "test %v", idx)
}
func TestValSetUpdatesDuplicateEntries(t *testing.T) {
    testCases := []valSetErrTestCase{
        // Duplicate entries in changes
        { // first entry is duplicated change
            testValSet(2, 10),
            []testVal{{"v1", 11}, {"v1", 22}},
        },
        { // second entry is duplicated change
            testValSet(2, 10),
            []testVal{{"v2", 11}, {"v2", 22}},
        },
        { // change duplicates are separated by a valid change
            testValSet(2, 10),
            []testVal{{"v1", 11}, {"v2", 22}, {"v1", 12}},
        },
        { // change duplicates are separated by a valid change
            testValSet(3, 10),
            []testVal{{"v1", 11}, {"v3", 22}, {"v1", 12}},
        },

        // Duplicate entries in remove
        { // first entry is duplicated remove
            testValSet(2, 10),
            []testVal{{"v1", 0}, {"v1", 0}},
        },
        { // second entry is duplicated remove
            testValSet(2, 10),
            []testVal{{"v2", 0}, {"v2", 0}},
        },
        { // remove duplicates are separated by a valid remove
            testValSet(2, 10),
            []testVal{{"v1", 0}, {"v2", 0}, {"v1", 0}},
        },
        { // remove duplicates are separated by a valid remove
            testValSet(3, 10),
            []testVal{{"v1", 0}, {"v3", 0}, {"v1", 0}},
        },

        { // remove and update same val
            testValSet(2, 10),
            []testVal{{"v1", 0}, {"v2", 20}, {"v1", 30}},
        },
        { // duplicate entries in removes + changes
            testValSet(2, 10),
            []testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
        },
        { // duplicate entries in removes + changes
            testValSet(3, 10),
            []testVal{{"v1", 0}, {"v3", 5}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
        },
    }

    for i, tt := range testCases {
        executeValSetErrTestCase(t, i, tt)
    }
}

func TestValSetUpdatesOverflows(t *testing.T) {
    maxVP := MaxTotalVotingPower
    testCases := []valSetErrTestCase{
        { // single update leading to overflow
            testValSet(2, 10),
            []testVal{{"v1", math.MaxInt64}},
        },
        { // single update leading to overflow
            testValSet(2, 10),
            []testVal{{"v2", math.MaxInt64}},
        },
        { // add validator leading to overflow
            testValSet(1, maxVP),
            []testVal{{"v2", math.MaxInt64}},
        },
        { // add validator leading to exceed Max
            testValSet(1, maxVP-1),
            []testVal{{"v2", 5}},
        },
        { // add validator leading to exceed Max
            testValSet(2, maxVP/3),
            []testVal{{"v3", maxVP / 2}},
        },
        { // add validator leading to exceed Max
            testValSet(1, maxVP),
            []testVal{{"v2", maxVP}},
        },
    }

    for i, tt := range testCases {
        executeValSetErrTestCase(t, i, tt)
    }
}

func TestValSetUpdatesOtherErrors(t *testing.T) {
    testCases := []valSetErrTestCase{
        { // update with negative voting power
            testValSet(2, 10),
            []testVal{{"v1", -123}},
        },
        { // update with negative voting power
            testValSet(2, 10),
            []testVal{{"v2", -123}},
        },
        { // remove non-existing validator
            testValSet(2, 10),
            []testVal{{"v3", 0}},
        },
        { // delete all validators
            []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
            []testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
        },
    }

    for i, tt := range testCases {
        executeValSetErrTestCase(t, i, tt)
    }
}
func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
    valSetUpdatesBasicTests := []struct {
        startVals    []testVal
        updateVals   []testVal
        expectedVals []testVal
    }{
        { // no changes
            testValSet(2, 10),
            []testVal{},
            testValSet(2, 10),
        },
        { // voting power changes
            testValSet(2, 10),
            []testVal{{"v2", 22}, {"v1", 11}},
            []testVal{{"v2", 22}, {"v1", 11}},
        },
        { // add new validators
            []testVal{{"v2", 20}, {"v1", 10}},
            []testVal{{"v4", 40}, {"v3", 30}},
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
        },
        { // add new validator to middle
            []testVal{{"v3", 20}, {"v1", 10}},
            []testVal{{"v2", 30}},
            []testVal{{"v2", 30}, {"v3", 20}, {"v1", 10}},
        },
        { // add new validator to beginning
            []testVal{{"v3", 20}, {"v2", 10}},
            []testVal{{"v1", 30}},
            []testVal{{"v1", 30}, {"v3", 20}, {"v2", 10}},
        },
        { // delete validators
            []testVal{{"v3", 30}, {"v2", 20}, {"v1", 10}},
            []testVal{{"v2", 0}},
            []testVal{{"v3", 30}, {"v1", 10}},
        },
    }

    for i, tt := range valSetUpdatesBasicTests {
        // create a new set and apply updates, keeping copies for the checks
        valSet := createNewValidatorSet(tt.startVals)
        valList := createNewValidatorList(tt.updateVals)
        err := valSet.UpdateWithChangeSet(valList)
        assert.NoError(t, err, "test %d", i)

        valListCopy := validatorListCopy(valSet.Validators)
        // check that the voting power in the set's validators is not changing if the voting power
        // is changed in the list of validators previously passed as parameter to UpdateWithChangeSet.
        // this is to make sure copies of the validators are made by UpdateWithChangeSet.
        if len(valList) > 0 {
            valList[0].VotingPower++
            assert.Equal(t, toTestValList(valListCopy), toTestValList(valSet.Validators), "test %v", i)
        }

        // check the final validator list is as expected and the set is properly scaled and centered.
        assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators), "test %v", i)
        verifyValidatorSet(t, valSet)
    }
}
// Test that different permutations of an update give the same result.
func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) {
    // startVals - initial validators to create the set with
    // updateVals - a sequence of updates to be applied to the set.
    // updateVals is shuffled a number of times during testing to check for the same resulting validator set.
    valSetUpdatesOrderTests := []struct {
        startVals  []testVal
        updateVals []testVal
    }{
        0: { // order of changes should not matter, the final validator sets should be the same
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}},
            []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}},

        1: { // order of additions should not matter
            []testVal{{"v2", 20}, {"v1", 10}},
            []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}},

        2: { // order of removals should not matter
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
            []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}},

        3: { // order of mixed operations should not matter
            []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
            []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}},
    }

    for i, tt := range valSetUpdatesOrderTests {
        // create a new set and apply updates
        valSet := createNewValidatorSet(tt.startVals)
        valSetCopy := valSet.Copy()
        valList := createNewValidatorList(tt.updateVals)
        assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList))

        // save the result as expected for next updates
        valSetExp := valSetCopy.Copy()

        // perform at most 20 permutations on the updates and call UpdateWithChangeSet()
        n := len(tt.updateVals)
        maxNumPerms := tmmath.MinInt(20, n*n)
        for j := 0; j < maxNumPerms; j++ {
            // create a copy of original set and apply a random permutation of updates
            valSetCopy := valSet.Copy()
            valList := createNewValidatorList(permutation(tt.updateVals))

            // check there was no error and the set is properly scaled and centered.
            assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList),
                "test %v failed for permutation %v", i, valList)
            verifyValidatorSet(t, valSetCopy)

            // verify the resulting set is the same as the expected one
            assert.Equal(t, valSetCopy, valSetExp,
                "test %v failed for permutation %v", i, valList)
        }
    }
}
// This tests the private validator_set.go:applyUpdates() function, used only for additions and changes.
// It should perform a proper merge of updatedVals and startVals.
func TestValSetApplyUpdatesTestsExecute(t *testing.T) {
    valSetUpdatesBasicTests := []struct {
        startVals    []testVal
        updateVals   []testVal
        expectedVals []testVal
    }{
        // additions
        0: { // prepend
            []testVal{{"v4", 44}, {"v5", 55}},
            []testVal{{"v1", 11}},
            []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}},
        1: { // append
            []testVal{{"v4", 44}, {"v5", 55}},
            []testVal{{"v6", 66}},
            []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
        2: { // insert
            []testVal{{"v4", 44}, {"v6", 66}},
            []testVal{{"v5", 55}},
            []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
        3: { // insert multi
            []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}},
            []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}},
            []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}},

        // changes
        4: { // head
            []testVal{{"v1", 111}, {"v2", 22}},
            []testVal{{"v1", 11}},
            []testVal{{"v1", 11}, {"v2", 22}}},
        5: { // tail
            []testVal{{"v1", 11}, {"v2", 222}},
            []testVal{{"v2", 22}},
            []testVal{{"v1", 11}, {"v2", 22}}},
        6: { // middle
            []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}},
            []testVal{{"v2", 22}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
        7: { // multi
            []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},

        // additions and changes
        8: {
            []testVal{{"v1", 111}, {"v2", 22}},
            []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}},
            []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}},
    }

    for i, tt := range valSetUpdatesBasicTests {
        // create a new validator set with the start values
        valSet := createNewValidatorSet(tt.startVals)

        // applyUpdates() with the update values
        valList := createNewValidatorList(tt.updateVals)
        valSet.applyUpdates(valList)

        // check the new list of validators for proper merge
        assert.Equal(t, toTestValList(valSet.Validators), tt.expectedVals, "test %v", i)
    }
}
type testVSetCfg struct {
    name         string
    startVals    []testVal
    deletedVals  []testVal
    updatedVals  []testVal
    addedVals    []testVal
    expectedVals []testVal
    expErr       error
}

func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg {
    if nBase <= 0 || nAddMax < 0 {
        panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax))
    }

    const maxPower = 1000
    var nOld, nDel, nChanged, nAdd int

    nOld = int(tmrand.Uint()%uint(nBase)) + 1
    if nBase-nOld > 0 {
        nDel = int(tmrand.Uint() % uint(nBase-nOld))
    }
    nChanged = nBase - nOld - nDel

    if nAddMax > 0 {
        nAdd = tmrand.Int()%nAddMax + 1
    }

    cfg := testVSetCfg{}

    cfg.startVals = make([]testVal, nBase)
    cfg.deletedVals = make([]testVal, nDel)
    cfg.addedVals = make([]testVal, nAdd)
    cfg.updatedVals = make([]testVal, nChanged)
    cfg.expectedVals = make([]testVal, nBase-nDel+nAdd)

    for i := 0; i < nBase; i++ {
        cfg.startVals[i] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
        if i < nOld {
            cfg.expectedVals[i] = cfg.startVals[i]
        }
        if i >= nOld && i < nOld+nChanged {
            cfg.updatedVals[i-nOld] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
            cfg.expectedVals[i] = cfg.updatedVals[i-nOld]
        }
        if i >= nOld+nChanged {
            cfg.deletedVals[i-nOld-nChanged] = testVal{fmt.Sprintf("v%d", i), 0}
        }
    }

    for i := nBase; i < nBase+nAdd; i++ {
        cfg.addedVals[i-nBase] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
        cfg.expectedVals[i-nDel] = cfg.addedVals[i-nBase]
    }

    sort.Sort(testValsByVotingPower(cfg.startVals))
    sort.Sort(testValsByVotingPower(cfg.deletedVals))
    sort.Sort(testValsByVotingPower(cfg.updatedVals))
    sort.Sort(testValsByVotingPower(cfg.addedVals))
    sort.Sort(testValsByVotingPower(cfg.expectedVals))

    return cfg
}

func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) {
	changes := make([]testVal, 0)
	for _, valsList := range valsLists {
		changes = append(changes, valsList...)
	}
	valList := createNewValidatorList(changes)
	err := valSet.UpdateWithChangeSet(valList)
	if expErr != nil {
		assert.Equal(t, expErr, err)
	} else {
		assert.NoError(t, err)
	}
}
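
// A random configuration is exercised the same way as the hand-written ones,
// e.g. (illustrative):
//
//	cfg := randTestVSetCfg(t, 100, 10)
//	valSet := createNewValidatorSet(cfg.startVals)
//	applyChangesToValSet(t, nil, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals)
//	assert.Equal(t, cfg.expectedVals, toTestValList(valSet.Validators))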

func TestValSetUpdatePriorityOrderTests(t *testing.T) {
	const nMaxElections int32 = 5000

	testCases := []testVSetCfg{
		0: { // remove high power validator, keep old equal lower power validators
			startVals:    []testVal{{"v3", 1000}, {"v1", 1}, {"v2", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{},
			addedVals:    []testVal{},
			expectedVals: []testVal{{"v1", 1}, {"v2", 1}},
		},
		1: { // remove high power validator, keep old different power validators
			startVals:    []testVal{{"v3", 1000}, {"v2", 10}, {"v1", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{},
			addedVals:    []testVal{},
			expectedVals: []testVal{{"v2", 10}, {"v1", 1}},
		},
		2: { // remove high power validator, add new low power validators, keep old lower power
			startVals:    []testVal{{"v3", 1000}, {"v2", 2}, {"v1", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{{"v2", 1}},
			addedVals:    []testVal{{"v5", 50}, {"v4", 40}},
			expectedVals: []testVal{{"v5", 50}, {"v4", 40}, {"v1", 1}, {"v2", 1}},
		},

		// generate random configurations with nBase validators,
		// randomly split into kept, updated and deleted validators,
		// plus up to nAddMax new validators to be added
		3: randTestVSetCfg(t, 100, 10),
		4: randTestVSetCfg(t, 1000, 100),
		5: randTestVSetCfg(t, 10, 100),
		6: randTestVSetCfg(t, 100, 1000),
		7: randTestVSetCfg(t, 1000, 1000),
	}

	for _, cfg := range testCases {
		// create a new validator set
		valSet := createNewValidatorSet(cfg.startVals)
		verifyValidatorSet(t, valSet)

		// run elections up to nMaxElections times, apply changes and verify that the priority order is correct
		verifyValSetUpdatePriorityOrder(t, valSet, cfg, nMaxElections)
	}
}

func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int32) {
	// run a random number of elections (up to nMaxElections)
	valSet.IncrementProposerPriority(tmrand.Int31()%nMaxElections + 1)

	// apply the changes and check the expected resulting set
	applyChangesToValSet(t, nil, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals)

	// basic checks
	assert.Equal(t, cfg.expectedVals, toTestValList(valSet.Validators))
	verifyValidatorSet(t, valSet)

	// verify that the added validators have the smallest priority:
	// - they should be at the beginning of updatedValsPriSorted since it is
	//   sorted by priority
	if len(cfg.addedVals) > 0 {
		updatedValsPriSorted := validatorListCopy(valSet.Validators)
		sort.Sort(validatorsByPriority(updatedValsPriSorted))

		addedValsPriSlice := updatedValsPriSorted[:len(cfg.addedVals)]
		sort.Sort(ValidatorsByVotingPower(addedValsPriSlice))
		assert.Equal(t, cfg.addedVals, toTestValList(addedValsPriSlice))

		// - and should all have the same priority
		expectedPri := addedValsPriSlice[0].ProposerPriority
		for _, val := range addedValsPriSlice[1:] {
			assert.Equal(t, expectedPri, val.ProposerPriority)
		}
	}
}
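
// Note: the "same, smallest priority" check above relies on the update logic
// assigning every newly added validator the same initial proposer priority
// (roughly -1.125 * TotalVotingPower in the current implementation), which
// places all additions at the head of the priority-sorted list.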

func TestValSetUpdateOverflowRelated(t *testing.T) {
	testCases := []testVSetCfg{
		{
			name:         "1 no false overflow error messages for updates",
			startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
			updatedVals:  []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
			expectedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
			expErr:       nil,
		},
		{
			// this test shows that it is important to apply the updates in the order of the change in power,
			// i.e. to apply the updates with decreases in power first (the v2 change in this case)
			name:         "2 no false overflow error messages for updates",
			startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
			updatedVals:  []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}},
			expectedVals: []testVal{{"v2", MaxTotalVotingPower / 2}, {"v1", MaxTotalVotingPower/2 - 1}},
			expErr:       nil,
		},
		{
			name:         "3 no false overflow error messages for deletes",
			startVals:    []testVal{{"v1", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
			deletedVals:  []testVal{{"v1", 0}},
			addedVals:    []testVal{{"v4", MaxTotalVotingPower - 2}},
			expectedVals: []testVal{{"v4", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
			expErr:       nil,
		},
		{
			name: "4 no false overflow error messages for adds, updates and deletes",
			startVals: []testVal{
				{"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4},
				{"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}},
			deletedVals: []testVal{{"v2", 0}},
			updatedVals: []testVal{
				{"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}},
			addedVals: []testVal{{"v5", 3}},
			expectedVals: []testVal{
				{"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}},
			expErr: nil,
		},
		{
			name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8",
			startVals: []testVal{
				{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
				{"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
			updatedVals: []testVal{
				{"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower},
				{"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower},
				{"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}},
			expectedVals: []testVal{
				{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
				{"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
			expErr: ErrTotalVotingPowerOverflow,
		},
	}

	for _, tt := range testCases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			valSet := createNewValidatorSet(tt.startVals)
			verifyValidatorSet(t, valSet)

			// execute update and verify returned error is as expected
			applyChangesToValSet(t, tt.expErr, valSet, tt.addedVals, tt.updatedVals, tt.deletedVals)

			// verify updated validator set is as expected
			assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators))
			verifyValidatorSet(t, valSet)
		})
	}
}
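
// Worked arithmetic for case 2 above, writing M for MaxTotalVotingPower: the
// set starts at total power M (v2 = M-1, v1 = 1). If v1's increase were
// verified first, the transient total would be M + (M/2 - 2) = 3M/2 - 2, a
// false overflow, even though the final total M/2 + (M/2 - 1) = M - 1 is
// within the cap. Verifying decreases (v2 here) before increases keeps the
// running total below the cap throughout.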

func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) {
	var (
		blockID                       = makeBlockIDRandom()
		voteSet, originalValset, vals = randVoteSet(1, 1, tmproto.PrecommitType, 6, 1)
		commit, err                   = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now())
		newValSet, _                  = RandValidatorSet(2, 1)
	)
	require.NoError(t, err)

	testCases := []struct {
		valSet *ValidatorSet
		err    bool
	}{
		// good
		0: {
			valSet: originalValset,
			err:    false,
		},
		// bad - no overlap between validator sets
		1: {
			valSet: newValSet,
			err:    true,
		},
		// good - first two are different but the rest are the same -> >1/3
		2: {
			valSet: NewValidatorSet(append(newValSet.Validators, originalValset.Validators...)),
			err:    false,
		},
	}

	for _, tc := range testCases {
		err = tc.valSet.VerifyCommitLightTrusting("test_chain_id", commit,
			tmmath.Fraction{Numerator: 1, Denominator: 3})
		if tc.err {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
	}
}
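
// A trust level of Fraction{Numerator: 1, Denominator: 3} requires the commit
// to be signed by validators holding more than 1/3 of the trusted set's total
// voting power. Case 2 passes because all six original validators (of equal
// power) signed, well over 1/3 of the combined eight-validator set.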

func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) {
	var (
		blockID               = makeBlockIDRandom()
		voteSet, valSet, vals = randVoteSet(1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower)
		commit, err           = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now())
	)
	require.NoError(t, err)

	err = valSet.VerifyCommitLightTrusting("test_chain_id", commit,
		tmmath.Fraction{Numerator: 25, Denominator: 55})
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "int64 overflow")
	}
}
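
// The error is expected because the trust-level check multiplies the total
// voting power by the fraction's numerator: with a single validator holding
// MaxTotalVotingPower (int64(math.MaxInt64) / 8), multiplying by 25 exceeds
// math.MaxInt64, so verification reports an int64 overflow instead of
// panicking.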

func TestSafeMul(t *testing.T) {
	testCases := []struct {
		a        int64
		b        int64
		c        int64
		overflow bool
	}{
		0: {0, 0, 0, false},
		1: {1, 0, 0, false},
		2: {2, 3, 6, false},
		3: {2, -3, -6, false},
		4: {-2, -3, 6, false},
		5: {-2, 3, -6, false},
		6: {math.MaxInt64, 1, math.MaxInt64, false},
		7: {math.MaxInt64 / 2, 2, math.MaxInt64 - 1, false},
		8: {math.MaxInt64 / 2, 3, 0, true},
		9: {math.MaxInt64, 2, 0, true},
	}

	for i, tc := range testCases {
		c, overflow := safeMul(tc.a, tc.b)
		assert.Equal(t, tc.c, c, "#%d", i)
		assert.Equal(t, tc.overflow, overflow, "#%d", i)
	}
}
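
// A minimal sketch of an overflow-checked multiply with the behavior the
// table above expects (0 is returned together with the overflow flag); an
// illustration only, not necessarily the safeMul implementation under test:
//
//	func safeMulSketch(a, b int64) (int64, bool) {
//		if a == 0 || b == 0 {
//			return 0, false
//		}
//		if (a == math.MinInt64 && b == -1) || (b == math.MinInt64 && a == -1) {
//			return 0, true // |math.MinInt64| is not representable as int64
//		}
//		c := a * b
//		if c/b != a { // wrapped multiplication does not divide back
//			return 0, true
//		}
//		return c, false
//	}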

func TestValidatorSetProtoBuf(t *testing.T) {
	valset, _ := RandValidatorSet(10, 100)
	valset2, _ := RandValidatorSet(10, 100)
	valset2.Validators[0] = &Validator{}

	valset3, _ := RandValidatorSet(10, 100)
	valset3.Proposer = nil

	valset4, _ := RandValidatorSet(10, 100)
	valset4.Proposer = &Validator{}

	testCases := []struct {
		msg      string
		v1       *ValidatorSet
		expPass1 bool // expect ToProto to succeed
		expPass2 bool // expect ValidatorSetFromProto to succeed
	}{
		{"success", valset, true, true},
		{"fail valSet2, pubkey empty", valset2, false, false},
		{"fail nil Proposer", valset3, false, false},
		{"fail empty Proposer", valset4, false, false},
		{"fail empty valSet", &ValidatorSet{}, false, false},
		{"fail nil", nil, false, false},
	}

	for _, tc := range testCases {
		protoValSet, err := tc.v1.ToProto()
		if tc.expPass1 {
			require.NoError(t, err, tc.msg)
		} else {
			require.Error(t, err, tc.msg)
		}

		valSet, err := ValidatorSetFromProto(protoValSet)
		if tc.expPass2 {
			require.NoError(t, err, tc.msg)
			require.EqualValues(t, tc.v1, valSet, tc.msg)
		} else {
			require.Error(t, err, tc.msg)
		}
	}
}
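
// The round trip above checks both directions independently: ToProto should
// reject malformed sets (an empty validator, a nil or empty proposer, an
// empty or nil set), and ValidatorSetFromProto should reject the resulting
// inputs, so FromProto(ToProto(v)) reproduces exactly the valid sets.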

//---------------------
// Sort validators by priority and address

type validatorsByPriority []*Validator

func (valz validatorsByPriority) Len() int {
	return len(valz)
}

func (valz validatorsByPriority) Less(i, j int) bool {
	if valz[i].ProposerPriority < valz[j].ProposerPriority {
		return true
	}
	if valz[i].ProposerPriority > valz[j].ProposerPriority {
		return false
	}
	return bytes.Compare(valz[i].Address, valz[j].Address) < 0
}

func (valz validatorsByPriority) Swap(i, j int) {
	valz[i], valz[j] = valz[j], valz[i]
}

//-------------------------------------
// Sort testVals by voting power (descending), then by name

type testValsByVotingPower []testVal

func (tvals testValsByVotingPower) Len() int {
	return len(tvals)
}

func (tvals testValsByVotingPower) Less(i, j int) bool {
	if tvals[i].power == tvals[j].power {
		return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1
	}
	return tvals[i].power > tvals[j].power
}

func (tvals testValsByVotingPower) Swap(i, j int) {
	tvals[i], tvals[j] = tvals[j], tvals[i]
}

//-------------------------------------
// Benchmark tests

func BenchmarkUpdates(b *testing.B) {
	const (
		n = 100
		m = 2000
	)
	// Init with n validators
	vs := make([]*Validator, n)
	for j := 0; j < n; j++ {
		vs[j] = newValidator([]byte(fmt.Sprintf("v%d", j)), 100)
	}
	valSet := NewValidatorSet(vs)
	l := len(valSet.Validators)

	// Make m new validators
	newValList := make([]*Validator, m)
	for j := 0; j < m; j++ {
		newValList[j] = newValidator([]byte(fmt.Sprintf("v%d", j+l)), 1000)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Add the m new validators to a fresh copy of the set
		valSetCopy := valSet.Copy()
		assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList))
	}
}
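
// To run only this benchmark with the standard Go tooling (the package path
// below is illustrative):
//
//	go test -run=^$ -bench=BenchmarkUpdates ./types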