types: prevent spurious validator power overflow warnings when changing the validator set (#4183)

Fix for #4164. The general problem is that under certain conditions an overflow warning is issued when attempting to update a validator set, even though the final set's total voting power is not over the maximum allowed. The root cause is that in verifyUpdates(), updates are verified against the total voting power in order of validator address. A low-address validator may therefore increase its power such that the running total temporarily exceeds MaxTotalVotingPower. Scenarios that remove and add/update validators with high voting power in the same update operation trigger the same false warning, and the updates are not applied. The main changes are in verifyUpdates(), which now performs the verification starting with the decreases in power, and also takes into account the removals that are part of the update.

## Commits:

* tests for overflow detection and prevention
* test fix
* more tests
* fix the false overflow warnings and golint
* scopelint warning fix
* review comments
* variant using sort by amount of change in power
* compute separately the number of new validators in the update
* types: use a switch in processChanges
* more review comments
* types: use HasAddress in numNewValidators
* types: refactor verifyUpdates: copy updates, sort them by delta, and use the resulting slice to calculate tvpAfterUpdatesBeforeRemovals
* remove unused structs
* review comments
* update changelog
5 years ago
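The delta-ordered check described above lends itself to a small illustration. Below is a minimal, self-contained Go sketch; the names verifyUpdates and MaxTotalVotingPower mirror the commit message, but the types, the cap value, and the map-based validator set are simplified stand-ins, not the actual Tendermint implementation:

```go
package main

import (
	"fmt"
	"sort"
)

// Illustrative cap; the real MaxTotalVotingPower is defined in the types package.
const MaxTotalVotingPower = int64(1) << 60

// update is a hypothetical stand-in for a validator update: power 0 means removal.
type update struct {
	addr  string
	power int64
}

// verifyUpdates rejects update batches whose running total voting power would
// exceed the cap. Sorting by delta applies all decreases and removals first,
// so a batch whose *final* total is legal can never trip the bound part-way
// through -- which was the source of the false warnings.
func verifyUpdates(current map[string]int64, updates []update) error {
	tvp := int64(0)
	for _, p := range current {
		tvp += p
	}
	deltas := make([]int64, 0, len(updates))
	for _, u := range updates {
		deltas = append(deltas, u.power-current[u.addr]) // missing addr -> delta is full new power
	}
	sort.Slice(deltas, func(i, j int) bool { return deltas[i] < deltas[j] })
	for _, d := range deltas {
		tvp += d
		if tvp > MaxTotalVotingPower {
			return fmt.Errorf("running total %d exceeds MaxTotalVotingPower", tvp)
		}
	}
	return nil
}

func main() {
	current := map[string]int64{"a": MaxTotalVotingPower - 10, "b": 10}
	// "a" grows by 5 while "b" is removed: the final total is legal. Because the
	// removal (delta -10) is applied before the increase (+5), no false overflow.
	batch := []update{{addr: "a", power: MaxTotalVotingPower - 5}, {addr: "b", power: 0}}
	fmt.Println(verifyUpdates(current, batch)) // <nil>
}
```

In address order, the same batch would be rejected: the increase for "a" would be counted before the removal of "b", briefly pushing the running total past the cap.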
Normalize priorities to not exceed total voting power (#3049)

* more proposer priority tests
  - test that we don't reset to zero when updating / adding
  - test that same-power validators alternate
* add another test to track / simulate behaviour similar to #2960
* address some of Chris' review comments
* address some more of Chris' review comments
* temporarily pushing a branch with the following changes: the total power might change if a validator is added, removed, or updated; decrement the accums (of all validators) directly after any of these events, by the inverse of the change
* fix #2960 by re-normalizing / scaling priorities to be in the bounds of the total power; additionally:
  - remove the heap where it doesn't make sense
  - average only at the end of IncrementProposerPriority instead of on each iteration
  - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect the above changes
* fix tests
* add comment
* update changelog pending & some minor changes
* comment that division will floor the result & fix typo
* update TestLargeGenesisValidator: remove TODO and increase the large genesis validator's voting power accordingly
* move changelog entry to P2P Protocol
* ceil instead of flooring when dividing & update test
* quickly fix failing TestProposerPriorityDoesNotGetResetToZero: divide by Ceil((maxPriority - minPriority) / (2*totalVotingPower))
* fix typo: rename getValWitMostPriority -> getValWithMostPriority
* test proposer frequencies
* return absolute value for diff; keep testing
* use a for loop for div
* cleanup, more tests
* spellcheck
* get rid of floats: manually ceil where necessary
* Remove float, simplify, fix tests to match Chris's proof (#3157)
5 years ago
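As a rough illustration of the float-free rescaling described in the commits above: if the spread between the highest and lowest proposer priority exceeds 2*totalVotingPower, every priority is divided by the ceiling of spread/(2*totalVotingPower). The sketch below is hedged: rescalePriorities and ceilDiv are hypothetical helper names, and the real logic in ValidatorSet differs in detail:

```go
package main

import "fmt"

// ceilDiv computes ceil(a/b) for non-negative a and positive b without floats,
// per the "manually ceil where necessary" commit.
func ceilDiv(a, b int64) int64 {
	return (a + b - 1) / b
}

// rescalePriorities shrinks priorities in place so that the spread
// (maxPriority - minPriority) stays within 2*totalVotingPower.
func rescalePriorities(priorities []int64, totalVotingPower int64) {
	if len(priorities) == 0 {
		return
	}
	maxP, minP := priorities[0], priorities[0]
	for _, p := range priorities {
		if p > maxP {
			maxP = p
		}
		if p < minP {
			minP = p
		}
	}
	diff := maxP - minP
	bound := 2 * totalVotingPower
	if diff > bound {
		ratio := ceilDiv(diff, bound)
		for i := range priorities {
			priorities[i] /= ratio
		}
	}
}

func main() {
	pri := []int64{-1000, 0, 5000}
	rescalePriorities(pri, 1000) // bound = 2000, spread = 6000, ratio = ceil(3) = 3
	fmt.Println(pri)             // [-333 0 1666]
}
```

Using the ceiling rather than the floor of the ratio guarantees the post-division spread actually lands within the bound, which is what the TestProposerPriorityDoesNotGetResetToZero fix relies on.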
types: verify commit fully

Since the light client work introduced in v0.33, it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703

This means proposers can propose blocks that contain valid +2/3 signatures, and the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact the safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows the txs are invalid and can punish the proposer. Since applications don't verify commit signatures directly (they trust Tendermint to do that), they won't be able to detect it.

This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators, without actually waiting for their signatures. There may be other tricks that can be played because of this.

In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node cannot. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance, light clients can also skip verifying votes for nil while full nodes cannot).

See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
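To make the distinction concrete, here is a hedged Go sketch contrasting the two verification strategies; commitSig and both function names are simplified stand-ins for illustration, not the Tendermint API:

```go
package main

import (
	"errors"
	"fmt"
)

// commitSig is a hypothetical flattened view of one commit signature.
type commitSig struct {
	votedForBlock bool  // counted toward the block's +2/3 tally
	votingPower   int64 // power of the signing validator
	sigValid      bool  // stand-in for pubkey.VerifySignature(msg, sig)
}

// verifyCommitFull is what a full node should do: check every signature in
// the commit, and additionally require +2/3 of the power behind the block.
func verifyCommitFull(sigs []commitSig, totalPower int64) error {
	tallied := int64(0)
	for _, s := range sigs {
		if !s.sigValid {
			return errors.New("invalid commit signature")
		}
		if s.votedForBlock {
			tallied += s.votingPower
		}
	}
	if 3*tallied <= 2*totalPower {
		return errors.New("insufficient voting power")
	}
	return nil
}

// verifyCommitLight is acceptable only for light clients: it returns as soon
// as +2/3 of the power is validated, never looking at the remaining entries.
func verifyCommitLight(sigs []commitSig, totalPower int64) error {
	tallied := int64(0)
	for _, s := range sigs {
		if !s.votedForBlock {
			continue
		}
		if !s.sigValid {
			return errors.New("invalid commit signature")
		}
		tallied += s.votingPower
		if 3*tallied > 2*totalPower {
			return nil // early exit: later signatures are never checked
		}
	}
	return errors.New("insufficient voting power")
}

func main() {
	// The last signature is garbage, but +2/3 is reached without it.
	sigs := []commitSig{
		{votedForBlock: true, votingPower: 40, sigValid: true},
		{votedForBlock: true, votingPower: 40, sigValid: true},
		{votedForBlock: true, votingPower: 20, sigValid: false},
	}
	fmt.Println(verifyCommitLight(sigs, 100)) // <nil>: the bad sig is skipped
	fmt.Println(verifyCommitFull(sigs, 100))  // invalid commit signature
}
```

The example reproduces the attack from the message: under the early-exit strategy a proposer's fabricated third signature passes, so any LastCommitInfo-based reward logic would credit a validator that never signed.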
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact the safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows that they are invalid and can punish the proposer. But since applications don't verify commit signatures directly (they trust Tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node cannot. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance, light clients can also skip verifying votes for nil while full nodes cannot). See a commit with a bad signature that verifies here: 56367fd. From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
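To make that distinction concrete, here is a minimal sketch of why a full node must keep checking signatures past the +2/3 threshold while a light client may stop early. The names (commitSigStub, verifyFull, verifyLight) and the simplified fields are illustrative stand-ins, not the real validator_set.go API:

package sketch

import (
	"errors"
	"fmt"
)

// commitSigStub is a simplified stand-in for a CommitSig plus its
// verification result; the real type carries addresses and raw signatures.
type commitSigStub struct {
	present bool  // false models BlockIDFlagAbsent
	power   int64 // voting power of the signing validator
	valid   bool  // whether the signature checks out
}

// verifyFull models full-node verification: every present signature is
// checked, so arbitrary bytes in the trailing -1/3 are rejected.
func verifyFull(sigs []commitSigStub, totalPower int64) error {
	tallied := int64(0)
	for i, cs := range sigs {
		if !cs.present {
			continue
		}
		if !cs.valid {
			return fmt.Errorf("wrong signature (#%d)", i)
		}
		tallied += cs.power
	}
	if tallied*3 <= totalPower*2 {
		return errors.New("insufficient voting power")
	}
	return nil
}

// verifyLight models light-client verification: it returns as soon as +2/3
// of the power has valid signatures, so later entries are never inspected -
// exactly the gap described in the commit message above.
func verifyLight(sigs []commitSigStub, totalPower int64) error {
	tallied := int64(0)
	for i, cs := range sigs {
		if !cs.present {
			continue
		}
		if !cs.valid {
			return fmt.Errorf("wrong signature (#%d)", i)
		}
		tallied += cs.power
		if tallied*3 > totalPower*2 {
			return nil // early exit: remaining signatures go unchecked
		}
	}
	return errors.New("insufficient voting power")
}

With four equal-power validators, a commit whose fourth signature is garbage passes verifyLight but fails verifyFull; that is the behavior pinned down by TestValidatorSet_VerifyCommit_CheckAllSignatures and TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned in the file below.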
package types

import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"strings"
	"testing"
	"testing/quick"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func TestValidatorSetBasic(t *testing.T) {
	// empty or nil validator lists are allowed,
	// but attempting to IncrementProposerPriority on them will panic.
	vset := NewValidatorSet([]*Validator{})
	assert.Panics(t, func() { vset.IncrementProposerPriority(1) })

	vset = NewValidatorSet(nil)
	assert.Panics(t, func() { vset.IncrementProposerPriority(1) })

	assert.EqualValues(t, vset, vset.Copy())
	assert.False(t, vset.HasAddress([]byte("some val")))
	idx, val := vset.GetByAddress([]byte("some val"))
	assert.EqualValues(t, -1, idx)
	assert.Nil(t, val)
	addr, val := vset.GetByIndex(-100)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	addr, val = vset.GetByIndex(0)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	addr, val = vset.GetByIndex(100)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	assert.Zero(t, vset.Size())
	assert.Equal(t, int64(0), vset.TotalVotingPower())
	assert.Nil(t, vset.GetProposer())
	assert.Equal(t, []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4,
		0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95,
		0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, vset.Hash())

	// add
	val = randValidator(vset.TotalVotingPower())
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))

	assert.True(t, vset.HasAddress(val.Address))
	idx, _ = vset.GetByAddress(val.Address)
	assert.EqualValues(t, 0, idx)
	addr, _ = vset.GetByIndex(0)
	assert.Equal(t, []byte(val.Address), addr)
	assert.Equal(t, 1, vset.Size())
	assert.Equal(t, val.VotingPower, vset.TotalVotingPower())
	assert.NotNil(t, vset.Hash())
	assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) })
	assert.Equal(t, val.Address, vset.GetProposer().Address)

	// update
	val = randValidator(vset.TotalVotingPower())
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	_, val = vset.GetByAddress(val.Address)
	val.VotingPower += 100
	proposerPriority := val.ProposerPriority

	val.ProposerPriority = 0
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	_, val = vset.GetByAddress(val.Address)
	assert.Equal(t, proposerPriority, val.ProposerPriority)
}

func TestValidatorSetValidateBasic(t *testing.T) {
	val, _ := RandValidator(false, 1)
	badVal := &Validator{}

	testCases := []struct {
		vals ValidatorSet
		err  bool
		msg  string
	}{
		{
			vals: ValidatorSet{},
			err:  true,
			msg:  "validator set is nil or empty",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{},
			},
			err: true,
			msg: "validator set is nil or empty",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{val},
			},
			err: true,
			msg: "proposer failed validate basic, error: nil validator",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{badVal},
			},
			err: true,
			msg: "invalid validator #0: validator does not have a public key",
		},
		{
			vals: ValidatorSet{
				Validators: []*Validator{val},
				Proposer:   val,
			},
			err: false,
			msg: "",
		},
	}

	for _, tc := range testCases {
		err := tc.vals.ValidateBasic()
		if tc.err {
			if assert.Error(t, err) {
				assert.Equal(t, tc.msg, err.Error())
			}
		} else {
			assert.NoError(t, err)
		}
	}
}

func TestCopy(t *testing.T) {
	vset := randValidatorSet(10)
	vsetHash := vset.Hash()
	if len(vsetHash) == 0 {
		t.Fatalf("ValidatorSet had unexpected zero hash")
	}

	vsetCopy := vset.Copy()
	vsetCopyHash := vsetCopy.Hash()
	if !bytes.Equal(vsetHash, vsetCopyHash) {
		t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash)
	}
}

// Test that IncrementProposerPriority requires positive times.
func TestIncrementProposerPriorityPositiveTimes(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("foo"), 1000),
		newValidator([]byte("bar"), 300),
		newValidator([]byte("baz"), 330),
	})

	assert.Panics(t, func() { vset.IncrementProposerPriority(-1) })
	assert.Panics(t, func() { vset.IncrementProposerPriority(0) })
	vset.IncrementProposerPriority(1)
}

func BenchmarkValidatorSetCopy(b *testing.B) {
	b.StopTimer()
	vset := NewValidatorSet([]*Validator{})
	for i := 0; i < 1000; i++ {
		privKey := ed25519.GenPrivKey()
		pubKey := privKey.PubKey()
		val := NewValidator(pubKey, 10)
		err := vset.UpdateWithChangeSet([]*Validator{val})
		if err != nil {
			panic("Failed to add validator")
		}
	}
	b.StartTimer()

	for i := 0; i < b.N; i++ {
		vset.Copy()
	}
}

//-------------------------------------------------------------------

func TestProposerSelection1(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("foo"), 1000),
		newValidator([]byte("bar"), 300),
		newValidator([]byte("baz"), 330),
	})
	var proposers []string
	for i := 0; i < 99; i++ {
		val := vset.GetProposer()
		proposers = append(proposers, string(val.Address))
		vset.IncrementProposerPriority(1)
	}
	expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
		` foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
		` foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz` +
		` foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo` +
		` foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo`
	if expected != strings.Join(proposers, " ") {
		t.Errorf("expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " "))
	}
}

func TestProposerSelection2(t *testing.T) {
	addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
	addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}

	// when all voting power is the same, we go in order of addresses
	val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100)
	valList := []*Validator{val0, val1, val2}
	vals := NewValidatorSet(valList)
	for i := 0; i < len(valList)*5; i++ {
		ii := i % len(valList)
		prop := vals.GetProposer()
		if !bytes.Equal(prop.Address, valList[ii].Address) {
			t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address)
		}
		vals.IncrementProposerPriority(1)
	}

	// One validator has more than the others, but not enough to propose twice in a row
	*val2 = *newValidator(addr2, 400)
	vals = NewValidatorSet(valList)
	// vals.IncrementProposerPriority(1)
	prop := vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr0) {
		t.Fatalf("Expected smallest address to be the next proposer. Got %X", prop.Address)
	}

	// One validator has more than the others, and enough to be proposer twice in a row
	*val2 = *newValidator(addr2, 401)
	vals = NewValidatorSet(valList)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr0) {
		t.Fatalf("Expected smallest address to be the next proposer. Got %X", prop.Address)
	}

	// each validator should be the proposer a proportional number of times
	val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3)
	valList = []*Validator{val0, val1, val2}
	propCount := make([]int, 3)
	vals = NewValidatorSet(valList)
	N := 1
	for i := 0; i < 120*N; i++ {
		prop := vals.GetProposer()
		ii := prop.Address[19]
		propCount[ii]++
		vals.IncrementProposerPriority(1)
	}

	if propCount[0] != 40*N {
		t.Fatalf(
			"Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d",
			40*N,
			120*N,
			propCount[0],
			120*N,
		)
	}
	if propCount[1] != 50*N {
		t.Fatalf(
			"Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d",
			50*N,
			120*N,
			propCount[1],
			120*N,
		)
	}
	if propCount[2] != 30*N {
		t.Fatalf(
			"Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d",
			30*N,
			120*N,
			propCount[2],
			120*N,
		)
	}
}

func TestProposerSelection3(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("avalidator_address12"), 1),
		newValidator([]byte("bvalidator_address12"), 1),
		newValidator([]byte("cvalidator_address12"), 1),
		newValidator([]byte("dvalidator_address12"), 1),
	})

	proposerOrder := make([]*Validator, 4)
	for i := 0; i < 4; i++ {
		// need all validators to have keys
		pk := ed25519.GenPrivKey().PubKey()
		vset.Validators[i].PubKey = pk
		proposerOrder[i] = vset.GetProposer()
		vset.IncrementProposerPriority(1)
	}

	// i for the loop
	// j for the times
	// we should go in order forever, despite some IncrementProposerPriority with times > 1
	var (
		i int
		j int32
	)
	for ; i < 10000; i++ {
		got := vset.GetProposer().Address
		expected := proposerOrder[j%4].Address
		if !bytes.Equal(got, expected) {
			t.Fatalf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)
		}

		// serialize, deserialize, check proposer
		b := vset.toBytes()
		vset = vset.fromBytes(b)

		computed := vset.GetProposer() // findGetProposer()
		if i != 0 {
			if !bytes.Equal(got, computed.Address) {
				t.Fatalf(
					"vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)",
					got,
					computed.Address,
					i,
					j,
				)
			}
		}

		// times is usually 1
		times := int32(1)
		mod := (tmrand.Int() % 5) + 1
		if tmrand.Int()%mod > 0 {
			// sometimes it's up to 5
			times = (tmrand.Int31() % 4) + 1
		}
		vset.IncrementProposerPriority(times)

		j += times
	}
}

func newValidator(address []byte, power int64) *Validator {
	return &Validator{Address: address, VotingPower: power}
}
func randPubKey() crypto.PubKey {
	// return the randomly filled buffer; the original returned a second,
	// unrelated random key and discarded this one
	pubKey := make(ed25519.PubKey, ed25519.PubKeySize)
	copy(pubKey, tmrand.Bytes(32))
	return pubKey
}
func randValidator(totalVotingPower int64) *Validator {
	// this modulo limits the ProposerPriority/VotingPower to stay in the
	// bounds of MaxTotalVotingPower minus the already existing voting power:
	val := NewValidator(randPubKey(), int64(tmrand.Uint64()%uint64(MaxTotalVotingPower-totalVotingPower)))
	val.ProposerPriority = tmrand.Int64() % (MaxTotalVotingPower - totalVotingPower)
	return val
}

func randValidatorSet(numValidators int) *ValidatorSet {
	validators := make([]*Validator, numValidators)
	totalVotingPower := int64(0)
	for i := 0; i < numValidators; i++ {
		validators[i] = randValidator(totalVotingPower)
		totalVotingPower += validators[i].VotingPower
	}
	return NewValidatorSet(validators)
}

func (vals *ValidatorSet) toBytes() []byte {
	pbvs, err := vals.ToProto()
	if err != nil {
		panic(err)
	}

	bz, err := pbvs.Marshal()
	if err != nil {
		panic(err)
	}

	return bz
}

func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet {
	pbvs := new(tmproto.ValidatorSet)
	err := pbvs.Unmarshal(b)
	if err != nil {
		// DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
		panic(err)
	}

	vs, err := ValidatorSetFromProto(pbvs)
	if err != nil {
		panic(err)
	}

	return vs
}

//-------------------------------------------------------------------

func TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) {
	// NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower()
	// which should panic on overflows:
	shouldPanic := func() {
		NewValidatorSet([]*Validator{
			{Address: []byte("a"), VotingPower: math.MaxInt64, ProposerPriority: 0},
			{Address: []byte("b"), VotingPower: math.MaxInt64, ProposerPriority: 0},
			{Address: []byte("c"), VotingPower: math.MaxInt64, ProposerPriority: 0},
		})
	}

	assert.Panics(t, shouldPanic)
}

func TestAvgProposerPriority(t *testing.T) {
	// Create Validator set without calling IncrementProposerPriority:
	tcs := []struct {
		vs   ValidatorSet
		want int64
	}{
		0: {ValidatorSet{Validators: []*Validator{{ProposerPriority: 0}, {ProposerPriority: 0}, {ProposerPriority: 0}}}, 0},
		1: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}, {ProposerPriority: 0}},
			}, math.MaxInt64 / 3,
		},
		2: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}},
			}, math.MaxInt64 / 2,
		},
		3: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: math.MaxInt64}},
			}, math.MaxInt64,
		},
		4: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MinInt64}, {ProposerPriority: math.MinInt64}},
			}, math.MinInt64,
		},
	}
	for i, tc := range tcs {
		got := tc.vs.computeAvgProposerPriority()
		assert.Equal(t, tc.want, got, "test case: %v", i)
	}
}

func TestAveragingInIncrementProposerPriority(t *testing.T) {
	// Test that the averaging works as expected inside of IncrementProposerPriority.
	// Each validator comes with zero voting power which simplifies reasoning about
	// the expected ProposerPriority.
	tcs := []struct {
		vs    ValidatorSet
		times int32
		avg   int64
	}{
		0: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 1},
				{Address: []byte("b"), ProposerPriority: 2},
				{Address: []byte("c"), ProposerPriority: 3}}},
			1, 2},
		1: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 10},
				{Address: []byte("b"), ProposerPriority: -10},
				{Address: []byte("c"), ProposerPriority: 1}}},
			// this should average twice but the average should be 0 after the first iteration
			// (voting power is 0 -> no changes)
			11, 1 / 3},
		2: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 100},
				{Address: []byte("b"), ProposerPriority: -10},
				{Address: []byte("c"), ProposerPriority: 1}}},
			1, 91 / 3},
	}
	for i, tc := range tcs {
		// work on copy to have the old ProposerPriorities:
		newVset := tc.vs.CopyIncrementProposerPriority(tc.times)
		for _, val := range tc.vs.Validators {
			_, updatedVal := newVset.GetByAddress(val.Address)
			assert.Equal(t, updatedVal.ProposerPriority, val.ProposerPriority-tc.avg, "test case: %v", i)
		}
	}
}

func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) {
	// Unlike TestAveragingInIncrementProposerPriority, this is a more complete test showing
	// how each ProposerPriority changes in relation to the validator's voting power.
	// The average is zero in each round:
	vp0 := int64(10)
	vp1 := int64(1)
	vp2 := int64(1)
	total := vp0 + vp1 + vp2
	avg := (vp0 + vp1 + vp2 - total) / 3
	vals := ValidatorSet{Validators: []*Validator{
		{Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0},
		{Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1},
		{Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}}
	tcs := []struct {
		vals                  *ValidatorSet
		wantProposerPrioritys []int64
		times                 int32
		wantProposer          *Validator
	}{
		0: {
			vals.Copy(),
			[]int64{
				// Accum + VotingPower - Avg:
				0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12)
				0 + vp1,
				0 + vp2},
			1,
			vals.Validators[0]},
		1: {
			vals.Copy(),
			[]int64{
				(0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too
				(0 + vp1) + vp1,
				(0 + vp2) + vp2},
			2,
			vals.Validators[0]}, // increment twice -> expect average to be subtracted twice
		2: {
			vals.Copy(),
			[]int64{
				0 + 3*(vp0-total) - avg, // still mostest
				0 + 3*vp1,
				0 + 3*vp2},
			3,
			vals.Validators[0]},
		3: {
			vals.Copy(),
			[]int64{
				0 + 4*(vp0-total), // still mostest
				0 + 4*vp1,
				0 + 4*vp2},
			4,
			vals.Validators[0]},
		4: {
			vals.Copy(),
			[]int64{
				0 + 4*(vp0-total) + vp0, // 4 iters was mostest
				0 + 5*vp1 - total,       // now this val is mostest for the 1st time (hence -12==totalVotingPower)
				0 + 5*vp2},
			5,
			vals.Validators[1]},
		5: {
			vals.Copy(),
			[]int64{
				0 + 6*vp0 - 5*total, // mostest again
				0 + 6*vp1 - total,   // mostest once up to here
				0 + 6*vp2},
			6,
			vals.Validators[0]},
		6: {
			vals.Copy(),
			[]int64{
				0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times
				0 + 7*vp1 - total,   // in 7 iters this val is mostest 1 time
				0 + 7*vp2},
			7,
			vals.Validators[0]},
		7: {
			vals.Copy(),
			[]int64{
				0 + 8*vp0 - 7*total, // mostest again
				0 + 8*vp1 - total,
				0 + 8*vp2},
			8,
			vals.Validators[0]},
		8: {
			vals.Copy(),
			[]int64{
				0 + 9*vp0 - 7*total,
				0 + 9*vp1 - total,
				0 + 9*vp2 - total}, // mostest
			9,
			vals.Validators[2]},
		9: {
			vals.Copy(),
			[]int64{
				0 + 10*vp0 - 8*total, // after 10 iters this is mostest again
				0 + 10*vp1 - total,   // after 6 iters this val is "mostest" once and not in between
				0 + 10*vp2 - total},  // in between 10 iters this val is "mostest" once
			10,
			vals.Validators[0]},
		10: {
			vals.Copy(),
			[]int64{
				0 + 11*vp0 - 9*total,
				0 + 11*vp1 - total,  // after 6 iters this val is "mostest" once and not in between
				0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once
			11,
			vals.Validators[0]},
	}
	for i, tc := range tcs {
		tc.vals.IncrementProposerPriority(tc.times)

		assert.Equal(t, tc.wantProposer.Address, tc.vals.GetProposer().Address,
			"test case: %v",
			i)

		for valIdx, val := range tc.vals.Validators {
			assert.Equal(t,
				tc.wantProposerPrioritys[valIdx],
				val.ProposerPriority,
				"test case: %v, validator: %v",
				i,
				valIdx)
		}
	}
}

func TestSafeAdd(t *testing.T) {
	f := func(a, b int64) bool {
		c, overflow := safeAdd(a, b)
		return overflow || (!overflow && c == a+b)
	}
	if err := quick.Check(f, nil); err != nil {
		t.Error(err)
	}
}

func TestSafeAddClip(t *testing.T) {
	assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
	assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
	assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10))
}

func TestSafeSubClip(t *testing.T) {
	assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10))
	assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64))
	assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64))
	assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10))
}
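For reference, the saturating behavior the assertions above pin down can be pictured with the following sketch. It is an assumed model consistent with the tests, not the verbatim helper from validator_set.go, and safeAddClipSketch is a hypothetical name; it relies only on the math import already present in this file:

// safeAddClipSketch mirrors the behavior asserted in TestSafeAddClip:
// instead of wrapping on int64 overflow, the result is clipped to the bound.
// Assumed model only; the production helper lives in validator_set.go.
func safeAddClipSketch(a, b int64) int64 {
	if b > 0 && a > math.MaxInt64-b {
		return math.MaxInt64 // would overflow upward: clip to max
	}
	if b < 0 && a < math.MinInt64-b {
		return math.MinInt64 // would overflow downward: clip to min
	}
	return a + b
}

Clipped arithmetic like this is what lets verifyValidatorSet below sum proposer priorities without panicking on extreme values.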
  605. //-------------------------------------------------------------------
  606. // Check VerifyCommit, VerifyCommitLight and VerifyCommitLightTrusting basic
  607. // verification.
  608. func TestValidatorSet_VerifyCommit_All(t *testing.T) {
  609. var (
  610. privKey = ed25519.GenPrivKey()
  611. pubKey = privKey.PubKey()
  612. v1 = NewValidator(pubKey, 1000)
  613. vset = NewValidatorSet([]*Validator{v1})
  614. chainID = "Lalande21185"
  615. )
  616. vote := examplePrecommit()
  617. vote.ValidatorAddress = pubKey.Address()
  618. v := vote.ToProto()
  619. sig, err := privKey.Sign(VoteSignBytes(chainID, v))
  620. require.NoError(t, err)
  621. vote.Signature = sig
  622. commit := NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote.CommitSig()})
  623. vote2 := *vote
  624. sig2, err := privKey.Sign(VoteSignBytes("EpsilonEridani", v))
  625. require.NoError(t, err)
  626. vote2.Signature = sig2
  627. testCases := []struct {
  628. description string
  629. chainID string
  630. blockID BlockID
  631. height int64
  632. commit *Commit
  633. expErr bool
  634. }{
  635. {"good", chainID, vote.BlockID, vote.Height, commit, false},
  636. {"wrong signature (#0)", "EpsilonEridani", vote.BlockID, vote.Height, commit, true},
  637. {"wrong block ID", chainID, makeBlockIDRandom(), vote.Height, commit, true},
  638. {"wrong height", chainID, vote.BlockID, vote.Height - 1, commit, true},
  639. {"wrong set size: 1 vs 0", chainID, vote.BlockID, vote.Height,
  640. NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{}), true},
  641. {"wrong set size: 1 vs 2", chainID, vote.BlockID, vote.Height,
  642. NewCommit(vote.Height, vote.Round, vote.BlockID,
  643. []CommitSig{vote.CommitSig(), {BlockIDFlag: BlockIDFlagAbsent}}), true},
  644. {"insufficient voting power: got 0, needed more than 666", chainID, vote.BlockID, vote.Height,
  645. NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}), true},
  646. {"wrong signature (#0)", chainID, vote.BlockID, vote.Height,
  647. NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote2.CommitSig()}), true},
  648. }
  649. for _, tc := range testCases {
  650. tc := tc
  651. t.Run(tc.description, func(t *testing.T) {
  652. err := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit)
  653. if tc.expErr {
  654. if assert.Error(t, err, "VerifyCommit") {
  655. assert.Contains(t, err.Error(), tc.description, "VerifyCommit")
  656. }
  657. } else {
  658. assert.NoError(t, err, "VerifyCommit")
  659. }
  660. err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit)
  661. if tc.expErr {
  662. if assert.Error(t, err, "VerifyCommitLight") {
  663. assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight")
  664. }
  665. } else {
  666. assert.NoError(t, err, "VerifyCommitLight")
  667. }
  668. })
  669. }
  670. }
  671. func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) {
  672. var (
  673. chainID = "test_chain_id"
  674. h = int64(3)
  675. blockID = makeBlockIDRandom()
  676. )
  677. voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
  678. commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
  679. require.NoError(t, err)
  680. // malleate 4th signature
  681. vote := voteSet.GetByIndex(3)
  682. v := vote.ToProto()
  683. err = vals[3].SignVote("CentaurusA", v)
  684. require.NoError(t, err)
  685. vote.Signature = v.Signature
  686. commit.Signatures[3] = vote.CommitSig()
  687. err = valSet.VerifyCommit(chainID, blockID, h, commit)
  688. if assert.Error(t, err) {
  689. assert.Contains(t, err.Error(), "wrong signature (#3)")
  690. }
  691. }
  692. func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) {
  693. var (
  694. chainID = "test_chain_id"
  695. h = int64(3)
  696. blockID = makeBlockIDRandom()
  697. )
  698. voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
  699. commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
  700. require.NoError(t, err)
  701. // malleate 4th signature (3 signatures are enough for 2/3+)
  702. vote := voteSet.GetByIndex(3)
  703. v := vote.ToProto()
  704. err = vals[3].SignVote("CentaurusA", v)
  705. require.NoError(t, err)
  706. vote.Signature = v.Signature
  707. commit.Signatures[3] = vote.CommitSig()
  708. err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
  709. assert.NoError(t, err)
  710. }
  711. func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) {
  712. var (
  713. chainID = "test_chain_id"
  714. h = int64(3)
  715. blockID = makeBlockIDRandom()
  716. )
  717. voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
  718. commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
  719. require.NoError(t, err)
  720. // malleate 3rd signature (2 signatures are enough for 1/3+ trust level)
  721. vote := voteSet.GetByIndex(2)
  722. v := vote.ToProto()
  723. err = vals[2].SignVote("CentaurusA", v)
  724. require.NoError(t, err)
  725. vote.Signature = v.Signature
  726. commit.Signatures[2] = vote.CommitSig()
  727. err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
  728. assert.NoError(t, err)
  729. }
  730. func TestEmptySet(t *testing.T) {
  731. var valList []*Validator
  732. valSet := NewValidatorSet(valList)
  733. assert.Panics(t, func() { valSet.IncrementProposerPriority(1) })
  734. assert.Panics(t, func() { valSet.RescalePriorities(100) })
  735. assert.Panics(t, func() { valSet.shiftByAvgProposerPriority() })
  736. assert.Panics(t, func() { assert.Zero(t, computeMaxMinPriorityDiff(valSet)) })
  737. valSet.GetProposer()
  738. // Add to empty set
  739. v1 := newValidator([]byte("v1"), 100)
  740. v2 := newValidator([]byte("v2"), 100)
  741. valList = []*Validator{v1, v2}
  742. assert.NoError(t, valSet.UpdateWithChangeSet(valList))
  743. verifyValidatorSet(t, valSet)
  744. // Delete all validators from set
  745. v1 = newValidator([]byte("v1"), 0)
  746. v2 = newValidator([]byte("v2"), 0)
  747. delList := []*Validator{v1, v2}
  748. assert.Error(t, valSet.UpdateWithChangeSet(delList))
  749. // Attempt delete from empty set
  750. assert.Error(t, valSet.UpdateWithChangeSet(delList))
  751. }
  752. func TestUpdatesForNewValidatorSet(t *testing.T) {
  753. v1 := newValidator([]byte("v1"), 100)
  754. v2 := newValidator([]byte("v2"), 100)
  755. valList := []*Validator{v1, v2}
  756. valSet := NewValidatorSet(valList)
  757. verifyValidatorSet(t, valSet)
  758. // Verify duplicates are caught in NewValidatorSet() and it panics
  759. v111 := newValidator([]byte("v1"), 100)
  760. v112 := newValidator([]byte("v1"), 123)
  761. v113 := newValidator([]byte("v1"), 234)
  762. valList = []*Validator{v111, v112, v113}
  763. assert.Panics(t, func() { NewValidatorSet(valList) })
  764. // Verify set including validator with voting power 0 cannot be created
  765. v1 = newValidator([]byte("v1"), 0)
  766. v2 = newValidator([]byte("v2"), 22)
  767. v3 := newValidator([]byte("v3"), 33)
  768. valList = []*Validator{v1, v2, v3}
  769. assert.Panics(t, func() { NewValidatorSet(valList) })
  770. // Verify set including validator with negative voting power cannot be created
  771. v1 = newValidator([]byte("v1"), 10)
  772. v2 = newValidator([]byte("v2"), -20)
  773. v3 = newValidator([]byte("v3"), 30)
  774. valList = []*Validator{v1, v2, v3}
  775. assert.Panics(t, func() { NewValidatorSet(valList) })
  776. }
  777. type testVal struct {
  778. name string
  779. power int64
  780. }
  781. func permutation(valList []testVal) []testVal {
  782. if len(valList) == 0 {
  783. return nil
  784. }
  785. permList := make([]testVal, len(valList))
  786. perm := tmrand.Perm(len(valList))
  787. for i, v := range perm {
  788. permList[v] = valList[i]
  789. }
  790. return permList
  791. }
  792. func createNewValidatorList(testValList []testVal) []*Validator {
  793. valList := make([]*Validator, 0, len(testValList))
  794. for _, val := range testValList {
  795. valList = append(valList, newValidator([]byte(val.name), val.power))
  796. }
  797. return valList
  798. }
  799. func createNewValidatorSet(testValList []testVal) *ValidatorSet {
  800. return NewValidatorSet(createNewValidatorList(testValList))
  801. }
  802. func valSetTotalProposerPriority(valSet *ValidatorSet) int64 {
  803. sum := int64(0)
  804. for _, val := range valSet.Validators {
  805. // mind overflow
  806. sum = safeAddClip(sum, val.ProposerPriority)
  807. }
  808. return sum
  809. }
  810. func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) {
  811. // verify that the capacity and length of validators is the same
  812. assert.Equal(t, len(valSet.Validators), cap(valSet.Validators))
  813. // verify that the set's total voting power has been updated
  814. tvp := valSet.totalVotingPower
  815. valSet.updateTotalVotingPower()
  816. expectedTvp := valSet.TotalVotingPower()
  817. assert.Equal(t, expectedTvp, tvp,
  818. "expected TVP %d. Got %d, valSet=%s", expectedTvp, tvp, valSet)
  819. // verify that validator priorities are centered
  820. valsCount := int64(len(valSet.Validators))
  821. tpp := valSetTotalProposerPriority(valSet)
  822. assert.True(t, tpp < valsCount && tpp > -valsCount,
  823. "expected total priority in (-%d, %d). Got %d", valsCount, valsCount, tpp)
  824. // verify that priorities are scaled
  825. dist := computeMaxMinPriorityDiff(valSet)
  826. assert.True(t, dist <= PriorityWindowSizeFactor*tvp,
  827. "expected priority distance < %d. Got %d", PriorityWindowSizeFactor*tvp, dist)
  828. }
  829. func toTestValList(valList []*Validator) []testVal {
  830. testList := make([]testVal, len(valList))
  831. for i, val := range valList {
  832. testList[i].name = string(val.Address)
  833. testList[i].power = val.VotingPower
  834. }
  835. return testList
  836. }
  837. func testValSet(nVals int, power int64) []testVal {
  838. vals := make([]testVal, nVals)
  839. for i := 0; i < nVals; i++ {
  840. vals[i] = testVal{fmt.Sprintf("v%d", i+1), power}
  841. }
  842. return vals
  843. }
  844. type valSetErrTestCase struct {
  845. startVals []testVal
  846. updateVals []testVal
  847. }
  848. func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) {
  849. // create a new set and apply updates, keeping copies for the checks
  850. valSet := createNewValidatorSet(tt.startVals)
  851. valSetCopy := valSet.Copy()
  852. valList := createNewValidatorList(tt.updateVals)
  853. valListCopy := validatorListCopy(valList)
  854. err := valSet.UpdateWithChangeSet(valList)
  855. // for errors check the validator set has not been changed
  856. assert.Error(t, err, "test %d", idx)
  857. assert.Equal(t, valSet, valSetCopy, "test %v", idx)
  858. // check the parameter list has not changed
  859. assert.Equal(t, valList, valListCopy, "test %v", idx)
  860. }
  861. func TestValSetUpdatesDuplicateEntries(t *testing.T) {
  862. testCases := []valSetErrTestCase{
  863. // Duplicate entries in changes
  864. { // first entry is duplicated change
  865. testValSet(2, 10),
  866. []testVal{{"v1", 11}, {"v1", 22}},
  867. },
  868. { // second entry is duplicated change
  869. testValSet(2, 10),
  870. []testVal{{"v2", 11}, {"v2", 22}},
  871. },
  872. { // change duplicates are separated by a valid change
  873. testValSet(2, 10),
  874. []testVal{{"v1", 11}, {"v2", 22}, {"v1", 12}},
  875. },
  876. { // change duplicates are separated by a valid change
  877. testValSet(3, 10),
  878. []testVal{{"v1", 11}, {"v3", 22}, {"v1", 12}},
  879. },
  880. // Duplicate entries in remove
  881. { // first entry is duplicated remove
  882. testValSet(2, 10),
  883. []testVal{{"v1", 0}, {"v1", 0}},
  884. },
  885. { // second entry is duplicated remove
  886. testValSet(2, 10),
  887. []testVal{{"v2", 0}, {"v2", 0}},
  888. },
  889. { // remove duplicates are separated by a valid remove
  890. testValSet(2, 10),
  891. []testVal{{"v1", 0}, {"v2", 0}, {"v1", 0}},
  892. },
  893. { // remove duplicates are separated by a valid remove
  894. testValSet(3, 10),
  895. []testVal{{"v1", 0}, {"v3", 0}, {"v1", 0}},
  896. },
  897. { // remove and update same val
  898. testValSet(2, 10),
  899. []testVal{{"v1", 0}, {"v2", 20}, {"v1", 30}},
  900. },
  901. { // duplicate entries in removes + changes
  902. testValSet(2, 10),
  903. []testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
  904. },
  905. { // duplicate entries in removes + changes
  906. testValSet(3, 10),
  907. []testVal{{"v1", 0}, {"v3", 5}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
  908. },
  909. }
  910. for i, tt := range testCases {
  911. executeValSetErrTestCase(t, i, tt)
  912. }
  913. }
  914. func TestValSetUpdatesOverflows(t *testing.T) {
  915. maxVP := MaxTotalVotingPower
  916. testCases := []valSetErrTestCase{
  917. { // single update leading to overflow
  918. testValSet(2, 10),
  919. []testVal{{"v1", math.MaxInt64}},
  920. },
  921. { // single update leading to overflow
  922. testValSet(2, 10),
  923. []testVal{{"v2", math.MaxInt64}},
  924. },
  925. { // add validator leading to overflow
  926. testValSet(1, maxVP),
  927. []testVal{{"v2", math.MaxInt64}},
  928. },
  929. { // add validator leading to exceed Max
  930. testValSet(1, maxVP-1),
  931. []testVal{{"v2", 5}},
  932. },
  933. { // add validator leading to exceed Max
  934. testValSet(2, maxVP/3),
  935. []testVal{{"v3", maxVP / 2}},
  936. },
  937. { // add validator leading to exceed Max
  938. testValSet(1, maxVP),
  939. []testVal{{"v2", maxVP}},
  940. },
  941. }
  942. for i, tt := range testCases {
  943. executeValSetErrTestCase(t, i, tt)
  944. }
  945. }
  946. func TestValSetUpdatesOtherErrors(t *testing.T) {
  947. testCases := []valSetErrTestCase{
  948. { // update with negative voting power
  949. testValSet(2, 10),
  950. []testVal{{"v1", -123}},
  951. },
  952. { // update with negative voting power
  953. testValSet(2, 10),
  954. []testVal{{"v2", -123}},
  955. },
  956. { // remove non-existing validator
  957. testValSet(2, 10),
  958. []testVal{{"v3", 0}},
  959. },
  960. { // delete all validators
  961. []testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
  962. []testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
  963. },
  964. }
  965. for i, tt := range testCases {
  966. executeValSetErrTestCase(t, i, tt)
  967. }
  968. }
  969. func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
  970. valSetUpdatesBasicTests := []struct {
  971. startVals []testVal
  972. updateVals []testVal
  973. expectedVals []testVal
  974. }{
  975. { // no changes
  976. testValSet(2, 10),
  977. []testVal{},
  978. testValSet(2, 10),
  979. },
  980. { // voting power changes
  981. testValSet(2, 10),
  982. []testVal{{"v2", 22}, {"v1", 11}},
  983. []testVal{{"v2", 22}, {"v1", 11}},
  984. },
  985. { // add new validators
  986. []testVal{{"v2", 20}, {"v1", 10}},
  987. []testVal{{"v4", 40}, {"v3", 30}},
  988. []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
  989. },
  990. { // add new validator to middle
  991. []testVal{{"v3", 20}, {"v1", 10}},
  992. []testVal{{"v2", 30}},
  993. []testVal{{"v2", 30}, {"v3", 20}, {"v1", 10}},
  994. },
  995. { // add new validator to beginning
  996. []testVal{{"v3", 20}, {"v2", 10}},
  997. []testVal{{"v1", 30}},
  998. []testVal{{"v1", 30}, {"v3", 20}, {"v2", 10}},
  999. },
  1000. { // delete validators
  1001. []testVal{{"v3", 30}, {"v2", 20}, {"v1", 10}},
  1002. []testVal{{"v2", 0}},
  1003. []testVal{{"v3", 30}, {"v1", 10}},
  1004. },
  1005. }
  1006. for i, tt := range valSetUpdatesBasicTests {
  1007. // create a new set and apply updates, keeping copies for the checks
  1008. valSet := createNewValidatorSet(tt.startVals)
  1009. valList := createNewValidatorList(tt.updateVals)
  1010. err := valSet.UpdateWithChangeSet(valList)
  1011. assert.NoError(t, err, "test %d", i)
  1012. valListCopy := validatorListCopy(valSet.Validators)
  1013. // check that the voting power in the set's validators is not changing if the voting power
  1014. // is changed in the list of validators previously passed as parameter to UpdateWithChangeSet.
  1015. // this is to make sure copies of the validators are made by UpdateWithChangeSet.
  1016. if len(valList) > 0 {
  1017. valList[0].VotingPower++
  1018. assert.Equal(t, toTestValList(valListCopy), toTestValList(valSet.Validators), "test %v", i)
  1019. }
  1020. // check the final validator list is as expected and the set is properly scaled and centered.
  1021. assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators), "test %v", i)
  1022. verifyValidatorSet(t, valSet)
  1023. }
  1024. }
  1025. // Test that different permutations of an update give the same result.
  1026. func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) {
  1027. // startVals - initial validators to create the set with
  1028. // updateVals - a sequence of updates to be applied to the set.
  1029. // updateVals is shuffled a number of times during testing to check for same resulting validator set.
  1030. valSetUpdatesOrderTests := []struct {
  1031. startVals []testVal
  1032. updateVals []testVal
  1033. }{
  1034. 0: { // order of changes should not matter, the final validator sets should be the same
  1035. []testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}},
  1036. []testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}},
  1037. 1: { // order of additions should not matter
  1038. []testVal{{"v2", 20}, {"v1", 10}},
  1039. []testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}},
  1040. 2: { // order of removals should not matter
  1041. []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
  1042. []testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}},
  1043. 3: { // order of mixed operations should not matter
  1044. []testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
  1045. []testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}},
  1046. }
  1047. for i, tt := range valSetUpdatesOrderTests {
  1048. // create a new set and apply updates
  1049. valSet := createNewValidatorSet(tt.startVals)
  1050. valSetCopy := valSet.Copy()
  1051. valList := createNewValidatorList(tt.updateVals)
  1052. assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList))
  1053. // save the result as expected for next updates
  1054. valSetExp := valSetCopy.Copy()
  1055. // perform at most 20 permutations on the updates and call UpdateWithChangeSet()
  1056. n := len(tt.updateVals)
  1057. maxNumPerms := tmmath.MinInt(20, n*n)
  1058. for j := 0; j < maxNumPerms; j++ {
  1059. // create a copy of original set and apply a random permutation of updates
  1060. valSetCopy := valSet.Copy()
  1061. valList := createNewValidatorList(permutation(tt.updateVals))
  1062. // check there was no error and the set is properly scaled and centered.
  1063. assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList),
  1064. "test %v failed for permutation %v", i, valList)
  1065. verifyValidatorSet(t, valSetCopy)
  1066. // verify the resulting test is same as the expected
  1067. assert.Equal(t, valSetCopy, valSetExp,
  1068. "test %v failed for permutation %v", i, valList)
  1069. }
  1070. }
  1071. }
  1072. // This tests the private function validator_set.go:applyUpdates() function, used only for additions and changes.
  1073. // Should perform a proper merge of updatedVals and startVals
  1074. func TestValSetApplyUpdatesTestsExecute(t *testing.T) {
  1075. valSetUpdatesBasicTests := []struct {
  1076. startVals []testVal
  1077. updateVals []testVal
  1078. expectedVals []testVal
  1079. }{
  1080. // additions
  1081. 0: { // prepend
  1082. []testVal{{"v4", 44}, {"v5", 55}},
  1083. []testVal{{"v1", 11}},
  1084. []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}},
  1085. 1: { // append
  1086. []testVal{{"v4", 44}, {"v5", 55}},
  1087. []testVal{{"v6", 66}},
  1088. []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
  1089. 2: { // insert
  1090. []testVal{{"v4", 44}, {"v6", 66}},
  1091. []testVal{{"v5", 55}},
  1092. []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
  1093. 3: { // insert multi
  1094. []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}},
  1095. []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}},
  1096. []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}},
  1097. // changes
  1098. 4: { // head
  1099. []testVal{{"v1", 111}, {"v2", 22}},
  1100. []testVal{{"v1", 11}},
  1101. []testVal{{"v1", 11}, {"v2", 22}}},
  1102. 5: { // tail
  1103. []testVal{{"v1", 11}, {"v2", 222}},
  1104. []testVal{{"v2", 22}},
  1105. []testVal{{"v1", 11}, {"v2", 22}}},
  1106. 6: { // middle
  1107. []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}},
  1108. []testVal{{"v2", 22}},
  1109. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
  1110. 7: { // multi
  1111. []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}},
  1112. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}},
  1113. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
  1114. // additions and changes
  1115. 8: {
  1116. []testVal{{"v1", 111}, {"v2", 22}},
  1117. []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}},
  1118. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}},
  1119. }
  1120. for i, tt := range valSetUpdatesBasicTests {
  1121. // create a new validator set with the start values
  1122. valSet := createNewValidatorSet(tt.startVals)
  1123. // applyUpdates() with the update values
  1124. valList := createNewValidatorList(tt.updateVals)
  1125. valSet.applyUpdates(valList)
  1126. // check the new list of validators for proper merge
  1127. assert.Equal(t, toTestValList(valSet.Validators), tt.expectedVals, "test %v", i)
  1128. }
  1129. }
  1130. type testVSetCfg struct {
  1131. name string
  1132. startVals []testVal
  1133. deletedVals []testVal
  1134. updatedVals []testVal
  1135. addedVals []testVal
  1136. expectedVals []testVal
  1137. expErr error
  1138. }
func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg {
	if nBase <= 0 || nAddMax < 0 {
		panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax))
	}

	const maxPower = 1000
	var nOld, nDel, nChanged, nAdd int

	nOld = int(tmrand.Uint()%uint(nBase)) + 1
	if nBase-nOld > 0 {
		nDel = int(tmrand.Uint() % uint(nBase-nOld))
	}
	nChanged = nBase - nOld - nDel

	if nAddMax > 0 {
		nAdd = tmrand.Int()%nAddMax + 1
	}

	cfg := testVSetCfg{}

	cfg.startVals = make([]testVal, nBase)
	cfg.deletedVals = make([]testVal, nDel)
	cfg.addedVals = make([]testVal, nAdd)
	cfg.updatedVals = make([]testVal, nChanged)
	cfg.expectedVals = make([]testVal, nBase-nDel+nAdd)

	for i := 0; i < nBase; i++ {
		cfg.startVals[i] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
		if i < nOld {
			cfg.expectedVals[i] = cfg.startVals[i]
		}
		if i >= nOld && i < nOld+nChanged {
			cfg.updatedVals[i-nOld] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
			cfg.expectedVals[i] = cfg.updatedVals[i-nOld]
		}
		if i >= nOld+nChanged {
			cfg.deletedVals[i-nOld-nChanged] = testVal{fmt.Sprintf("v%d", i), 0}
		}
	}

	for i := nBase; i < nBase+nAdd; i++ {
		cfg.addedVals[i-nBase] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
		cfg.expectedVals[i-nDel] = cfg.addedVals[i-nBase]
	}

	sort.Sort(testValsByVotingPower(cfg.startVals))
	sort.Sort(testValsByVotingPower(cfg.deletedVals))
	sort.Sort(testValsByVotingPower(cfg.updatedVals))
	sort.Sort(testValsByVotingPower(cfg.addedVals))
	sort.Sort(testValsByVotingPower(cfg.expectedVals))

	return cfg
}

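// applyChangesToValSet flattens the given lists of changes (any combination
// of additions, updates and deletions) into a single change-set, applies it
// with UpdateWithChangeSet, and asserts that the returned error matches
// expErr.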
func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) {
	changes := make([]testVal, 0)
	for _, valsList := range valsLists {
		changes = append(changes, valsList...)
	}
	valList := createNewValidatorList(changes)
	err := valSet.UpdateWithChangeSet(valList)
	if expErr != nil {
		assert.Equal(t, expErr, err)
	} else {
		assert.NoError(t, err)
	}
}

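// TestValSetUpdatePriorityOrderTests checks how validator-set updates
// interact with proposer priorities: after the changes are applied, newly
// added validators must all enter the set with the same, lowest priority
// (see verifyValSetUpdatePriorityOrder below).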
func TestValSetUpdatePriorityOrderTests(t *testing.T) {
	const nMaxElections int32 = 5000

	testCases := []testVSetCfg{
		0: { // remove high power validator, keep old equal lower power validators
			startVals:    []testVal{{"v3", 1000}, {"v1", 1}, {"v2", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{},
			addedVals:    []testVal{},
			expectedVals: []testVal{{"v1", 1}, {"v2", 1}},
		},
		1: { // remove high power validator, keep old different power validators
			startVals:    []testVal{{"v3", 1000}, {"v2", 10}, {"v1", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{},
			addedVals:    []testVal{},
			expectedVals: []testVal{{"v2", 10}, {"v1", 1}},
		},
		2: { // remove high power validator, add new low power validators, keep old lower power
			startVals:    []testVal{{"v3", 1000}, {"v2", 2}, {"v1", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{{"v2", 1}},
			addedVals:    []testVal{{"v5", 50}, {"v4", 40}},
			expectedVals: []testVal{{"v5", 50}, {"v4", 40}, {"v1", 1}, {"v2", 1}},
		},
		// generate a configuration with 100 validators,
		// randomly select validators for updates and deletes, and
		// generate between 1 and 10 new validators to be added
		3: randTestVSetCfg(t, 100, 10),
		4: randTestVSetCfg(t, 1000, 100),
		5: randTestVSetCfg(t, 10, 100),
		6: randTestVSetCfg(t, 100, 1000),
		7: randTestVSetCfg(t, 1000, 1000),
	}

	for _, cfg := range testCases {
		// create a new validator set
		valSet := createNewValidatorSet(cfg.startVals)
		verifyValidatorSet(t, valSet)

		// run election up to nMaxElections times, apply changes and verify that the priority order is correct
		verifyValSetUpdatePriorityOrder(t, valSet, cfg, nMaxElections)
	}
}

func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int32) {
	// run the election a random number of times (up to nMaxElections)
	valSet.IncrementProposerPriority(tmrand.Int31()%nMaxElections + 1)

	// apply the changes and get the updated validators
	applyChangesToValSet(t, nil, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals)

	// basic checks
	assert.Equal(t, cfg.expectedVals, toTestValList(valSet.Validators))
	verifyValidatorSet(t, valSet)

	// verify that the added validators have the smallest priority:
	// - they should be at the beginning of updatedValsPriSorted since it is
	//   sorted by priority
	if len(cfg.addedVals) > 0 {
		updatedValsPriSorted := validatorListCopy(valSet.Validators)
		sort.Sort(validatorsByPriority(updatedValsPriSorted))

		addedValsPriSlice := updatedValsPriSorted[:len(cfg.addedVals)]
		sort.Sort(ValidatorsByVotingPower(addedValsPriSlice))
		assert.Equal(t, cfg.addedVals, toTestValList(addedValsPriSlice))

		// - and they should all have the same priority
		expectedPri := addedValsPriSlice[0].ProposerPriority
		for _, val := range addedValsPriSlice[1:] {
			assert.Equal(t, expectedPri, val.ProposerPriority)
		}
	}
}

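// TestValSetUpdateOverflowRelated verifies that an update whose final total
// voting power stays within MaxTotalVotingPower never yields a spurious
// overflow error, even if summing the changes in the wrong order would
// temporarily exceed the limit, while a genuine overflow is still reported
// as ErrTotalVotingPowerOverflow.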
func TestValSetUpdateOverflowRelated(t *testing.T) {
	testCases := []testVSetCfg{
		{
			name:         "1 no false overflow error messages for updates",
			startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
			updatedVals:  []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
			expectedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
			expErr:       nil,
		},
		{
			// This test shows that it is important to apply the updates in order of the change in power:
			// updates that decrease power (the v2 change here) must be applied first.
			name:         "2 no false overflow error messages for updates",
			startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
			updatedVals:  []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}},
			expectedVals: []testVal{{"v2", MaxTotalVotingPower / 2}, {"v1", MaxTotalVotingPower/2 - 1}},
			expErr:       nil,
		},
		{
			name:         "3 no false overflow error messages for deletes",
			startVals:    []testVal{{"v1", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
			deletedVals:  []testVal{{"v1", 0}},
			addedVals:    []testVal{{"v4", MaxTotalVotingPower - 2}},
			expectedVals: []testVal{{"v4", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
			expErr:       nil,
		},
		{
			name: "4 no false overflow error messages for adds, updates and deletes",
			startVals: []testVal{
				{"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4},
				{"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}},
			deletedVals: []testVal{{"v2", 0}},
			updatedVals: []testVal{
				{"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}},
			addedVals: []testVal{{"v5", 3}},
			expectedVals: []testVal{
				{"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}},
			expErr: nil,
		},
		{
			name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8",
			startVals: []testVal{
				{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
				{"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
			updatedVals: []testVal{
				{"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower},
				{"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower},
				{"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}},
			expectedVals: []testVal{
				{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
				{"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
			expErr: ErrTotalVotingPowerOverflow,
		},
	}

	for _, tt := range testCases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			valSet := createNewValidatorSet(tt.startVals)
			verifyValidatorSet(t, valSet)

			// execute update and verify the returned error is as expected
			applyChangesToValSet(t, tt.expErr, valSet, tt.addedVals, tt.updatedVals, tt.deletedVals)

			// verify the updated validator set is as expected
			assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators))
			verifyValidatorSet(t, valSet)
		})
	}
}

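// TestValidatorSet_VerifyCommitLightTrusting checks commit verification
// against a trusted validator set that may differ from the set that produced
// the commit: verification should succeed whenever signers overlapping the
// trusted set hold more than the given trust level (1/3 here) of its power.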
func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) {
	var (
		blockID                       = makeBlockIDRandom()
		voteSet, originalValset, vals = randVoteSet(1, 1, tmproto.PrecommitType, 6, 1)
		commit, err                   = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now())
		newValSet, _                  = RandValidatorSet(2, 1)
	)
	require.NoError(t, err)

	testCases := []struct {
		valSet *ValidatorSet
		err    bool
	}{
		// good
		0: {
			valSet: originalValset,
			err:    false,
		},
		// bad - no overlap between validator sets
		1: {
			valSet: newValSet,
			err:    true,
		},
		// good - first two are different but the rest are the same -> >1/3 overlap
		2: {
			valSet: NewValidatorSet(append(newValSet.Validators, originalValset.Validators...)),
			err:    false,
		},
	}

	for _, tc := range testCases {
		err = tc.valSet.VerifyCommitLightTrusting("test_chain_id", commit,
			tmmath.Fraction{Numerator: 1, Denominator: 3})
		if tc.err {
			assert.Error(t, err)
		} else {
			assert.NoError(t, err)
		}
	}
}

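// TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow ensures that an
// int64 overflow while computing the required voting power (here the total
// power already equals MaxTotalVotingPower) is reported as an error rather
// than causing a panic.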
func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) {
	var (
		blockID               = makeBlockIDRandom()
		voteSet, valSet, vals = randVoteSet(1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower)
		commit, err           = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now())
	)
	require.NoError(t, err)

	err = valSet.VerifyCommitLightTrusting("test_chain_id", commit,
		tmmath.Fraction{Numerator: 25, Denominator: 55})
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "int64 overflow")
	}
}

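// TestSafeMul exercises safeMul, the overflow-checked int64 multiplication
// helper: as the cases below imply, it returns (a*b, false) when the product
// fits in an int64 and (0, true) on overflow. A minimal sketch of such a
// helper (an illustration only, not necessarily the actual implementation)
// could use the classic divide-back check:
//
//	func safeMulSketch(a, b int64) (int64, bool) {
//		if a == 0 || b == 0 {
//			return 0, false
//		}
//		c := a * b // wraps silently on overflow in Go
//		if c/b != a || (a == math.MinInt64 && b == -1) {
//			return 0, true
//		}
//		return c, false
//	}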
func TestSafeMul(t *testing.T) {
	testCases := []struct {
		a        int64
		b        int64
		c        int64
		overflow bool
	}{
		0: {0, 0, 0, false},
		1: {1, 0, 0, false},
		2: {2, 3, 6, false},
		3: {2, -3, -6, false},
		4: {-2, -3, 6, false},
		5: {-2, 3, -6, false},
		6: {math.MaxInt64, 1, math.MaxInt64, false},
		7: {math.MaxInt64 / 2, 2, math.MaxInt64 - 1, false},
		8: {math.MaxInt64 / 2, 3, 0, true},
		9: {math.MaxInt64, 2, 0, true},
	}

	for i, tc := range testCases {
		c, overflow := safeMul(tc.a, tc.b)
		assert.Equal(t, tc.c, c, "#%d", i)
		assert.Equal(t, tc.overflow, overflow, "#%d", i)
	}
}

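// TestValidatorSetProtoBuf round-trips validator sets through their protobuf
// representation: expPass1 covers ToProto and expPass2 covers
// ValidatorSetFromProto, so a set may marshal successfully and still fail to
// unmarshal (e.g. the empty and nil sets below).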
func TestValidatorSetProtoBuf(t *testing.T) {
	valset, _ := RandValidatorSet(10, 100)
	valset2, _ := RandValidatorSet(10, 100)
	valset2.Validators[0] = &Validator{}

	valset3, _ := RandValidatorSet(10, 100)
	valset3.Proposer = nil

	valset4, _ := RandValidatorSet(10, 100)
	valset4.Proposer = &Validator{}

	testCases := []struct {
		msg      string
		v1       *ValidatorSet
		expPass1 bool
		expPass2 bool
	}{
		{"success", valset, true, true},
		{"fail valSet2, pubkey empty", valset2, false, false},
		{"fail nil Proposer", valset3, false, false},
		{"fail empty Proposer", valset4, false, false},
		{"fail empty valSet", &ValidatorSet{}, true, false},
		{"fail nil valSet", nil, true, false},
	}

	for _, tc := range testCases {
		protoValSet, err := tc.v1.ToProto()
		if tc.expPass1 {
			require.NoError(t, err, tc.msg)
		} else {
			require.Error(t, err, tc.msg)
		}

		valSet, err := ValidatorSetFromProto(protoValSet)
		if tc.expPass2 {
			require.NoError(t, err, tc.msg)
			require.EqualValues(t, tc.v1, valSet, tc.msg)
		} else {
			require.Error(t, err, tc.msg)
		}
	}
}

//---------------------
// Sort validators by priority and address

type validatorsByPriority []*Validator

func (valz validatorsByPriority) Len() int {
	return len(valz)
}

func (valz validatorsByPriority) Less(i, j int) bool {
	if valz[i].ProposerPriority < valz[j].ProposerPriority {
		return true
	}
	if valz[i].ProposerPriority > valz[j].ProposerPriority {
		return false
	}
	return bytes.Compare(valz[i].Address, valz[j].Address) < 0
}

func (valz validatorsByPriority) Swap(i, j int) {
	valz[i], valz[j] = valz[j], valz[i]
}

//-------------------------------------
// Sort testVals by voting power (descending), breaking ties by name

type testValsByVotingPower []testVal

func (tvals testValsByVotingPower) Len() int {
	return len(tvals)
}

func (tvals testValsByVotingPower) Less(i, j int) bool {
	if tvals[i].power == tvals[j].power {
		return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1
	}
	return tvals[i].power > tvals[j].power
}

func (tvals testValsByVotingPower) Swap(i, j int) {
	tvals[i], tvals[j] = tvals[j], tvals[i]
}

//-------------------------------------
// Benchmark tests
//

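// BenchmarkUpdates measures the cost of applying a change-set of m new
// validators to a set of n existing ones; b.ResetTimer() excludes the setup,
// so each iteration times only Copy() plus UpdateWithChangeSet().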
func BenchmarkUpdates(b *testing.B) {
	const (
		n = 100
		m = 2000
	)
	// Init with n validators
	vs := make([]*Validator, n)
	for j := 0; j < n; j++ {
		vs[j] = newValidator([]byte(fmt.Sprintf("v%d", j)), 100)
	}
	valSet := NewValidatorSet(vs)
	l := len(valSet.Validators)

	// Make m new validators
	newValList := make([]*Validator, m)
	for j := 0; j < m; j++ {
		newValList[j] = newValidator([]byte(fmt.Sprintf("v%d", j+l)), 1000)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// Add m validators to valSetCopy
		valSetCopy := valSet.Copy()
		assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList))
	}
}