You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1784 lines
53 KiB

types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
Normalize priorities to not exceed total voting power (#3049) * more proposer priority tests - test that we don't reset to zero when updating / adding - test that same power validators alternate * add another test to track / simulate similar behaviour as in #2960 * address some of Chris' review comments * address some more of Chris' review comments * temporarily pushing branch with the following changes: The total power might change if: - a validator is added - a validator is removed - a validator is updated Decrement the accums (of all validators) directly after any of these events (by the inverse of the change) * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * Fix 2960 by re-normalizing / scaling priorities to be in bounds of total power, additionally: - remove heap where it doesn't make sense - avg. only at the end of IncrementProposerPriority instead of each iteration - update (and slightly improve) TestAveragingInIncrementProposerPriorityWithVotingPower to reflect above changes * fix tests * add comment * update changelog pending & some minor changes * comment about division will floor the result & fix typo * Update TestLargeGenesisValidator: - remove TODO and increase large genesis validator's voting power accordingly * move changelog entry to P2P Protocol * Ceil instead of flooring when dividing & update test * quickly fix failing TestProposerPriorityDoesNotGetResetToZero: - divide by Ceil((maxPriority - minPriority) / 2*totalVotingPower) * fix typo: rename getValWitMostPriority -> getValWithMostPriority * test proposer frequencies * return absolute value for diff. 
keep testing * use for loop for div * cleanup, more tests * spellcheck * get rid of using floats: manually ceil where necessary * Remove float, simplify, fix tests to match chris's proof (#3157)
5 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they are invalid and can punish the proposer. But since applications don't verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified with respect to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: prevent spurious validator power overflow warnings when changing the validator set (#4183) Fix for #4164 The general problem is that in certain conditions an overflow warning is issued when attempting to update a validator set even if the final set's total voting power is not over the maximum allowed. Root cause is that in verifyUpdates(), updates are verified wrt to total voting power in the order of validator address. It is then possible that a low address validator may increase its power such that the temporary total voting power count goes over MaxTotalVotingPower. Scenarios where removing and adding/ updating validators with high voting power, in the same update operation, cause the same false warning and the updates are not applied. Main changes to fix this are in verifyUpdate() that now does the verification starting with the decreases in power. It also takes into account the removals that are part of the update. ## Commits: * tests for overflow detection and prevention * test fix * more tests * fix the false overflow warnings and golint * scopelint warning fix * review comments * variant with using sort by amount of change in power * compute separately number new validators in update * types: use a switch in processChanges * more review comments * types: use HasAddress in numNewValidators * types: refactor verifyUpdates copy updates, sort them by delta and use resulting slice to calculate tvpAfterUpdatesBeforeRemovals. * remove unused structs * review comments * update changelog
5 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they are invalid and can punish the proposer. But since applications don't verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd.
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
types: verify commit fully Since the light client work introduced in v0.33 it appears full nodes are no longer fully verifying commit signatures during block execution - they stop after +2/3. See in VerifyCommit: https://github.com/tendermint/tendermint/blob/0c7fd316eb006c0afc13996c00ac8bde1078b32c/types/validator_set.go#L700-L703 This means proposers can propose blocks that contain valid +2/3 signatures and then the rest of the signatures can be whatever they want. They can claim that all the other validators signed just by including a CommitSig with arbitrary signature data. While this doesn't seem to impact safety of Tendermint per se, it means that Commits may contain a lot of invalid data. This is already true of blocks, since they can include invalid txs filled with garbage, but in that case the application knows they they are invalid and can punish the proposer. But since applications dont verify commit signatures directly (they trust tendermint to do that), they won't be able to detect it. This can impact incentivization logic in the application that depends on the LastCommitInfo sent in BeginBlock, which includes which validators signed. For instance, Gaia incentivizes proposers with a bonus for including more than +2/3 of the signatures. But a proposer can now claim that bonus just by including arbitrary data for the final -1/3 of validators without actually waiting for their signatures. There may be other tricks that can be played because of this. In general, the full node should be a fully verifying machine. While it's true that the light client can avoid verifying all signatures by stopping after +2/3, the full node can not. Thus the light client and full node should use distinct VerifyCommit functions if one is going to stop after +2/3 or otherwise perform less validation (for instance light clients can also skip verifying votes for nil while full nodes can not). See a commit with a bad signature that verifies here: 56367fd. 
From what I can tell, Tendermint will go on to think this commit is valid and forward this data to the app, so the app will think the second validator actually signed when it clearly did not.
4 years ago
  1. package types
  2. import (
  3. "bytes"
  4. "context"
  5. "fmt"
  6. "math"
  7. "sort"
  8. "strings"
  9. "testing"
  10. "testing/quick"
  11. "time"
  12. "github.com/stretchr/testify/assert"
  13. "github.com/stretchr/testify/require"
  14. "github.com/tendermint/tendermint/crypto"
  15. "github.com/tendermint/tendermint/crypto/ed25519"
  16. tmmath "github.com/tendermint/tendermint/libs/math"
  17. tmrand "github.com/tendermint/tendermint/libs/rand"
  18. tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
  19. )
// TestValidatorSetBasic walks the basic ValidatorSet API end to end:
// behavior of the empty set, then adding one validator, then updating it.
func TestValidatorSetBasic(t *testing.T) {
	// empty or nil validator lists are allowed,
	// but attempting to IncrementProposerPriority on them will panic.
	vset := NewValidatorSet([]*Validator{})
	assert.Panics(t, func() { vset.IncrementProposerPriority(1) })
	vset = NewValidatorSet(nil)
	assert.Panics(t, func() { vset.IncrementProposerPriority(1) })
	assert.EqualValues(t, vset, vset.Copy())
	assert.False(t, vset.HasAddress([]byte("some val")))
	// lookups on the empty set report "not found" rather than panicking
	idx, val := vset.GetByAddress([]byte("some val"))
	assert.EqualValues(t, -1, idx)
	assert.Nil(t, val)
	// out-of-range indices (negative, zero on empty set, too large) return nil/nil
	addr, val := vset.GetByIndex(-100)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	addr, val = vset.GetByIndex(0)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	addr, val = vset.GetByIndex(100)
	assert.Nil(t, addr)
	assert.Nil(t, val)
	assert.Zero(t, vset.Size())
	assert.Equal(t, int64(0), vset.TotalVotingPower())
	assert.Nil(t, vset.GetProposer())
	// the empty set hashes to the SHA-256 digest of empty input
	assert.Equal(t, []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4,
		0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95,
		0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, vset.Hash())
	// add
	val = randValidator(vset.TotalVotingPower())
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	assert.True(t, vset.HasAddress(val.Address))
	idx, _ = vset.GetByAddress(val.Address)
	assert.EqualValues(t, 0, idx)
	addr, _ = vset.GetByIndex(0)
	assert.Equal(t, []byte(val.Address), addr)
	assert.Equal(t, 1, vset.Size())
	assert.Equal(t, val.VotingPower, vset.TotalVotingPower())
	assert.NotNil(t, vset.Hash())
	assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) })
	assert.Equal(t, val.Address, vset.GetProposer().Address)
	// update
	val = randValidator(vset.TotalVotingPower())
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	_, val = vset.GetByAddress(val.Address)
	val.VotingPower += 100
	proposerPriority := val.ProposerPriority
	// an update must not change the ProposerPriority stored in the set, even
	// if the submitted validator carries a different (zeroed) priority
	val.ProposerPriority = 0
	assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val}))
	_, val = vset.GetByAddress(val.Address)
	assert.Equal(t, proposerPriority, val.ProposerPriority)
}
  71. func TestValidatorSetValidateBasic(t *testing.T) {
  72. val, _ := RandValidator(false, 1)
  73. badVal := &Validator{}
  74. testCases := []struct {
  75. vals ValidatorSet
  76. err bool
  77. msg string
  78. }{
  79. {
  80. vals: ValidatorSet{},
  81. err: true,
  82. msg: "validator set is nil or empty",
  83. },
  84. {
  85. vals: ValidatorSet{
  86. Validators: []*Validator{},
  87. },
  88. err: true,
  89. msg: "validator set is nil or empty",
  90. },
  91. {
  92. vals: ValidatorSet{
  93. Validators: []*Validator{val},
  94. },
  95. err: true,
  96. msg: "proposer failed validate basic, error: nil validator",
  97. },
  98. {
  99. vals: ValidatorSet{
  100. Validators: []*Validator{badVal},
  101. },
  102. err: true,
  103. msg: "invalid validator #0: validator does not have a public key",
  104. },
  105. {
  106. vals: ValidatorSet{
  107. Validators: []*Validator{val},
  108. Proposer: val,
  109. },
  110. err: false,
  111. msg: "",
  112. },
  113. }
  114. for _, tc := range testCases {
  115. err := tc.vals.ValidateBasic()
  116. if tc.err {
  117. if assert.Error(t, err) {
  118. assert.Equal(t, tc.msg, err.Error())
  119. }
  120. } else {
  121. assert.NoError(t, err)
  122. }
  123. }
  124. }
  125. func TestCopy(t *testing.T) {
  126. vset := randValidatorSet(10)
  127. vsetHash := vset.Hash()
  128. if len(vsetHash) == 0 {
  129. t.Fatalf("ValidatorSet had unexpected zero hash")
  130. }
  131. vsetCopy := vset.Copy()
  132. vsetCopyHash := vsetCopy.Hash()
  133. if !bytes.Equal(vsetHash, vsetCopyHash) {
  134. t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash)
  135. }
  136. }
  137. // Test that IncrementProposerPriority requires positive times.
  138. func TestIncrementProposerPriorityPositiveTimes(t *testing.T) {
  139. vset := NewValidatorSet([]*Validator{
  140. newValidator([]byte("foo"), 1000),
  141. newValidator([]byte("bar"), 300),
  142. newValidator([]byte("baz"), 330),
  143. })
  144. assert.Panics(t, func() { vset.IncrementProposerPriority(-1) })
  145. assert.Panics(t, func() { vset.IncrementProposerPriority(0) })
  146. vset.IncrementProposerPriority(1)
  147. }
  148. func BenchmarkValidatorSetCopy(b *testing.B) {
  149. b.StopTimer()
  150. vset := NewValidatorSet([]*Validator{})
  151. for i := 0; i < 1000; i++ {
  152. privKey := ed25519.GenPrivKey()
  153. pubKey := privKey.PubKey()
  154. val := NewValidator(pubKey, 10)
  155. err := vset.UpdateWithChangeSet([]*Validator{val})
  156. if err != nil {
  157. panic("Failed to add validator")
  158. }
  159. }
  160. b.StartTimer()
  161. for i := 0; i < b.N; i++ {
  162. vset.Copy()
  163. }
  164. }
//-------------------------------------------------------------------
// TestProposerSelection1 runs 99 rounds of proposer selection over a fixed
// 3-validator set and compares the resulting sequence against a golden string.
func TestProposerSelection1(t *testing.T) {
	vset := NewValidatorSet([]*Validator{
		newValidator([]byte("foo"), 1000),
		newValidator([]byte("bar"), 300),
		newValidator([]byte("baz"), 330),
	})
	var proposers []string
	for i := 0; i < 99; i++ {
		val := vset.GetProposer()
		proposers = append(proposers, string(val.Address))
		vset.IncrementProposerPriority(1)
	}
	// golden sequence: each name appears with frequency roughly proportional
	// to its voting power (foo 1000, bar 300, baz 330)
	expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
		` foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar` +
		` foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz` +
		` foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo` +
		` foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo`
	if expected != strings.Join(proposers, " ") {
		t.Errorf("expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " "))
	}
}
// TestProposerSelection2 checks three properties of proposer selection:
// equal powers rotate in address order, a heavier validator can (only with
// enough margin) propose twice in a row, and long-run proposal frequency is
// proportional to voting power.
func TestProposerSelection2(t *testing.T) {
	addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
	addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1}
	addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2}
	// when all voting power is same, we go in order of addresses
	val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100)
	valList := []*Validator{val0, val1, val2}
	vals := NewValidatorSet(valList)
	for i := 0; i < len(valList)*5; i++ {
		ii := (i) % len(valList)
		prop := vals.GetProposer()
		if !bytes.Equal(prop.Address, valList[ii].Address) {
			t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address)
		}
		vals.IncrementProposerPriority(1)
	}
	// One validator has more than the others, but not enough to propose twice in a row
	*val2 = *newValidator(addr2, 400)
	vals = NewValidatorSet(valList)
	// vals.IncrementProposerPriority(1)
	prop := vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr0) {
		t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address)
	}
	// One validator has more than the others, and enough to be proposer twice in a row
	*val2 = *newValidator(addr2, 401)
	vals = NewValidatorSet(valList)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr2) {
		t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address)
	}
	vals.IncrementProposerPriority(1)
	prop = vals.GetProposer()
	if !bytes.Equal(prop.Address, addr0) {
		t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address)
	}
	// each validator should be the proposer a proportional number of times
	val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3)
	valList = []*Validator{val0, val1, val2}
	propCount := make([]int, 3)
	vals = NewValidatorSet(valList)
	N := 1
	for i := 0; i < 120*N; i++ {
		prop := vals.GetProposer()
		// the last address byte doubles as the validator's index into propCount
		ii := prop.Address[19]
		propCount[ii]++
		vals.IncrementProposerPriority(1)
	}
	if propCount[0] != 40*N {
		t.Fatalf(
			"Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d",
			40*N,
			120*N,
			propCount[0],
			120*N,
		)
	}
	if propCount[1] != 50*N {
		t.Fatalf(
			"Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d",
			50*N,
			120*N,
			propCount[1],
			120*N,
		)
	}
	if propCount[2] != 30*N {
		t.Fatalf(
			"Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d",
			30*N,
			120*N,
			propCount[2],
			120*N,
		)
	}
}
  273. func TestProposerSelection3(t *testing.T) {
  274. vset := NewValidatorSet([]*Validator{
  275. newValidator([]byte("avalidator_address12"), 1),
  276. newValidator([]byte("bvalidator_address12"), 1),
  277. newValidator([]byte("cvalidator_address12"), 1),
  278. newValidator([]byte("dvalidator_address12"), 1),
  279. })
  280. proposerOrder := make([]*Validator, 4)
  281. for i := 0; i < 4; i++ {
  282. // need to give all validators to have keys
  283. pk := ed25519.GenPrivKey().PubKey()
  284. vset.Validators[i].PubKey = pk
  285. proposerOrder[i] = vset.GetProposer()
  286. vset.IncrementProposerPriority(1)
  287. }
  288. // i for the loop
  289. // j for the times
  290. // we should go in order for ever, despite some IncrementProposerPriority with times > 1
  291. var (
  292. i int
  293. j int32
  294. )
  295. for ; i < 10000; i++ {
  296. got := vset.GetProposer().Address
  297. expected := proposerOrder[j%4].Address
  298. if !bytes.Equal(got, expected) {
  299. t.Fatalf(fmt.Sprintf("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j))
  300. }
  301. // serialize, deserialize, check proposer
  302. b := vset.toBytes()
  303. vset = vset.fromBytes(b)
  304. computed := vset.GetProposer() // findGetProposer()
  305. if i != 0 {
  306. if !bytes.Equal(got, computed.Address) {
  307. t.Fatalf(
  308. fmt.Sprintf(
  309. "vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)",
  310. got,
  311. computed.Address,
  312. i,
  313. j,
  314. ),
  315. )
  316. }
  317. }
  318. // times is usually 1
  319. times := int32(1)
  320. mod := (tmrand.Int() % 5) + 1
  321. if tmrand.Int()%mod > 0 {
  322. // sometimes its up to 5
  323. times = (tmrand.Int31() % 4) + 1
  324. }
  325. vset.IncrementProposerPriority(times)
  326. j += times
  327. }
  328. }
  329. func newValidator(address []byte, power int64) *Validator {
  330. return &Validator{Address: address, VotingPower: power}
  331. }
  332. func randPubKey() crypto.PubKey {
  333. pubKey := make(ed25519.PubKey, ed25519.PubKeySize)
  334. copy(pubKey, tmrand.Bytes(32))
  335. return ed25519.PubKey(tmrand.Bytes(32))
  336. }
  337. func randValidator(totalVotingPower int64) *Validator {
  338. // this modulo limits the ProposerPriority/VotingPower to stay in the
  339. // bounds of MaxTotalVotingPower minus the already existing voting power:
  340. val := NewValidator(randPubKey(), int64(tmrand.Uint64()%uint64(MaxTotalVotingPower-totalVotingPower)))
  341. val.ProposerPriority = tmrand.Int64() % (MaxTotalVotingPower - totalVotingPower)
  342. return val
  343. }
  344. func randValidatorSet(numValidators int) *ValidatorSet {
  345. validators := make([]*Validator, numValidators)
  346. totalVotingPower := int64(0)
  347. for i := 0; i < numValidators; i++ {
  348. validators[i] = randValidator(totalVotingPower)
  349. totalVotingPower += validators[i].VotingPower
  350. }
  351. return NewValidatorSet(validators)
  352. }
  353. func (vals *ValidatorSet) toBytes() []byte {
  354. pbvs, err := vals.ToProto()
  355. if err != nil {
  356. panic(err)
  357. }
  358. bz, err := pbvs.Marshal()
  359. if err != nil {
  360. panic(err)
  361. }
  362. return bz
  363. }
  364. func (vals *ValidatorSet) fromBytes(b []byte) *ValidatorSet {
  365. pbvs := new(tmproto.ValidatorSet)
  366. err := pbvs.Unmarshal(b)
  367. if err != nil {
  368. // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED
  369. panic(err)
  370. }
  371. vs, err := ValidatorSetFromProto(pbvs)
  372. if err != nil {
  373. panic(err)
  374. }
  375. return vs
  376. }
  377. //-------------------------------------------------------------------
  378. func TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) {
  379. // NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower()
  380. // which should panic on overflows:
  381. shouldPanic := func() {
  382. NewValidatorSet([]*Validator{
  383. {Address: []byte("a"), VotingPower: math.MaxInt64, ProposerPriority: 0},
  384. {Address: []byte("b"), VotingPower: math.MaxInt64, ProposerPriority: 0},
  385. {Address: []byte("c"), VotingPower: math.MaxInt64, ProposerPriority: 0},
  386. })
  387. }
  388. assert.Panics(t, shouldPanic)
  389. }
// TestAvgProposerPriority checks computeAvgProposerPriority against hand-picked
// priority tables, including int64 extremes where a naive sum would overflow.
func TestAvgProposerPriority(t *testing.T) {
	// Create Validator set without calling IncrementProposerPriority:
	tcs := []struct {
		vs   ValidatorSet
		want int64
	}{
		0: {ValidatorSet{Validators: []*Validator{{ProposerPriority: 0}, {ProposerPriority: 0}, {ProposerPriority: 0}}}, 0},
		1: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}, {ProposerPriority: 0}},
			}, math.MaxInt64 / 3,
		},
		2: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: 0}},
			}, math.MaxInt64 / 2,
		},
		3: {
			// both at MaxInt64: the expected average is MaxInt64, so the
			// implementation must not overflow while summing
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MaxInt64}, {ProposerPriority: math.MaxInt64}},
			}, math.MaxInt64,
		},
		4: {
			ValidatorSet{
				Validators: []*Validator{{ProposerPriority: math.MinInt64}, {ProposerPriority: math.MinInt64}},
			}, math.MinInt64,
		},
	}
	for i, tc := range tcs {
		got := tc.vs.computeAvgProposerPriority()
		assert.Equal(t, tc.want, got, "test case: %v", i)
	}
}
// TestAveragingInIncrementProposerPriority tests that the averaging works as
// expected inside of IncrementProposerPriority.
// Each validator comes with zero voting power which simplifies reasoning about
// the expected ProposerPriority.
func TestAveragingInIncrementProposerPriority(t *testing.T) {
	tcs := []struct {
		vs    ValidatorSet
		times int32
		avg   int64
	}{
		0: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 1},
				{Address: []byte("b"), ProposerPriority: 2},
				{Address: []byte("c"), ProposerPriority: 3}}},
			1, 2},
		1: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 10},
				{Address: []byte("b"), ProposerPriority: -10},
				{Address: []byte("c"), ProposerPriority: 1}}},
			// this should average twice but the average should be 0 after the first iteration
			// (voting power is 0 -> no changes)
			// NB: 1 / 3 is constant integer division and truncates to 0
			11, 1 / 3},
		2: {ValidatorSet{
			Validators: []*Validator{
				{Address: []byte("a"), ProposerPriority: 100},
				{Address: []byte("b"), ProposerPriority: -10},
				{Address: []byte("c"), ProposerPriority: 1}}},
			// 91 / 3 truncates to 30
			1, 91 / 3},
	}
	for i, tc := range tcs {
		// work on copy to have the old ProposerPriorities:
		newVset := tc.vs.CopyIncrementProposerPriority(tc.times)
		for _, val := range tc.vs.Validators {
			_, updatedVal := newVset.GetByAddress(val.Address)
			assert.Equal(t, updatedVal.ProposerPriority, val.ProposerPriority-tc.avg, "test case: %v", i)
		}
	}
}
// TestAveragingInIncrementProposerPriorityWithVotingPower is a more complete
// test than TestAveragingInIncrementProposerPriority, showing how each
// ProposerPriority changes in relation to the validator's voting power.
// The average priority is zero in each round (powers 10/1/1, total 12).
func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) {
	vp0 := int64(10)
	vp1 := int64(1)
	vp2 := int64(1)
	total := vp0 + vp1 + vp2
	// avg is identically zero here; kept explicit to mirror the formula
	avg := (vp0 + vp1 + vp2 - total) / 3
	vals := ValidatorSet{Validators: []*Validator{
		{Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0},
		{Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1},
		{Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}}
	tcs := []struct {
		vals                  *ValidatorSet
		wantProposerPrioritys []int64
		times                 int32
		wantProposer          *Validator
	}{
		0: {
			vals.Copy(),
			[]int64{
				// Acumm+VotingPower-Avg:
				0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12)
				0 + vp1,
				0 + vp2},
			1,
			vals.Validators[0]},
		1: {
			vals.Copy(),
			[]int64{
				(0 + vp0 - total) + vp0 - total - avg, // this will be mostest on 2nd iter, too
				(0 + vp1) + vp1,
				(0 + vp2) + vp2},
			2,
			vals.Validators[0]}, // increment twice -> expect average to be subtracted twice
		2: {
			vals.Copy(),
			[]int64{
				0 + 3*(vp0-total) - avg, // still mostest
				0 + 3*vp1,
				0 + 3*vp2},
			3,
			vals.Validators[0]},
		3: {
			vals.Copy(),
			[]int64{
				0 + 4*(vp0-total), // still mostest
				0 + 4*vp1,
				0 + 4*vp2},
			4,
			vals.Validators[0]},
		4: {
			vals.Copy(),
			[]int64{
				0 + 4*(vp0-total) + vp0, // 4 iters was mostest
				0 + 5*vp1 - total,       // now this val is mostest for the 1st time (hence -12==totalVotingPower)
				0 + 5*vp2},
			5,
			vals.Validators[1]},
		5: {
			vals.Copy(),
			[]int64{
				0 + 6*vp0 - 5*total, // mostest again
				0 + 6*vp1 - total,   // mostest once up to here
				0 + 6*vp2},
			6,
			vals.Validators[0]},
		6: {
			vals.Copy(),
			[]int64{
				0 + 7*vp0 - 6*total, // in 7 iters this val is mostest 6 times
				0 + 7*vp1 - total,   // in 7 iters this val is mostest 1 time
				0 + 7*vp2},
			7,
			vals.Validators[0]},
		7: {
			vals.Copy(),
			[]int64{
				0 + 8*vp0 - 7*total, // mostest again
				0 + 8*vp1 - total,
				0 + 8*vp2},
			8,
			vals.Validators[0]},
		8: {
			vals.Copy(),
			[]int64{
				0 + 9*vp0 - 7*total,
				0 + 9*vp1 - total,
				0 + 9*vp2 - total}, // mostest
			9,
			vals.Validators[2]},
		9: {
			vals.Copy(),
			[]int64{
				0 + 10*vp0 - 8*total, // after 10 iters this is mostest again
				0 + 10*vp1 - total,   // after 6 iters this val is "mostest" once and not in between
				0 + 10*vp2 - total},  // in between 10 iters this val is "mostest" once
			10,
			vals.Validators[0]},
		10: {
			vals.Copy(),
			[]int64{
				0 + 11*vp0 - 9*total,
				0 + 11*vp1 - total,  // after 6 iters this val is "mostest" once and not in between
				0 + 11*vp2 - total}, // after 10 iters this val is "mostest" once
			11,
			vals.Validators[0]},
	}
	for i, tc := range tcs {
		tc.vals.IncrementProposerPriority(tc.times)
		// proposer must match the expected validator for this round
		assert.Equal(t, tc.wantProposer.Address, tc.vals.GetProposer().Address,
			"test case: %v",
			i)
		// and every validator's priority must match the hand-computed table
		for valIdx, val := range tc.vals.Validators {
			assert.Equal(t,
				tc.wantProposerPrioritys[valIdx],
				val.ProposerPriority,
				"test case: %v, validator: %v",
				i,
				valIdx)
		}
	}
}
  586. func TestSafeAdd(t *testing.T) {
  587. f := func(a, b int64) bool {
  588. c, overflow := safeAdd(a, b)
  589. return overflow || (!overflow && c == a+b)
  590. }
  591. if err := quick.Check(f, nil); err != nil {
  592. t.Error(err)
  593. }
  594. }
  595. func TestSafeAddClip(t *testing.T) {
  596. assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
  597. assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
  598. assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10))
  599. }
  600. func TestSafeSubClip(t *testing.T) {
  601. assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10))
  602. assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64))
  603. assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64))
  604. assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10))
  605. }
//-------------------------------------------------------------------
// Check VerifyCommit, VerifyCommitLight and VerifyCommitLightTrusting basic
// verification.

// TestValidatorSet_VerifyCommit_All runs VerifyCommit and VerifyCommitLight
// for a single-validator set against a table of good and malformed commits.
// Each expected failure's description must appear in the returned error text.
func TestValidatorSet_VerifyCommit_All(t *testing.T) {
	var (
		privKey = ed25519.GenPrivKey()
		pubKey  = privKey.PubKey()
		v1      = NewValidator(pubKey, 1000)
		vset    = NewValidatorSet([]*Validator{v1})
		chainID = "Lalande21185"
	)

	vote := examplePrecommit()
	vote.ValidatorAddress = pubKey.Address()
	v := vote.ToProto()
	sig, err := privKey.Sign(VoteSignBytes(chainID, v))
	require.NoError(t, err)
	vote.Signature = sig

	commit := NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote.CommitSig()})

	// vote2 is signed over a different chain ID, so its signature is invalid
	// when verified against chainID.
	vote2 := *vote
	sig2, err := privKey.Sign(VoteSignBytes("EpsilonEridani", v))
	require.NoError(t, err)
	vote2.Signature = sig2

	testCases := []struct {
		description string
		chainID     string
		blockID     BlockID
		height      int64
		commit      *Commit
		expErr      bool
	}{
		{"good", chainID, vote.BlockID, vote.Height, commit, false},

		{"wrong signature (#0)", "EpsilonEridani", vote.BlockID, vote.Height, commit, true},
		{"wrong block ID", chainID, makeBlockIDRandom(), vote.Height, commit, true},
		{"wrong height", chainID, vote.BlockID, vote.Height - 1, commit, true},

		{"wrong set size: 1 vs 0", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{}), true},

		{"wrong set size: 1 vs 2", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID,
				[]CommitSig{vote.CommitSig(), {BlockIDFlag: BlockIDFlagAbsent}}), true},

		{"insufficient voting power: got 0, needed more than 666", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{{BlockIDFlag: BlockIDFlagAbsent}}), true},

		{"wrong signature (#0)", chainID, vote.BlockID, vote.Height,
			NewCommit(vote.Height, vote.Round, vote.BlockID, []CommitSig{vote2.CommitSig()}), true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.description, func(t *testing.T) {
			// the case description doubles as the expected error substring
			err := vset.VerifyCommit(tc.chainID, tc.blockID, tc.height, tc.commit)
			if tc.expErr {
				if assert.Error(t, err, "VerifyCommit") {
					assert.Contains(t, err.Error(), tc.description, "VerifyCommit")
				}
			} else {
				assert.NoError(t, err, "VerifyCommit")
			}

			err = vset.VerifyCommitLight(tc.chainID, tc.blockID, tc.height, tc.commit)
			if tc.expErr {
				if assert.Error(t, err, "VerifyCommitLight") {
					assert.Contains(t, err.Error(), tc.description, "VerifyCommitLight")
				}
			} else {
				assert.NoError(t, err, "VerifyCommitLight")
			}
		})
	}
}
// TestValidatorSet_VerifyCommit_CheckAllSignatures ensures full verification
// inspects every signature: corrupting the 4th of 4 signatures must produce
// an error even though the first 3 signatures already carry 2/3+ power.
func TestValidatorSet_VerifyCommit_CheckAllSignatures(t *testing.T) {
	var (
		chainID = "test_chain_id"
		h       = int64(3)
		blockID = makeBlockIDRandom()
	)

	voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
	commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
	require.NoError(t, err)

	// malleate 4th signature by re-signing the vote over a different chain ID
	vote := voteSet.GetByIndex(3)
	v := vote.ToProto()
	err = vals[3].SignVote(context.Background(), "CentaurusA", v)
	require.NoError(t, err)
	vote.Signature = v.Signature
	commit.Signatures[3] = vote.CommitSig()

	err = valSet.VerifyCommit(chainID, blockID, h, commit)
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), "wrong signature (#3)")
	}
}
// TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned
// checks that light verification stops once 2/3+ of the voting power has
// signed: the corrupted 4th signature is never inspected, so no error occurs.
func TestValidatorSet_VerifyCommitLight_ReturnsAsSoonAsMajorityOfVotingPowerSigned(t *testing.T) {
	var (
		chainID = "test_chain_id"
		h       = int64(3)
		blockID = makeBlockIDRandom()
	)

	voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
	commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
	require.NoError(t, err)

	// malleate 4th signature (3 signatures are enough for 2/3+)
	vote := voteSet.GetByIndex(3)
	v := vote.ToProto()
	err = vals[3].SignVote(context.Background(), "CentaurusA", v)
	require.NoError(t, err)
	vote.Signature = v.Signature
	commit.Signatures[3] = vote.CommitSig()

	err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
	assert.NoError(t, err)
}
// TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned
// checks that trusting verification stops once the 1/3 trust level of voting
// power has signed: the corrupted 3rd signature is never inspected.
func TestValidatorSet_VerifyCommitLightTrusting_ReturnsAsSoonAsTrustLevelOfVotingPowerSigned(t *testing.T) {
	var (
		chainID = "test_chain_id"
		h       = int64(3)
		blockID = makeBlockIDRandom()
	)

	voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, 4, 10)
	commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
	require.NoError(t, err)

	// malleate 3rd signature (2 signatures are enough for 1/3+ trust level)
	vote := voteSet.GetByIndex(2)
	v := vote.ToProto()
	err = vals[2].SignVote(context.Background(), "CentaurusA", v)
	require.NoError(t, err)
	vote.Signature = v.Signature
	commit.Signatures[2] = vote.CommitSig()

	err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
	assert.NoError(t, err)
}
// TestEmptySet verifies empty-validator-set behavior: priority operations
// panic on an empty set, validators can then be added via an update, and an
// update may never remove all validators.
func TestEmptySet(t *testing.T) {
	var valList []*Validator
	valSet := NewValidatorSet(valList)
	assert.Panics(t, func() { valSet.IncrementProposerPriority(1) })
	assert.Panics(t, func() { valSet.RescalePriorities(100) })
	assert.Panics(t, func() { valSet.shiftByAvgProposerPriority() })
	assert.Panics(t, func() { assert.Zero(t, computeMaxMinPriorityDiff(valSet)) })
	valSet.GetProposer() // called for effect only; no assertion on the result

	// Add to empty set
	v1 := newValidator([]byte("v1"), 100)
	v2 := newValidator([]byte("v2"), 100)
	valList = []*Validator{v1, v2}
	assert.NoError(t, valSet.UpdateWithChangeSet(valList))
	verifyValidatorSet(t, valSet)

	// Delete all validators from set (power 0 means removal) — must fail
	v1 = newValidator([]byte("v1"), 0)
	v2 = newValidator([]byte("v2"), 0)
	delList := []*Validator{v1, v2}
	assert.Error(t, valSet.UpdateWithChangeSet(delList))

	// Attempt delete from empty set
	// (NOTE(review): the previous delete failed, so the set still holds v1/v2
	// here; the error is expected regardless.)
	assert.Error(t, valSet.UpdateWithChangeSet(delList))
}
// TestUpdatesForNewValidatorSet checks that NewValidatorSet panics on invalid
// input: duplicate addresses, a zero voting power, or a negative voting power.
func TestUpdatesForNewValidatorSet(t *testing.T) {
	v1 := newValidator([]byte("v1"), 100)
	v2 := newValidator([]byte("v2"), 100)
	valList := []*Validator{v1, v2}
	valSet := NewValidatorSet(valList)
	verifyValidatorSet(t, valSet)

	// Verify duplicates are caught in NewValidatorSet() and it panics
	v111 := newValidator([]byte("v1"), 100)
	v112 := newValidator([]byte("v1"), 123)
	v113 := newValidator([]byte("v1"), 234)
	valList = []*Validator{v111, v112, v113}
	assert.Panics(t, func() { NewValidatorSet(valList) })

	// Verify set including validator with voting power 0 cannot be created
	v1 = newValidator([]byte("v1"), 0)
	v2 = newValidator([]byte("v2"), 22)
	v3 := newValidator([]byte("v3"), 33)
	valList = []*Validator{v1, v2, v3}
	assert.Panics(t, func() { NewValidatorSet(valList) })

	// Verify set including validator with negative voting power cannot be created
	v1 = newValidator([]byte("v1"), 10)
	v2 = newValidator([]byte("v2"), -20)
	v3 = newValidator([]byte("v3"), 30)
	valList = []*Validator{v1, v2, v3}
	assert.Panics(t, func() { NewValidatorSet(valList) })
}
// testVal is a compact validator descriptor for the table-driven tests below.
// The name doubles as the raw address bytes; a power of 0 in an update list
// denotes removal of that validator.
type testVal struct {
	name  string
	power int64
}
  782. func permutation(valList []testVal) []testVal {
  783. if len(valList) == 0 {
  784. return nil
  785. }
  786. permList := make([]testVal, len(valList))
  787. perm := tmrand.Perm(len(valList))
  788. for i, v := range perm {
  789. permList[v] = valList[i]
  790. }
  791. return permList
  792. }
  793. func createNewValidatorList(testValList []testVal) []*Validator {
  794. valList := make([]*Validator, 0, len(testValList))
  795. for _, val := range testValList {
  796. valList = append(valList, newValidator([]byte(val.name), val.power))
  797. }
  798. return valList
  799. }
// createNewValidatorSet builds a fresh ValidatorSet from test descriptors.
func createNewValidatorSet(testValList []testVal) *ValidatorSet {
	return NewValidatorSet(createNewValidatorList(testValList))
}
  803. func valSetTotalProposerPriority(valSet *ValidatorSet) int64 {
  804. sum := int64(0)
  805. for _, val := range valSet.Validators {
  806. // mind overflow
  807. sum = safeAddClip(sum, val.ProposerPriority)
  808. }
  809. return sum
  810. }
// verifyValidatorSet asserts the invariants a ValidatorSet must satisfy after
// an update: cached total voting power matches a recount, proposer priorities
// sum to within (-n, n) of zero (centering), and the max-min priority spread
// stays within the scaling window.
func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) {
	// verify that the capacity and length of validators is the same
	assert.Equal(t, len(valSet.Validators), cap(valSet.Validators))

	// verify that the set's total voting power has been updated
	tvp := valSet.totalVotingPower
	valSet.updateTotalVotingPower()
	expectedTvp := valSet.TotalVotingPower()
	assert.Equal(t, expectedTvp, tvp,
		"expected TVP %d. Got %d, valSet=%s", expectedTvp, tvp, valSet)

	// verify that validator priorities are centered
	valsCount := int64(len(valSet.Validators))
	tpp := valSetTotalProposerPriority(valSet)
	assert.True(t, tpp < valsCount && tpp > -valsCount,
		"expected total priority in (-%d, %d). Got %d", valsCount, valsCount, tpp)

	// verify that priorities are scaled
	dist := computeMaxMinPriorityDiff(valSet)
	assert.True(t, dist <= PriorityWindowSizeFactor*tvp,
		"expected priority distance < %d. Got %d", PriorityWindowSizeFactor*tvp, dist)
}
  830. func toTestValList(valList []*Validator) []testVal {
  831. testList := make([]testVal, len(valList))
  832. for i, val := range valList {
  833. testList[i].name = string(val.Address)
  834. testList[i].power = val.VotingPower
  835. }
  836. return testList
  837. }
  838. func testValSet(nVals int, power int64) []testVal {
  839. vals := make([]testVal, nVals)
  840. for i := 0; i < nVals; i++ {
  841. vals[i] = testVal{fmt.Sprintf("v%d", i+1), power}
  842. }
  843. return vals
  844. }
// valSetErrTestCase describes an update that is expected to fail:
// startVals builds the initial set, updateVals is the invalid change set.
type valSetErrTestCase struct {
	startVals  []testVal
	updateVals []testVal
}
  849. func executeValSetErrTestCase(t *testing.T, idx int, tt valSetErrTestCase) {
  850. // create a new set and apply updates, keeping copies for the checks
  851. valSet := createNewValidatorSet(tt.startVals)
  852. valSetCopy := valSet.Copy()
  853. valList := createNewValidatorList(tt.updateVals)
  854. valListCopy := validatorListCopy(valList)
  855. err := valSet.UpdateWithChangeSet(valList)
  856. // for errors check the validator set has not been changed
  857. assert.Error(t, err, "test %d", idx)
  858. assert.Equal(t, valSet, valSetCopy, "test %v", idx)
  859. // check the parameter list has not changed
  860. assert.Equal(t, valList, valListCopy, "test %v", idx)
  861. }
// TestValSetUpdatesDuplicateEntries checks that an update list mentioning the
// same validator more than once — as duplicate changes, duplicate removals,
// or a mix of both — is rejected and leaves the set unchanged.
func TestValSetUpdatesDuplicateEntries(t *testing.T) {
	testCases := []valSetErrTestCase{
		// Duplicate entries in changes
		{ // first entry is duplicated change
			testValSet(2, 10),
			[]testVal{{"v1", 11}, {"v1", 22}},
		},
		{ // second entry is duplicated change
			testValSet(2, 10),
			[]testVal{{"v2", 11}, {"v2", 22}},
		},
		{ // change duplicates are separated by a valid change
			testValSet(2, 10),
			[]testVal{{"v1", 11}, {"v2", 22}, {"v1", 12}},
		},
		{ // change duplicates are separated by a valid change
			testValSet(3, 10),
			[]testVal{{"v1", 11}, {"v3", 22}, {"v1", 12}},
		},

		// Duplicate entries in remove
		{ // first entry is duplicated remove
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v1", 0}},
		},
		{ // second entry is duplicated remove
			testValSet(2, 10),
			[]testVal{{"v2", 0}, {"v2", 0}},
		},
		{ // remove duplicates are separated by a valid remove
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v2", 0}, {"v1", 0}},
		},
		{ // remove duplicates are separated by a valid remove
			testValSet(3, 10),
			[]testVal{{"v1", 0}, {"v3", 0}, {"v1", 0}},
		},

		{ // remove and update same val
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v2", 20}, {"v1", 30}},
		},
		{ // duplicate entries in removes + changes
			testValSet(2, 10),
			[]testVal{{"v1", 0}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
		},
		{ // duplicate entries in removes + changes
			testValSet(3, 10),
			[]testVal{{"v1", 0}, {"v3", 5}, {"v2", 20}, {"v2", 30}, {"v1", 0}},
		},
	}

	for i, tt := range testCases {
		executeValSetErrTestCase(t, i, tt)
	}
}
// TestValSetUpdatesOverflows checks that updates or additions that would push
// the total voting power over MaxTotalVotingPower (or overflow int64) are
// rejected and leave the set unchanged.
func TestValSetUpdatesOverflows(t *testing.T) {
	maxVP := MaxTotalVotingPower
	testCases := []valSetErrTestCase{
		{ // single update leading to overflow
			testValSet(2, 10),
			[]testVal{{"v1", math.MaxInt64}},
		},
		{ // single update leading to overflow
			testValSet(2, 10),
			[]testVal{{"v2", math.MaxInt64}},
		},
		{ // add validator leading to overflow
			testValSet(1, maxVP),
			[]testVal{{"v2", math.MaxInt64}},
		},
		{ // add validator leading to exceed Max
			testValSet(1, maxVP-1),
			[]testVal{{"v2", 5}},
		},
		{ // add validator leading to exceed Max
			testValSet(2, maxVP/3),
			[]testVal{{"v3", maxVP / 2}},
		},
		{ // add validator leading to exceed Max
			testValSet(1, maxVP),
			[]testVal{{"v2", maxVP}},
		},
	}

	for i, tt := range testCases {
		executeValSetErrTestCase(t, i, tt)
	}
}
// TestValSetUpdatesOtherErrors covers the remaining failure modes of
// UpdateWithChangeSet: negative voting power, removing an unknown validator,
// and attempting to delete every validator in the set.
func TestValSetUpdatesOtherErrors(t *testing.T) {
	testCases := []valSetErrTestCase{
		{ // update with negative voting power
			testValSet(2, 10),
			[]testVal{{"v1", -123}},
		},
		{ // update with negative voting power
			testValSet(2, 10),
			[]testVal{{"v2", -123}},
		},
		{ // remove non-existing validator
			testValSet(2, 10),
			[]testVal{{"v3", 0}},
		},
		{ // delete all validators
			[]testVal{{"v1", 10}, {"v2", 20}, {"v3", 30}},
			[]testVal{{"v1", 0}, {"v2", 0}, {"v3", 0}},
		},
	}

	for i, tt := range testCases {
		executeValSetErrTestCase(t, i, tt)
	}
}
// TestValSetUpdatesBasicTestsExecute covers successful updates: no-ops, power
// changes, additions at the head/middle/tail of the set, and deletions. It
// also checks that UpdateWithChangeSet stores copies of the validators it is
// handed rather than aliasing the caller's slice elements.
func TestValSetUpdatesBasicTestsExecute(t *testing.T) {
	valSetUpdatesBasicTests := []struct {
		startVals    []testVal
		updateVals   []testVal
		expectedVals []testVal
	}{
		{ // no changes
			testValSet(2, 10),
			[]testVal{},
			testValSet(2, 10),
		},
		{ // voting power changes
			testValSet(2, 10),
			[]testVal{{"v2", 22}, {"v1", 11}},
			[]testVal{{"v2", 22}, {"v1", 11}},
		},
		{ // add new validators
			[]testVal{{"v2", 20}, {"v1", 10}},
			[]testVal{{"v4", 40}, {"v3", 30}},
			[]testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
		},
		{ // add new validator to middle
			[]testVal{{"v3", 20}, {"v1", 10}},
			[]testVal{{"v2", 30}},
			[]testVal{{"v2", 30}, {"v3", 20}, {"v1", 10}},
		},
		{ // add new validator to beginning
			[]testVal{{"v3", 20}, {"v2", 10}},
			[]testVal{{"v1", 30}},
			[]testVal{{"v1", 30}, {"v3", 20}, {"v2", 10}},
		},
		{ // delete validators
			[]testVal{{"v3", 30}, {"v2", 20}, {"v1", 10}},
			[]testVal{{"v2", 0}},
			[]testVal{{"v3", 30}, {"v1", 10}},
		},
	}

	for i, tt := range valSetUpdatesBasicTests {
		// create a new set and apply updates, keeping copies for the checks
		valSet := createNewValidatorSet(tt.startVals)
		valList := createNewValidatorList(tt.updateVals)

		err := valSet.UpdateWithChangeSet(valList)
		assert.NoError(t, err, "test %d", i)

		valListCopy := validatorListCopy(valSet.Validators)
		// check that the voting power in the set's validators is not changing if the voting power
		// is changed in the list of validators previously passed as parameter to UpdateWithChangeSet.
		// this is to make sure copies of the validators are made by UpdateWithChangeSet.
		if len(valList) > 0 {
			valList[0].VotingPower++
			assert.Equal(t, toTestValList(valListCopy), toTestValList(valSet.Validators), "test %v", i)
		}

		// check the final validator list is as expected and the set is properly scaled and centered.
		assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators), "test %v", i)
		verifyValidatorSet(t, valSet)
	}
}
// Test that different permutations of an update give the same result.
func TestValSetUpdatesOrderIndependenceTestsExecute(t *testing.T) {
	// startVals - initial validators to create the set with
	// updateVals - a sequence of updates to be applied to the set.
	// updateVals is shuffled a number of times during testing to check for same resulting validator set.
	valSetUpdatesOrderTests := []struct {
		startVals  []testVal
		updateVals []testVal
	}{
		0: { // order of changes should not matter, the final validator sets should be the same
			[]testVal{{"v4", 40}, {"v3", 30}, {"v2", 10}, {"v1", 10}},
			[]testVal{{"v4", 44}, {"v3", 33}, {"v2", 22}, {"v1", 11}}},

		1: { // order of additions should not matter
			[]testVal{{"v2", 20}, {"v1", 10}},
			[]testVal{{"v3", 30}, {"v4", 40}, {"v5", 50}, {"v6", 60}}},

		2: { // order of removals should not matter
			[]testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
			[]testVal{{"v1", 0}, {"v3", 0}, {"v4", 0}}},

		3: { // order of mixed operations should not matter
			[]testVal{{"v4", 40}, {"v3", 30}, {"v2", 20}, {"v1", 10}},
			[]testVal{{"v1", 0}, {"v3", 0}, {"v2", 22}, {"v5", 50}, {"v4", 44}}},
	}

	for i, tt := range valSetUpdatesOrderTests {
		// create a new set and apply updates
		valSet := createNewValidatorSet(tt.startVals)
		valSetCopy := valSet.Copy()
		valList := createNewValidatorList(tt.updateVals)
		assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList))

		// save the result as expected for next updates
		valSetExp := valSetCopy.Copy()

		// perform at most 20 permutations on the updates and call UpdateWithChangeSet()
		n := len(tt.updateVals)
		maxNumPerms := tmmath.MinInt(20, n*n)
		for j := 0; j < maxNumPerms; j++ {
			// create a copy of original set and apply a random permutation of updates
			valSetCopy := valSet.Copy()
			valList := createNewValidatorList(permutation(tt.updateVals))

			// check there was no error and the set is properly scaled and centered.
			assert.NoError(t, valSetCopy.UpdateWithChangeSet(valList),
				"test %v failed for permutation %v", i, valList)
			verifyValidatorSet(t, valSetCopy)

			// verify the resulting test is same as the expected
			assert.Equal(t, valSetCopy, valSetExp,
				"test %v failed for permutation %v", i, valList)
		}
	}
}
  1073. // This tests the private function validator_set.go:applyUpdates() function, used only for additions and changes.
  1074. // Should perform a proper merge of updatedVals and startVals
  1075. func TestValSetApplyUpdatesTestsExecute(t *testing.T) {
  1076. valSetUpdatesBasicTests := []struct {
  1077. startVals []testVal
  1078. updateVals []testVal
  1079. expectedVals []testVal
  1080. }{
  1081. // additions
  1082. 0: { // prepend
  1083. []testVal{{"v4", 44}, {"v5", 55}},
  1084. []testVal{{"v1", 11}},
  1085. []testVal{{"v1", 11}, {"v4", 44}, {"v5", 55}}},
  1086. 1: { // append
  1087. []testVal{{"v4", 44}, {"v5", 55}},
  1088. []testVal{{"v6", 66}},
  1089. []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
  1090. 2: { // insert
  1091. []testVal{{"v4", 44}, {"v6", 66}},
  1092. []testVal{{"v5", 55}},
  1093. []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}}},
  1094. 3: { // insert multi
  1095. []testVal{{"v4", 44}, {"v6", 66}, {"v9", 99}},
  1096. []testVal{{"v5", 55}, {"v7", 77}, {"v8", 88}},
  1097. []testVal{{"v4", 44}, {"v5", 55}, {"v6", 66}, {"v7", 77}, {"v8", 88}, {"v9", 99}}},
  1098. // changes
  1099. 4: { // head
  1100. []testVal{{"v1", 111}, {"v2", 22}},
  1101. []testVal{{"v1", 11}},
  1102. []testVal{{"v1", 11}, {"v2", 22}}},
  1103. 5: { // tail
  1104. []testVal{{"v1", 11}, {"v2", 222}},
  1105. []testVal{{"v2", 22}},
  1106. []testVal{{"v1", 11}, {"v2", 22}}},
  1107. 6: { // middle
  1108. []testVal{{"v1", 11}, {"v2", 222}, {"v3", 33}},
  1109. []testVal{{"v2", 22}},
  1110. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
  1111. 7: { // multi
  1112. []testVal{{"v1", 111}, {"v2", 222}, {"v3", 333}},
  1113. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}},
  1114. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}}},
  1115. // additions and changes
  1116. 8: {
  1117. []testVal{{"v1", 111}, {"v2", 22}},
  1118. []testVal{{"v1", 11}, {"v3", 33}, {"v4", 44}},
  1119. []testVal{{"v1", 11}, {"v2", 22}, {"v3", 33}, {"v4", 44}}},
  1120. }
  1121. for i, tt := range valSetUpdatesBasicTests {
  1122. // create a new validator set with the start values
  1123. valSet := createNewValidatorSet(tt.startVals)
  1124. // applyUpdates() with the update values
  1125. valList := createNewValidatorList(tt.updateVals)
  1126. valSet.applyUpdates(valList)
  1127. // check the new list of validators for proper merge
  1128. assert.Equal(t, toTestValList(valSet.Validators), tt.expectedVals, "test %v", i)
  1129. }
  1130. }
// testVSetCfg describes one validator-set update scenario: the starting
// validators, the deletions/updates/additions to apply in a single change
// set, the expected resulting validators, and the expected error (nil on
// success).
type testVSetCfg struct {
	name         string
	startVals    []testVal
	deletedVals  []testVal
	updatedVals  []testVal
	addedVals    []testVal
	expectedVals []testVal
	expErr       error
}
// randTestVSetCfg generates a random update scenario: a base set of nBase
// validators (named "v0".."v<nBase-1>") is partitioned into kept, changed and
// deleted subsets, and up to nAddMax brand-new validators are appended. All
// lists are sorted by voting power so they compare directly with updated sets.
// NOTE(review): the t parameter is unused — candidate for removal.
func randTestVSetCfg(t *testing.T, nBase, nAddMax int) testVSetCfg {
	if nBase <= 0 || nAddMax < 0 {
		panic(fmt.Sprintf("bad parameters %v %v", nBase, nAddMax))
	}

	const maxPower = 1000
	var nOld, nDel, nChanged, nAdd int

	// randomly size the kept (nOld), deleted (nDel) and changed (nChanged)
	// partitions of the base set
	nOld = int(tmrand.Uint()%uint(nBase)) + 1
	if nBase-nOld > 0 {
		nDel = int(tmrand.Uint() % uint(nBase-nOld))
	}
	nChanged = nBase - nOld - nDel
	if nAddMax > 0 {
		nAdd = tmrand.Int()%nAddMax + 1
	}

	cfg := testVSetCfg{}
	cfg.startVals = make([]testVal, nBase)
	cfg.deletedVals = make([]testVal, nDel)
	cfg.addedVals = make([]testVal, nAdd)
	cfg.updatedVals = make([]testVal, nChanged)
	cfg.expectedVals = make([]testVal, nBase-nDel+nAdd)

	// base validators: indices [0,nOld) are kept unchanged,
	// [nOld,nOld+nChanged) get a new random power, the rest are deleted
	for i := 0; i < nBase; i++ {
		cfg.startVals[i] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
		if i < nOld {
			cfg.expectedVals[i] = cfg.startVals[i]
		}
		if i >= nOld && i < nOld+nChanged {
			cfg.updatedVals[i-nOld] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
			cfg.expectedVals[i] = cfg.updatedVals[i-nOld]
		}
		if i >= nOld+nChanged {
			// power 0 marks the validator for removal
			cfg.deletedVals[i-nOld-nChanged] = testVal{fmt.Sprintf("v%d", i), 0}
		}
	}

	// brand-new validators appended after the base set; they replace the
	// deleted entries' slots in expectedVals (hence the -nDel offset)
	for i := nBase; i < nBase+nAdd; i++ {
		cfg.addedVals[i-nBase] = testVal{fmt.Sprintf("v%d", i), int64(tmrand.Uint()%maxPower + 1)}
		cfg.expectedVals[i-nDel] = cfg.addedVals[i-nBase]
	}

	sort.Sort(testValsByVotingPower(cfg.startVals))
	sort.Sort(testValsByVotingPower(cfg.deletedVals))
	sort.Sort(testValsByVotingPower(cfg.updatedVals))
	sort.Sort(testValsByVotingPower(cfg.addedVals))
	sort.Sort(testValsByVotingPower(cfg.expectedVals))

	return cfg
}
  1184. func applyChangesToValSet(t *testing.T, expErr error, valSet *ValidatorSet, valsLists ...[]testVal) {
  1185. changes := make([]testVal, 0)
  1186. for _, valsList := range valsLists {
  1187. changes = append(changes, valsList...)
  1188. }
  1189. valList := createNewValidatorList(changes)
  1190. err := valSet.UpdateWithChangeSet(valList)
  1191. if expErr != nil {
  1192. assert.Equal(t, expErr, err)
  1193. } else {
  1194. assert.NoError(t, err)
  1195. }
  1196. }
// TestValSetUpdatePriorityOrderTests runs a random number of proposer
// elections on a set, applies adds/updates/deletes, and delegates to
// verifyValSetUpdatePriorityOrder to check that newly added validators enter
// with the lowest proposer priority.
func TestValSetUpdatePriorityOrderTests(t *testing.T) {
	const nMaxElections int32 = 5000

	testCases := []testVSetCfg{
		0: { // remove high power validator, keep old equal lower power validators
			startVals:    []testVal{{"v3", 1000}, {"v1", 1}, {"v2", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{},
			addedVals:    []testVal{},
			expectedVals: []testVal{{"v1", 1}, {"v2", 1}},
		},
		1: { // remove high power validator, keep old different power validators
			startVals:    []testVal{{"v3", 1000}, {"v2", 10}, {"v1", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{},
			addedVals:    []testVal{},
			expectedVals: []testVal{{"v2", 10}, {"v1", 1}},
		},
		2: { // remove high power validator, add new low power validators, keep old lower power
			startVals:    []testVal{{"v3", 1000}, {"v2", 2}, {"v1", 1}},
			deletedVals:  []testVal{{"v3", 0}},
			updatedVals:  []testVal{{"v2", 1}},
			addedVals:    []testVal{{"v5", 50}, {"v4", 40}},
			expectedVals: []testVal{{"v5", 50}, {"v4", 40}, {"v1", 1}, {"v2", 1}},
		},

		// generate a configuration with 100 validators,
		// randomly select validators for updates and deletes, and
		// generate 10 new validators to be added
		3: randTestVSetCfg(t, 100, 10),

		4: randTestVSetCfg(t, 1000, 100),

		5: randTestVSetCfg(t, 10, 100),

		6: randTestVSetCfg(t, 100, 1000),

		7: randTestVSetCfg(t, 1000, 1000),
	}

	for _, cfg := range testCases {
		// create a new validator set
		valSet := createNewValidatorSet(cfg.startVals)
		verifyValidatorSet(t, valSet)

		// run election up to nMaxElections times, apply changes and verify that the priority order is correct
		verifyValSetUpdatePriorityOrder(t, valSet, cfg, nMaxElections)
	}
}
// verifyValSetUpdatePriorityOrder applies cfg's changes after a random number
// of elections and checks that (a) the resulting set matches cfg.expectedVals
// and (b) all freshly added validators share the same, smallest proposer
// priority in the set.
func verifyValSetUpdatePriorityOrder(t *testing.T, valSet *ValidatorSet, cfg testVSetCfg, nMaxElections int32) {
	// Run election up to nMaxElections times, sort validators by priorities
	valSet.IncrementProposerPriority(tmrand.Int31()%nMaxElections + 1)

	// apply the changes, get the updated validators, sort by priorities
	applyChangesToValSet(t, nil, valSet, cfg.addedVals, cfg.updatedVals, cfg.deletedVals)

	// basic checks
	assert.Equal(t, cfg.expectedVals, toTestValList(valSet.Validators))
	verifyValidatorSet(t, valSet)

	// verify that the added validators have the smallest priority:
	// - they should be at the beginning of updatedValsPriSorted since it is
	//   sorted by priority
	if len(cfg.addedVals) > 0 {
		updatedValsPriSorted := validatorListCopy(valSet.Validators)
		sort.Sort(validatorsByPriority(updatedValsPriSorted))

		addedValsPriSlice := updatedValsPriSorted[:len(cfg.addedVals)]
		sort.Sort(ValidatorsByVotingPower(addedValsPriSlice))
		assert.Equal(t, cfg.addedVals, toTestValList(addedValsPriSlice))

		// - and should all have the same priority
		expectedPri := addedValsPriSlice[0].ProposerPriority
		for _, val := range addedValsPriSlice[1:] {
			assert.Equal(t, expectedPri, val.ProposerPriority)
		}
	}
}
// TestNewValidatorSetFromExistingValidators checks that
// ValidatorSetFromExistingValidators preserves proposer priorities, whereas
// NewValidatorSet produces a set that differs from the incremented original.
func TestNewValidatorSetFromExistingValidators(t *testing.T) {
	size := 5
	vals := make([]*Validator, size)
	for i := 0; i < size; i++ {
		pv := NewMockPV()
		vals[i] = pv.ExtractIntoValidator(int64(i + 1))
	}
	valSet := NewValidatorSet(vals)
	valSet.IncrementProposerPriority(5)

	// building via NewValidatorSet yields a set that is NOT equal ...
	newValSet := NewValidatorSet(valSet.Validators)
	assert.NotEqual(t, valSet, newValSet)

	// ... while ValidatorSetFromExistingValidators reproduces it exactly,
	// including behavior under further priority increments
	existingValSet, err := ValidatorSetFromExistingValidators(valSet.Validators)
	assert.NoError(t, err)
	assert.Equal(t, valSet, existingValSet)
	assert.Equal(t, valSet.CopyIncrementProposerPriority(3), existingValSet.CopyIncrementProposerPriority(3))
}
// TestValSetUpdateOverflowRelated is the regression suite for the spurious
// total-voting-power overflow warnings (tendermint issue #4164): power swaps
// and decreases near MaxTotalVotingPower must succeed without a false
// overflow error, while a genuine overflow must be reported (not panic) and
// leave the set unchanged.
func TestValSetUpdateOverflowRelated(t *testing.T) {
	testCases := []testVSetCfg{
		{
			name:         "1 no false overflow error messages for updates",
			startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
			updatedVals:  []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
			expectedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}},
			expErr:       nil,
		},
		{
			// this test shows that it is important to apply the updates in the order of the change in power
			// i.e. apply first updates with decreases in power, v2 change in this case.
			name:         "2 no false overflow error messages for updates",
			startVals:    []testVal{{"v2", MaxTotalVotingPower - 1}, {"v1", 1}},
			updatedVals:  []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}},
			expectedVals: []testVal{{"v2", MaxTotalVotingPower / 2}, {"v1", MaxTotalVotingPower/2 - 1}},
			expErr:       nil,
		},
		{
			name:         "3 no false overflow error messages for deletes",
			startVals:    []testVal{{"v1", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
			deletedVals:  []testVal{{"v1", 0}},
			addedVals:    []testVal{{"v4", MaxTotalVotingPower - 2}},
			expectedVals: []testVal{{"v4", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}},
			expErr:       nil,
		},
		{
			name: "4 no false overflow error messages for adds, updates and deletes",
			startVals: []testVal{
				{"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4},
				{"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}},
			deletedVals: []testVal{{"v2", 0}},
			updatedVals: []testVal{
				{"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}},
			addedVals: []testVal{{"v5", 3}},
			expectedVals: []testVal{
				{"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v5", 3}, {"v4", 2}},
			expErr: nil,
		},
		{
			name: "5 check panic on overflow is prevented: update 8 validators with power int64(math.MaxInt64)/8",
			startVals: []testVal{
				{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
				{"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
			updatedVals: []testVal{
				{"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower},
				{"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower},
				{"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}},
			expectedVals: []testVal{
				{"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1},
				{"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}},
			expErr: ErrTotalVotingPowerOverflow,
		},
	}

	for _, tt := range testCases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			valSet := createNewValidatorSet(tt.startVals)
			verifyValidatorSet(t, valSet)

			// execute update and verify returned error is as expected
			applyChangesToValSet(t, tt.expErr, valSet, tt.addedVals, tt.updatedVals, tt.deletedVals)

			// verify updated validator set is as expected
			assert.Equal(t, tt.expectedVals, toTestValList(valSet.Validators))
			verifyValidatorSet(t, valSet)
		})
	}
}
  1345. func TestValidatorSet_VerifyCommitLightTrusting(t *testing.T) {
  1346. var (
  1347. blockID = makeBlockIDRandom()
  1348. voteSet, originalValset, vals = randVoteSet(1, 1, tmproto.PrecommitType, 6, 1)
  1349. commit, err = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now())
  1350. newValSet, _ = RandValidatorSet(2, 1)
  1351. )
  1352. require.NoError(t, err)
  1353. testCases := []struct {
  1354. valSet *ValidatorSet
  1355. err bool
  1356. }{
  1357. // good
  1358. 0: {
  1359. valSet: originalValset,
  1360. err: false,
  1361. },
  1362. // bad - no overlap between validator sets
  1363. 1: {
  1364. valSet: newValSet,
  1365. err: true,
  1366. },
  1367. // good - first two are different but the rest of the same -> >1/3
  1368. 2: {
  1369. valSet: NewValidatorSet(append(newValSet.Validators, originalValset.Validators...)),
  1370. err: false,
  1371. },
  1372. }
  1373. for _, tc := range testCases {
  1374. err = tc.valSet.VerifyCommitLightTrusting("test_chain_id", commit,
  1375. tmmath.Fraction{Numerator: 1, Denominator: 3})
  1376. if tc.err {
  1377. assert.Error(t, err)
  1378. } else {
  1379. assert.NoError(t, err)
  1380. }
  1381. }
  1382. }
  1383. func TestValidatorSet_VerifyCommitLightTrustingErrorsOnOverflow(t *testing.T) {
  1384. var (
  1385. blockID = makeBlockIDRandom()
  1386. voteSet, valSet, vals = randVoteSet(1, 1, tmproto.PrecommitType, 1, MaxTotalVotingPower)
  1387. commit, err = MakeCommit(blockID, 1, 1, voteSet, vals, time.Now())
  1388. )
  1389. require.NoError(t, err)
  1390. err = valSet.VerifyCommitLightTrusting("test_chain_id", commit,
  1391. tmmath.Fraction{Numerator: 25, Denominator: 55})
  1392. if assert.Error(t, err) {
  1393. assert.Contains(t, err.Error(), "int64 overflow")
  1394. }
  1395. }
  1396. func TestSafeMul(t *testing.T) {
  1397. testCases := []struct {
  1398. a int64
  1399. b int64
  1400. c int64
  1401. overflow bool
  1402. }{
  1403. 0: {0, 0, 0, false},
  1404. 1: {1, 0, 0, false},
  1405. 2: {2, 3, 6, false},
  1406. 3: {2, -3, -6, false},
  1407. 4: {-2, -3, 6, false},
  1408. 5: {-2, 3, -6, false},
  1409. 6: {math.MaxInt64, 1, math.MaxInt64, false},
  1410. 7: {math.MaxInt64 / 2, 2, math.MaxInt64 - 1, false},
  1411. 8: {math.MaxInt64 / 2, 3, 0, true},
  1412. 9: {math.MaxInt64, 2, 0, true},
  1413. }
  1414. for i, tc := range testCases {
  1415. c, overflow := safeMul(tc.a, tc.b)
  1416. assert.Equal(t, tc.c, c, "#%d", i)
  1417. assert.Equal(t, tc.overflow, overflow, "#%d", i)
  1418. }
  1419. }
  1420. func TestValidatorSetProtoBuf(t *testing.T) {
  1421. valset, _ := RandValidatorSet(10, 100)
  1422. valset2, _ := RandValidatorSet(10, 100)
  1423. valset2.Validators[0] = &Validator{}
  1424. valset3, _ := RandValidatorSet(10, 100)
  1425. valset3.Proposer = nil
  1426. valset4, _ := RandValidatorSet(10, 100)
  1427. valset4.Proposer = &Validator{}
  1428. testCases := []struct {
  1429. msg string
  1430. v1 *ValidatorSet
  1431. expPass1 bool
  1432. expPass2 bool
  1433. }{
  1434. {"success", valset, true, true},
  1435. {"fail valSet2, pubkey empty", valset2, false, false},
  1436. {"fail nil Proposer", valset3, false, false},
  1437. {"fail empty Proposer", valset4, false, false},
  1438. {"fail empty valSet", &ValidatorSet{}, true, false},
  1439. {"false nil", nil, true, false},
  1440. }
  1441. for _, tc := range testCases {
  1442. protoValSet, err := tc.v1.ToProto()
  1443. if tc.expPass1 {
  1444. require.NoError(t, err, tc.msg)
  1445. } else {
  1446. require.Error(t, err, tc.msg)
  1447. }
  1448. valSet, err := ValidatorSetFromProto(protoValSet)
  1449. if tc.expPass2 {
  1450. require.NoError(t, err, tc.msg)
  1451. require.EqualValues(t, tc.v1, valSet, tc.msg)
  1452. } else {
  1453. require.Error(t, err, tc.msg)
  1454. }
  1455. }
  1456. }
  1457. //---------------------
  1458. // Sort validators by priority and address
  1459. type validatorsByPriority []*Validator
  1460. func (valz validatorsByPriority) Len() int {
  1461. return len(valz)
  1462. }
  1463. func (valz validatorsByPriority) Less(i, j int) bool {
  1464. if valz[i].ProposerPriority < valz[j].ProposerPriority {
  1465. return true
  1466. }
  1467. if valz[i].ProposerPriority > valz[j].ProposerPriority {
  1468. return false
  1469. }
  1470. return bytes.Compare(valz[i].Address, valz[j].Address) < 0
  1471. }
  1472. func (valz validatorsByPriority) Swap(i, j int) {
  1473. valz[i], valz[j] = valz[j], valz[i]
  1474. }
  1475. //-------------------------------------
  1476. type testValsByVotingPower []testVal
  1477. func (tvals testValsByVotingPower) Len() int {
  1478. return len(tvals)
  1479. }
  1480. func (tvals testValsByVotingPower) Less(i, j int) bool {
  1481. if tvals[i].power == tvals[j].power {
  1482. return bytes.Compare([]byte(tvals[i].name), []byte(tvals[j].name)) == -1
  1483. }
  1484. return tvals[i].power > tvals[j].power
  1485. }
  1486. func (tvals testValsByVotingPower) Swap(i, j int) {
  1487. tvals[i], tvals[j] = tvals[j], tvals[i]
  1488. }
  1489. //-------------------------------------
  1490. // Benchmark tests
  1491. //
  1492. func BenchmarkUpdates(b *testing.B) {
  1493. const (
  1494. n = 100
  1495. m = 2000
  1496. )
  1497. // Init with n validators
  1498. vs := make([]*Validator, n)
  1499. for j := 0; j < n; j++ {
  1500. vs[j] = newValidator([]byte(fmt.Sprintf("v%d", j)), 100)
  1501. }
  1502. valSet := NewValidatorSet(vs)
  1503. l := len(valSet.Validators)
  1504. // Make m new validators
  1505. newValList := make([]*Validator, m)
  1506. for j := 0; j < m; j++ {
  1507. newValList[j] = newValidator([]byte(fmt.Sprintf("v%d", j+l)), 1000)
  1508. }
  1509. b.ResetTimer()
  1510. for i := 0; i < b.N; i++ {
  1511. // Add m validators to valSetCopy
  1512. valSetCopy := valSet.Copy()
  1513. assert.NoError(b, valSetCopy.UpdateWithChangeSet(newValList))
  1514. }
  1515. }
  1516. func BenchmarkValidatorSet_VerifyCommit_Ed25519(b *testing.B) {
  1517. for _, n := range []int{1, 8, 64, 1024} {
  1518. n := n
  1519. var (
  1520. chainID = "test_chain_id"
  1521. h = int64(3)
  1522. blockID = makeBlockIDRandom()
  1523. )
  1524. b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) {
  1525. b.ReportAllocs()
  1526. // generate n validators
  1527. voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5))
  1528. // create a commit with n validators
  1529. commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
  1530. require.NoError(b, err)
  1531. for i := 0; i < b.N/n; i++ {
  1532. err = valSet.VerifyCommit(chainID, blockID, h, commit)
  1533. assert.NoError(b, err)
  1534. }
  1535. })
  1536. }
  1537. }
  1538. func BenchmarkValidatorSet_VerifyCommitLight_Ed25519(b *testing.B) {
  1539. for _, n := range []int{1, 8, 64, 1024} {
  1540. n := n
  1541. var (
  1542. chainID = "test_chain_id"
  1543. h = int64(3)
  1544. blockID = makeBlockIDRandom()
  1545. )
  1546. b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) {
  1547. b.ReportAllocs()
  1548. // generate n validators
  1549. voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5))
  1550. // create a commit with n validators
  1551. commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
  1552. require.NoError(b, err)
  1553. for i := 0; i < b.N/n; i++ {
  1554. err = valSet.VerifyCommitLight(chainID, blockID, h, commit)
  1555. assert.NoError(b, err)
  1556. }
  1557. })
  1558. }
  1559. }
  1560. func BenchmarkValidatorSet_VerifyCommitLightTrusting_Ed25519(b *testing.B) {
  1561. for _, n := range []int{1, 8, 64, 1024} {
  1562. n := n
  1563. var (
  1564. chainID = "test_chain_id"
  1565. h = int64(3)
  1566. blockID = makeBlockIDRandom()
  1567. )
  1568. b.Run(fmt.Sprintf("valset size %d", n), func(b *testing.B) {
  1569. b.ReportAllocs()
  1570. // generate n validators
  1571. voteSet, valSet, vals := randVoteSet(h, 0, tmproto.PrecommitType, n, int64(n*5))
  1572. // create a commit with n validators
  1573. commit, err := MakeCommit(blockID, h, 0, voteSet, vals, time.Now())
  1574. require.NoError(b, err)
  1575. for i := 0; i < b.N/n; i++ {
  1576. err = valSet.VerifyCommitLightTrusting(chainID, commit, tmmath.Fraction{Numerator: 1, Denominator: 3})
  1577. assert.NoError(b, err)
  1578. }
  1579. })
  1580. }
  1581. }