Merge branch 'master' into callum/4728-check-evidence

pull/4743/head
Callum Waters, 5 years ago, committed by GitHub
commit 3da6d94001
32 changed files with 222 additions and 660 deletions
 1. .github/PULL_REQUEST_TEMPLATE.md  +2 -13
 2. .github/auto-comment.yml  +16 -0
 3. .github/workflows/proto.yml  +8 -1
 4. .github/workflows/tests.yml  +32 -4
 5. CHANGELOG_PENDING.md  +5 -0
 6. blockchain/v0/reactor.go  +0 -3
 7. blockchain/v1/reactor.go  +0 -3
 8. blockchain/v2/reactor.go  +0 -3
 9. config/config.go  +0 -6
10. consensus/reactor.go  +0 -3
11. crypto/merkle/simple_map.go  +0 -95
12. crypto/merkle/simple_map_test.go  +0 -49
13. crypto/merkle/simple_proof.go  +0 -25
14. crypto/merkle/simple_tree.go  +0 -12
15. docs/tendermint-core/running-in-production.md  +26 -14
16. evidence/doc.go  +38 -0
17. evidence/pool.go  +26 -20
18. evidence/reactor.go  +1 -3
19. evidence/reactor_test.go  +1 -2
20. mempool/reactor.go  +4 -6
21. node/node.go  +2 -21
22. p2p/pex/pex_reactor.go  +4 -6
23. privval/file.go  +1 -1
24. privval/file_deprecated.go  +0 -81
25. privval/file_deprecated_test.go  +0 -84
26. rpc/client/evidence_test.go  +2 -2
27. scripts/privValUpgrade.go  +0 -45
28. scripts/privValUpgrade_test.go  +0 -129
29. types/evidence.go  +9 -15
30. types/evidence_test.go  +41 -9
31. types/protobuf_test.go  +2 -3
32. types/vote.go  +2 -2

.github/PULL_REQUEST_TEMPLATE.md  +2 -13

@@ -1,18 +1,7 @@
## Description
<!-- Add a description of the changes that this PR introduces and the files that
are the most critical to review.
-->
_Please add a description of the changes that this PR introduces and the files that
are the most critical to review._
Closes: #XXX
---
For contributor use:
- [ ] Wrote tests
- [ ] Updated CHANGELOG_PENDING.md
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
- [ ] Updated relevant documentation (`docs/`) and code comments
- [ ] Re-reviewed `Files changed` in the Github PR explorer
- [ ] Applied Appropriate Labels

.github/auto-comment.yml  +16 -0

@@ -0,0 +1,16 @@
pullRequestOpened: |
:wave: Thanks for creating a PR!
Before we can merge this PR, please make sure that all the following items have been
checked off. If any of the checklist items are not applicable, please leave them but
write a little note why.
- [ ] Wrote tests
- [ ] Updated CHANGELOG_PENDING.md
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
- [ ] Updated relevant documentation (`docs/`) and code comments
- [ ] Re-reviewed `Files changed` in the Github PR explorer
- [ ] Applied Appropriate Labels
Thank you for your contribution to Tendermint! :rocket:

.github/workflows/proto.yml  +8 -1

@@ -5,8 +5,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: docker-practice/actions-setup-docker@master
- uses: technote-space/get-diff-action@v1
id: git_diff
with:
SUFFIX_FILTER: .proto
SET_ENV_NAME_INSERTIONS: 1
SET_ENV_NAME_LINES: 1
- name: lint
run: make proto-lint
if: "env.GIT_DIFF != ''"
- name: check-breakage
run: make proto-check-breaking-ci
if: "env.GIT_DIFF != ''"

.github/workflows/tests.yml  +32 -4

@@ -7,6 +7,23 @@ on:
- release/**
jobs:
Diff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v1
id: git_diff
with:
SUFFIX_FILTER: |
.go
.mod
.sum
SET_ENV_NAME_INSERTIONS: 1
SET_ENV_NAME_LINES: 1
- name: test
run: exit 1
if: "env.GIT_DIFF == ''"
cleanup-runs:
runs-on: ubuntu-latest
steps:
@@ -14,6 +31,7 @@ jobs:
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"
build:
name: Build
runs-on: ubuntu-latest
@@ -33,7 +51,7 @@ jobs:
test_abci_apps:
runs-on: ubuntu-latest
needs: Build
needs: [Build, Diff]
steps:
- uses: actions/setup-go@v2-beta
- name: Set GOBIN
@@ -50,7 +68,7 @@ jobs:
test_abci_cli:
runs-on: ubuntu-latest
needs: Build
needs: [Build, Diff]
steps:
- uses: actions/setup-go@v2-beta
- name: Set GOBIN
@@ -68,15 +86,25 @@
runs-on: ubuntu-latest
needs: Build
steps:
- uses: actions/setup-go@v2-beta
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v1
id: git_diff
with:
SUFFIX_FILTER: |
.go
.mod
.sum
SET_ENV_NAME_INSERTIONS: 1
SET_ENV_NAME_LINES: 1
- name: Set GOBIN
run: |
echo "::add-path::$(go env GOPATH)/bin"
- uses: actions/checkout@v2
- uses: actions/cache@v1
with:
path: ~/go/bin
key: ${{ runner.os }}-go-tm-binary
if: "env.GIT_DIFF != ''"
- name: test_apps
run: test/app/test.sh
shell: bash
if: "env.GIT_DIFF != ''"

CHANGELOG_PENDING.md  +5 -0

@@ -10,12 +10,17 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
- CLI/RPC/Config
- [evidence] \#4725 Remove `Pubkey` from DuplicateVoteEvidence
- Apps
- P2P Protocol
- Go API
- [crypto] [\#4721](https://github.com/tendermint/tendermint/pull/4721) Remove `SimpleHashFromMap()` and `SimpleProofsFromMap()` (@erikgrinaker)
- [privval] [\#4744](https://github.com/tendermint/tendermint/pull/4744) Remove deprecated `OldFilePV` (@melekes)
- Blockchain Protocol
### FEATURES:


blockchain/v0/reactor.go  +0 -3

@@ -394,9 +394,6 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
}
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


blockchain/v1/reactor.go  +0 -3

@@ -539,9 +539,6 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
}
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


blockchain/v2/reactor.go  +0 -3

@@ -453,9 +453,6 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {
}
func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


config/config.go  +0 -6

@@ -263,12 +263,6 @@ func (cfg BaseConfig) PrivValidatorStateFile() string {
return rootify(cfg.PrivValidatorState, cfg.RootDir)
}
// OldPrivValidatorFile returns the full path of the priv_validator.json from pre v0.28.0.
// TODO: eventually remove.
func (cfg BaseConfig) OldPrivValidatorFile() string {
return rootify(oldPrivValPath, cfg.RootDir)
}
// NodeKeyFile returns the full path to the node_key.json file
func (cfg BaseConfig) NodeKeyFile() string {
return rootify(cfg.NodeKey, cfg.RootDir)


consensus/reactor.go  +0 -3

@@ -1404,9 +1404,6 @@ func RegisterMessages(cdc *amino.Codec) {
}
func decodeMsg(bz []byte) (msg Message, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


crypto/merkle/simple_map.go  +0 -95

@@ -1,95 +0,0 @@
package merkle
import (
"bytes"
amino "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/libs/kv"
)
// Merkle tree from a map.
// Leaves are `hash(key) | hash(value)`.
// Leaves are sorted before Merkle hashing.
type simpleMap struct {
kvs kv.Pairs
sorted bool
}
func newSimpleMap() *simpleMap {
return &simpleMap{
kvs: nil,
sorted: false,
}
}
// Set creates a kv pair of the key and the hash of the value,
// and then appends it to simpleMap's kv pairs.
func (sm *simpleMap) Set(key string, value []byte) {
sm.sorted = false
// The value is hashed, so you can
// check for equality with a cached value (say)
// and make a determination to fetch or not.
vhash := tmhash.Sum(value)
sm.kvs = append(sm.kvs, kv.Pair{
Key: []byte(key),
Value: vhash,
})
}
// Hash Merkle root hash of items sorted by key
// (UNSTABLE: and by value too if duplicate key).
func (sm *simpleMap) Hash() []byte {
sm.Sort()
return hashKVPairs(sm.kvs)
}
func (sm *simpleMap) Sort() {
if sm.sorted {
return
}
sm.kvs.Sort()
sm.sorted = true
}
// Returns a copy of sorted KVPairs.
// NOTE these contain the hashed key and value.
func (sm *simpleMap) KVPairs() kv.Pairs {
sm.Sort()
kvs := make(kv.Pairs, len(sm.kvs))
copy(kvs, sm.kvs)
return kvs
}
//----------------------------------------
// A local extension to KVPair that can be hashed.
// Key and value are length prefixed and concatenated,
// then hashed.
type KVPair kv.Pair
// Bytes returns key || value, with both the
// key and value length prefixed.
func (kv KVPair) Bytes() []byte {
var b bytes.Buffer
err := amino.EncodeByteSlice(&b, kv.Key)
if err != nil {
panic(err)
}
err = amino.EncodeByteSlice(&b, kv.Value)
if err != nil {
panic(err)
}
return b.Bytes()
}
func hashKVPairs(kvs kv.Pairs) []byte {
kvsH := make([][]byte, len(kvs))
for i, kvp := range kvs {
kvsH[i] = KVPair(kvp).Bytes()
}
return SimpleHashFromByteSlices(kvsH)
}

crypto/merkle/simple_map_test.go  +0 -49

@@ -1,49 +0,0 @@
package merkle
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSimpleMap(t *testing.T) {
tests := []struct {
keys []string
values []string // each string gets converted to []byte in test
want string
}{
{[]string{"key1"}, []string{"value1"}, "a44d3cc7daba1a4600b00a2434b30f8b970652169810d6dfa9fb1793a2189324"},
{[]string{"key1"}, []string{"value2"}, "0638e99b3445caec9d95c05e1a3fc1487b4ddec6a952ff337080360b0dcc078c"},
// swap order with 2 keys
{
[]string{"key1", "key2"},
[]string{"value1", "value2"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
{
[]string{"key2", "key1"},
[]string{"value2", "value1"},
"8fd19b19e7bb3f2b3ee0574027d8a5a4cec370464ea2db2fbfa5c7d35bb0cff3",
},
// swap order with 3 keys
{
[]string{"key1", "key2", "key3"},
[]string{"value1", "value2", "value3"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
{
[]string{"key1", "key3", "key2"},
[]string{"value1", "value3", "value2"},
"1dd674ec6782a0d586a903c9c63326a41cbe56b3bba33ed6ff5b527af6efb3dc",
},
}
for i, tc := range tests {
db := newSimpleMap()
for i := 0; i < len(tc.keys); i++ {
db.Set(tc.keys[i], []byte(tc.values[i]))
}
got := db.Hash()
assert.Equal(t, tc.want, fmt.Sprintf("%x", got), "Hash didn't match on tc %d", i)
}
}

crypto/merkle/simple_proof.go  +0 -25

@@ -47,31 +47,6 @@ func SimpleProofsFromByteSlices(items [][]byte) (rootHash []byte, proofs []*Simp
return
}
// SimpleProofsFromMap generates proofs from a map. The keys/values of the map will be used as the keys/values
// in the underlying key-value pairs.
// The keys are sorted before the proofs are computed.
func SimpleProofsFromMap(m map[string][]byte) (rootHash []byte, proofs map[string]*SimpleProof, keys []string) {
sm := newSimpleMap()
for k, v := range m {
sm.Set(k, v)
}
sm.Sort()
kvs := sm.kvs
kvsBytes := make([][]byte, len(kvs))
for i, kvp := range kvs {
kvsBytes[i] = KVPair(kvp).Bytes()
}
rootHash, proofList := SimpleProofsFromByteSlices(kvsBytes)
proofs = make(map[string]*SimpleProof)
keys = make([]string, len(proofList))
for i, kvp := range kvs {
proofs[string(kvp.Key)] = proofList[i]
keys[i] = string(kvp.Key)
}
return
}
// Verify that the SimpleProof proves the root hash.
// Check sp.Index/sp.Total manually if needed
func (sp *SimpleProof) Verify(rootHash []byte, leaf []byte) error {


crypto/merkle/simple_tree.go  +0 -12

@@ -91,18 +91,6 @@ func SimpleHashFromByteSlicesIterative(input [][]byte) []byte {
}
}
// SimpleHashFromMap computes a Merkle tree from sorted map.
// Like calling SimpleHashFromHashers with
// `item = []byte(Hash(key) | Hash(value))`,
// sorted by `item`.
func SimpleHashFromMap(m map[string][]byte) []byte {
sm := newSimpleMap()
for k, v := range m {
sm.Set(k, v)
}
return sm.Hash()
}
// getSplitPoint returns the largest power of 2 less than length
func getSplitPoint(length int) int {
if length < 1 {


docs/tendermint-core/running-in-production.md  +26 -14

@@ -247,15 +247,14 @@ $EDITOR /tmp/corrupted_wal
### Processor and Memory
While actual specs vary depending on the load and validators count,
minimal requirements are:
While actual specs vary depending on the load and validators count, minimal
requirements are:
- 1GB RAM
- 25GB of disk space
- 1.4 GHz CPU
SSD disks are preferable for applications with high transaction
throughput.
SSD disks are preferable for applications with high transaction throughput.
Recommended:
@@ -263,21 +262,34 @@ Recommended:
- 100GB SSD
- x64 2.0 GHz 2v CPU
While for now, Tendermint stores all the history and it may require
significant disk space over time, we are planning to implement state
syncing (See
[this issue](https://github.com/tendermint/tendermint/issues/828)). So,
storing all the past blocks will not be necessary.
While for now, Tendermint stores all the history and it may require significant
disk space over time, we are planning to implement state syncing (See [this
issue](https://github.com/tendermint/tendermint/issues/828)). So, storing all
the past blocks will not be necessary.
### Validator signing on 32 bit architectures (or ARM)
Both our `ed25519` and `secp256k1` implementations require constant time
`uint64` multiplication. Non-constant time crypto can (and has) leaked
private keys on both `ed25519` and `secp256k1`. This doesn't exist in hardware
on 32 bit x86 platforms ([source](https://bearssl.org/ctmul.html)), and it
depends on the compiler to enforce that it is constant time. It's unclear at
this point whenever the Golang compiler does this correctly for all
implementations.
**We do not support nor recommend running a validator on 32 bit architectures OR
the "VIA Nano 2000 Series", and the architectures in the ARM section rated
"S-".**
### Operating Systems
Tendermint can be compiled for a wide range of operating systems thanks
to Go language (the list of \$OS/\$ARCH pairs can be found
Tendermint can be compiled for a wide range of operating systems thanks to Go
language (the list of \$OS/\$ARCH pairs can be found
[here](https://golang.org/doc/install/source#environment)).
While we do not favor any operation system, more secure and stable Linux
server distributions (like Centos) should be preferred over desktop
operation systems (like Mac OS).
While we do not favor any operation system, more secure and stable Linux server
distributions (like Centos) should be preferred over desktop operation systems
(like Mac OS).
### Miscellaneous


evidence/doc.go  +38 -0

@@ -0,0 +1,38 @@
/*
Package evidence handles all evidence storage and gossiping from detection to block proposal.
For the different types of evidence refer to the `evidence.go` file in the types package
or https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md.
Gossiping
The core functionality begins with the evidence reactor (see reactor.go), which operates both the sending and receiving of evidence.
The `Receive` function takes a list of evidence and does the following:
1. Breaks it down into individual evidence if it is `Composite Evidence`
(see types/evidence.go#ConflictingHeadersEvidence)
2. Checks that it does not already have the evidence stored
3. Verifies the evidence against the node's state (see state/validation.go#VerifyEvidence)
4. Stores the evidence to a db and a concurrent list
The gossiping of evidence is initiated when a peer is added which starts a go routine to broadcast currently
uncommitted evidence at intervals of 60 seconds (set by broadcastEvidenceIntervalS).
It uses a concurrent list to store the evidence and before sending verifies that each evidence is still valid in the
sense that it has not exceeded the max evidence age and height (see types/params.go#EvidenceParams).
Proposing
When a new block is being proposed (in state/execution.go#CreateProposalBlock),
`PendingEvidence(maxNum)` is called to send up to the maxNum number of uncommitted evidence, from the evidence store,
based on a priority that is a product of the age of the evidence and the voting power of the malicious validator.
Once the proposed evidence is submitted, the evidence is marked as committed and is moved from the broadcasted set to the committed set
(the committed set is used to verify whether new evidence has actually already been submitted).
As a result it is also removed from the concurrent list so that it is no longer gossiped.
*/
package evidence
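
A minimal illustration of the selection rule described in the doc comment above: PendingEvidence(maxNum) returns uncommitted evidence ordered by a priority equal to the product of the evidence's age and the offending validator's voting power. The sketch below is not the pool implementation; evidenceInfo and pendingByPriority are hypothetical names used only to show that priority ordering.

package main

import (
	"fmt"
	"sort"
)

// evidenceInfo is a hypothetical stand-in for one piece of uncommitted evidence.
type evidenceInfo struct {
	name        string
	height      int64 // height at which the offence occurred
	votingPower int64 // voting power of the offending validator
}

// pendingByPriority returns at most maxNum items, highest priority first,
// where priority = (currentHeight - evidence height) * voting power.
func pendingByPriority(pending []evidenceInfo, currentHeight, maxNum int64) []evidenceInfo {
	sort.Slice(pending, func(i, j int) bool {
		pi := (currentHeight - pending[i].height) * pending[i].votingPower
		pj := (currentHeight - pending[j].height) * pending[j].votingPower
		return pi > pj
	})
	if maxNum < int64(len(pending)) {
		return pending[:maxNum]
	}
	return pending
}

func main() {
	pending := []evidenceInfo{
		{name: "dupVoteA", height: 90, votingPower: 10},
		{name: "dupVoteB", height: 50, votingPower: 5},
		{name: "dupVoteC", height: 95, votingPower: 100},
	}
	// At height 100, priorities are 100, 250 and 500, so dupVoteC and dupVoteB are returned.
	for _, ev := range pendingByPriority(pending, 100, 2) {
		fmt.Println(ev.name)
	}
}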

evidence/pool.go  +26 -20

@@ -28,17 +28,17 @@ type Pool struct {
// needed to load validators to verify evidence
stateDB dbm.DB
// needed to load headers to verify evidence
blockStore *store.BlockStore
mtx sync.Mutex
// latest state
state sm.State
// a map of active validators and respective last heights validator is active
// if it was in validator set after EvidenceParams.MaxAgeNumBlocks or
// currently is (ie. [MaxAgeNumBlocks, CurrentHeight])
// In simple words, it means it's still bonded -> therefore slashable.
valToLastHeight valToLastHeightMap
// latest state
mtx sync.Mutex
state sm.State
}
// Validator.Address -> Last height it was in validator set
@@ -90,8 +90,8 @@ func (evpool *Pool) PendingEvidence(maxNum int64) []types.Evidence {
return evidence
}
// Update uses the latest block to update the state, the ValToLastHeight map for evidence expiration
// and to mark committed evidence
// Update uses the latest block & state to update its copy of the state,
// validator to last height map and calls MarkEvidenceAsCommitted.
func (evpool *Pool) Update(block *types.Block, state sm.State) {
// sanity check
if state.LastBlockHeight != block.Height {
@@ -103,14 +103,13 @@ func (evpool *Pool) Update(block *types.Block, state sm.State) {
)
}
// update the state
evpool.mtx.Lock()
evpool.state = state
evpool.mtx.Unlock()
// remove evidence from pending and mark committed
evpool.MarkEvidenceAsCommitted(block.Height, block.Time, block.Evidence.Evidence)
// update the state
evpool.mtx.Lock()
defer evpool.mtx.Unlock()
evpool.state = state
evpool.updateValToLastHeight(block.Height, state)
}
@@ -141,7 +140,15 @@ func (evpool *Pool) AddEvidence(evidence types.Evidence) error {
return err
}
evList = ce.Split(&blockMeta.Header, valSet, evpool.valToLastHeight)
// XXX: Copy here since this should be a rare case.
evpool.mtx.Lock()
valToLastHeightCopy := make(valToLastHeightMap, len(evpool.valToLastHeight))
for k, v := range evpool.valToLastHeight {
valToLastHeightCopy[k] = v
}
evpool.mtx.Unlock()
evList = ce.Split(&blockMeta.Header, valSet, valToLastHeightCopy)
}
for _, ev := range evList {
@@ -262,6 +269,9 @@ func (evpool *Pool) SetLogger(l log.Logger) {
// long time ago (> ConsensusParams.Evidence.MaxAgeDuration && >
// ConsensusParams.Evidence.MaxAgeNumBlocks).
func (evpool *Pool) ValidatorLastHeight(address []byte) int64 {
evpool.mtx.Lock()
defer evpool.mtx.Unlock()
h, ok := evpool.valToLastHeight[string(address)]
if !ok {
return 0
@@ -352,15 +362,11 @@ func (evpool *Pool) updateValToLastHeight(blockHeight int64, state sm.State) {
}
// Remove validators outside of MaxAgeNumBlocks & MaxAgeDuration.
removeHeight := blockHeight - evpool.State().ConsensusParams.Evidence.MaxAgeNumBlocks
removeHeight := blockHeight - state.ConsensusParams.Evidence.MaxAgeNumBlocks
if removeHeight >= 1 {
valSet, err := sm.LoadValidators(evpool.stateDB, removeHeight)
if err != nil {
for _, val := range valSet.Validators {
h, ok := evpool.valToLastHeight[string(val.Address)]
if ok && h == removeHeight {
delete(evpool.valToLastHeight, string(val.Address))
}
for val, height := range evpool.valToLastHeight {
if height <= removeHeight {
delete(evpool.valToLastHeight, val)
}
}
}


evidence/reactor.go  +1 -3

@@ -51,6 +51,7 @@ func (evR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
{
ID: EvidenceChannel,
Priority: 5,
RecvMessageCapacity: maxMsgSize,
},
}
}
@@ -235,9 +236,6 @@ func RegisterMessages(cdc *amino.Codec) {
}
func decodeMsg(bz []byte) (msg Message, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


evidence/reactor_test.go  +1 -2

@@ -13,7 +13,6 @@ import (
dbm "github.com/tendermint/tm-db"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/p2p"
sm "github.com/tendermint/tendermint/state"
@@ -214,7 +213,7 @@ func TestListMessageValidationBasic(t *testing.T) {
{"Good ListMessage", func(evList *ListMessage) {}, false},
{"Invalid ListMessage", func(evList *ListMessage) {
evList.Evidence = append(evList.Evidence,
&types.DuplicateVoteEvidence{PubKey: secp256k1.GenPrivKey().PubKey()})
&types.DuplicateVoteEvidence{})
}, true},
}
for _, tc := range testCases {


mempool/reactor.go  +4 -6

@@ -137,10 +137,12 @@ func (memR *Reactor) OnStart() error {
// GetChannels implements Reactor.
// It returns the list of channels for this reactor.
func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
return []*p2p.ChannelDescriptor{
{
ID: MempoolChannel,
Priority: 5,
ID: MempoolChannel,
Priority: 5,
RecvMessageCapacity: maxMsgSize,
},
}
}
@@ -271,10 +273,6 @@ func RegisterMessages(cdc *amino.Codec) {
}
func (memR *Reactor) decodeMsg(bz []byte) (msg Message, err error) {
maxMsgSize := calcMaxMsgSize(memR.config.MaxTxBytes)
if l := len(bz); l > maxMsgSize {
return msg, ErrTxTooLarge{maxMsgSize, l}
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


node/node.go  +2 -21

@@ -7,7 +7,6 @@ import (
"net"
"net/http"
_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
"os"
"strings"
"time"
@@ -88,31 +87,13 @@ type Provider func(*cfg.Config, log.Logger) (*Node, error)
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
// Generate node PrivKey
nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
if err != nil {
return nil, err
}
// Convert old PrivValidator if it exists.
oldPrivVal := config.OldPrivValidatorFile()
newPrivValKey := config.PrivValidatorKeyFile()
newPrivValState := config.PrivValidatorStateFile()
if _, err := os.Stat(oldPrivVal); !os.IsNotExist(err) {
oldPV, err := privval.LoadOldFilePV(oldPrivVal)
if err != nil {
return nil, fmt.Errorf("error reading OldPrivValidator from %v: %v", oldPrivVal, err)
}
logger.Info("Upgrading PrivValidator file",
"old", oldPrivVal,
"newKey", newPrivValKey,
"newState", newPrivValState,
)
oldPV.Upgrade(newPrivValKey, newPrivValState)
return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err)
}
return NewNode(config,
privval.LoadOrGenFilePV(newPrivValKey, newPrivValState),
privval.LoadOrGenFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()),
nodeKey,
proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
DefaultGenesisDocProviderFunc(config),


p2p/pex/pex_reactor.go  +4 -6

@@ -180,9 +180,10 @@ func (r *Reactor) OnStop() {
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor {
return []*conn.ChannelDescriptor{
{
ID: PexChannel,
Priority: 1,
SendQueueCapacity: 10,
ID: PexChannel,
Priority: 1,
SendQueueCapacity: 10,
RecvMessageCapacity: maxMsgSize,
},
}
}
@@ -771,9 +772,6 @@ func RegisterMessages(cdc *amino.Codec) {
}
func decodeMsg(bz []byte) (msg Message, err error) {
if len(bz) > maxMsgSize {
return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
}
err = cdc.UnmarshalBinaryBare(bz, &msg)
return
}


privval/file.go  +1 -1

@@ -32,7 +32,7 @@ func voteToStep(vote *types.Vote) int8 {
case types.PrecommitType:
return stepPrecommit
default:
panic("Unknown vote type")
panic(fmt.Sprintf("Unknown vote type: %v", vote.Type))
}
}


privval/file_deprecated.go  +0 -81

@@ -1,81 +0,0 @@
package privval
import (
"io/ioutil"
"os"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/types"
)
// OldFilePV is the old version of the FilePV, pre v0.28.0.
// Deprecated: Use FilePV instead.
type OldFilePV struct {
Address types.Address `json:"address"`
PubKey crypto.PubKey `json:"pub_key"`
LastHeight int64 `json:"last_height"`
LastRound int `json:"last_round"`
LastStep int8 `json:"last_step"`
LastSignature []byte `json:"last_signature,omitempty"`
LastSignBytes bytes.HexBytes `json:"last_signbytes,omitempty"`
PrivKey crypto.PrivKey `json:"priv_key"`
filePath string
}
// LoadOldFilePV loads an OldFilePV from the filePath.
func LoadOldFilePV(filePath string) (*OldFilePV, error) {
pvJSONBytes, err := ioutil.ReadFile(filePath)
if err != nil {
return nil, err
}
pv := &OldFilePV{}
err = cdc.UnmarshalJSON(pvJSONBytes, &pv)
if err != nil {
return nil, err
}
// overwrite pubkey and address for convenience
pv.PubKey = pv.PrivKey.PubKey()
pv.Address = pv.PubKey.Address()
pv.filePath = filePath
return pv, nil
}
// Upgrade converts the OldFilePV to the new FilePV, separating the immutable and mutable components,
// and persisting them to the keyFilePath and stateFilePath, respectively.
// It renames the original file by adding ".bak".
func (oldFilePV *OldFilePV) Upgrade(keyFilePath, stateFilePath string) *FilePV {
privKey := oldFilePV.PrivKey
pvKey := FilePVKey{
PrivKey: privKey,
PubKey: privKey.PubKey(),
Address: privKey.PubKey().Address(),
filePath: keyFilePath,
}
pvState := FilePVLastSignState{
Height: oldFilePV.LastHeight,
Round: oldFilePV.LastRound,
Step: oldFilePV.LastStep,
Signature: oldFilePV.LastSignature,
SignBytes: oldFilePV.LastSignBytes,
filePath: stateFilePath,
}
// Save the new PV files
pv := &FilePV{
Key: pvKey,
LastSignState: pvState,
}
pv.Save()
// Rename the old PV file
err := os.Rename(oldFilePV.filePath, oldFilePV.filePath+".bak")
if err != nil {
panic(err)
}
return pv
}

privval/file_deprecated_test.go  +0 -84

@@ -1,84 +0,0 @@
package privval_test
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/privval"
)
const lastSignBytes = "750802110500000000000000220B08B398F3E00510F48DA6402A480A20F" +
"C258973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4" +
"CAD312240A20C971B286ACB8AAA6FCA0365EB0A660B189EDC08B46B5AF2" +
"995DEFA51A28D215B10013211746573742D636861696E2D533245415533"
const oldPrivvalContent = `{
"address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "r3Yg2AhDZ745CNTpavsGU+mRZ8WpRXqoJuyqjN8mJq0="
},
"last_height": "5",
"last_round": "0",
"last_step": 3,
"last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==",
"last_signbytes": "` + lastSignBytes + `",
"priv_key": {
"type": "tendermint/PrivKeyEd25519",
"value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ=="
}
}`
func TestLoadAndUpgrade(t *testing.T) {
oldFilePath := initTmpOldFile(t)
defer os.Remove(oldFilePath)
newStateFile, err := ioutil.TempFile("", "priv_validator_state*.json")
defer os.Remove(newStateFile.Name())
require.NoError(t, err)
newKeyFile, err := ioutil.TempFile("", "priv_validator_key*.json")
defer os.Remove(newKeyFile.Name())
require.NoError(t, err)
oldPV, err := privval.LoadOldFilePV(oldFilePath)
assert.NoError(t, err)
newPV := oldPV.Upgrade(newKeyFile.Name(), newStateFile.Name())
assertEqualPV(t, oldPV, newPV)
assert.NoError(t, err)
upgradedPV := privval.LoadFilePV(newKeyFile.Name(), newStateFile.Name())
assertEqualPV(t, oldPV, upgradedPV)
oldPV, err = privval.LoadOldFilePV(oldFilePath + ".bak")
require.NoError(t, err)
assertEqualPV(t, oldPV, upgradedPV)
}
func assertEqualPV(t *testing.T, oldPV *privval.OldFilePV, newPV *privval.FilePV) {
assert.Equal(t, oldPV.Address, newPV.Key.Address)
assert.Equal(t, oldPV.Address, newPV.GetAddress())
assert.Equal(t, oldPV.PubKey, newPV.Key.PubKey)
npv, err := newPV.GetPubKey()
require.NoError(t, err)
assert.Equal(t, oldPV.PubKey, npv)
assert.Equal(t, oldPV.PrivKey, newPV.Key.PrivKey)
assert.Equal(t, oldPV.LastHeight, newPV.LastSignState.Height)
assert.Equal(t, oldPV.LastRound, newPV.LastSignState.Round)
assert.Equal(t, oldPV.LastSignature, newPV.LastSignState.Signature)
assert.Equal(t, oldPV.LastSignBytes, newPV.LastSignState.SignBytes)
assert.Equal(t, oldPV.LastStep, newPV.LastSignState.Step)
}
func initTmpOldFile(t *testing.T) string {
tmpFile, err := ioutil.TempFile("", "priv_validator_*.json")
require.NoError(t, err)
t.Logf("created test file %s", tmpFile.Name())
_, err = tmpFile.WriteString(oldPrivvalContent)
require.NoError(t, err)
return tmpFile.Name()
}

rpc/client/evidence_test.go  +2 -2

@@ -29,7 +29,7 @@ func newEvidence(t *testing.T, val *privval.FilePV,
vote2.Signature, err = val.Key.PrivKey.Sign(vote2.SignBytes(chainID))
require.NoError(t, err)
return types.NewDuplicateVoteEvidence(val.Key.PubKey, vote, vote2)
return types.NewDuplicateVoteEvidence(vote, vote2)
}
func makeEvidences(
@@ -123,7 +123,7 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
require.NoError(t, err)
client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil)
ed25519pub := correct.PubKey.(ed25519.PubKeyEd25519)
ed25519pub := pv.Key.PubKey.(ed25519.PubKeyEd25519)
rawpub := ed25519pub[:]
result2, err := c.ABCIQuery("/val", rawpub)
require.NoError(t, err)


scripts/privValUpgrade.go  +0 -45

@@ -1,45 +0,0 @@
package main
import (
"fmt"
"os"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/privval"
)
var (
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)
func main() {
args := os.Args[1:]
if len(args) != 3 {
fmt.Println("Expected three args: <old path> <new key path> <new state path>")
fmt.Println(
"Eg. ~/.tendermint/config/priv_validator.json" +
" ~/.tendermint/config/priv_validator_key.json" +
" ~/.tendermint/data/priv_validator_state.json",
)
os.Exit(1)
}
err := loadAndUpgrade(args[0], args[1], args[2])
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
func loadAndUpgrade(oldPVPath, newPVKeyPath, newPVStatePath string) error {
oldPV, err := privval.LoadOldFilePV(oldPVPath)
if err != nil {
return fmt.Errorf("error reading OldPrivValidator from %v: %v", oldPVPath, err)
}
logger.Info("Upgrading PrivValidator file",
"old", oldPVPath,
"newKey", newPVKeyPath,
"newState", newPVStatePath,
)
oldPV.Upgrade(newPVKeyPath, newPVStatePath)
return nil
}

scripts/privValUpgrade_test.go  +0 -129

@@ -1,129 +0,0 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/privval"
)
const lastSignBytes = "750802110500000000000000220B08B398F3E00510F48DA6402A480A20FC25" +
"8973076512999C3E6839A22E9FBDB1B77CF993E8A9955412A41A59D4CAD312240A20C971B286ACB8AA" +
"A6FCA0365EB0A660B189EDC08B46B5AF2995DEFA51A28D215B10013211746573742D636861696E2D533245415533"
const oldPrivvalContent = `{
"address": "1D8089FAFDFAE4A637F3D616E17B92905FA2D91D",
"pub_key": {
"type": "tendermint/PubKeyEd25519",
"value": "r3Yg2AhDZ745CNTpavsGU+mRZ8WpRXqoJuyqjN8mJq0="
},
"last_height": "5",
"last_round": "0",
"last_step": 3,
"last_signature": "CTr7b9ZQlrJJf+12rPl5t/YSCUc/KqV7jQogCfFJA24e7hof69X6OMT7eFLVQHyodPjD/QTA298XHV5ejxInDQ==",
"last_signbytes": "` + lastSignBytes + `",
"priv_key": {
"type": "tendermint/PrivKeyEd25519",
"value": "7MwvTGEWWjsYwjn2IpRb+GYsWi9nnFsw8jPLLY1UtP6vdiDYCENnvjkI1Olq+wZT6ZFnxalFeqgm7KqM3yYmrQ=="
}
}`
func TestLoadAndUpgrade(t *testing.T) {
oldFilePath := initTmpOldFile(t)
defer os.Remove(oldFilePath)
newStateFile, err := ioutil.TempFile("", "priv_validator_state*.json")
defer os.Remove(newStateFile.Name())
require.NoError(t, err)
newKeyFile, err := ioutil.TempFile("", "priv_validator_key*.json")
defer os.Remove(newKeyFile.Name())
require.NoError(t, err)
emptyOldFile, err := ioutil.TempFile("", "priv_validator_empty*.json")
require.NoError(t, err)
defer os.Remove(emptyOldFile.Name())
type args struct {
oldPVPath string
newPVKeyPath string
newPVStatePath string
}
tests := []struct {
name string
args args
wantErr bool
wantPanic bool
}{
{"successful upgrade",
args{oldPVPath: oldFilePath, newPVKeyPath: newKeyFile.Name(), newPVStatePath: newStateFile.Name()},
false, false,
},
{"unsuccessful upgrade: empty old privval file",
args{oldPVPath: emptyOldFile.Name(), newPVKeyPath: newKeyFile.Name(), newPVStatePath: newStateFile.Name()},
true, false,
},
{"unsuccessful upgrade: invalid new paths (1/3)",
args{oldPVPath: oldFilePath, newPVKeyPath: "", newPVStatePath: newStateFile.Name()},
false, true,
},
{"unsuccessful upgrade: invalid new paths (2/3)",
args{oldPVPath: oldFilePath, newPVKeyPath: newKeyFile.Name(), newPVStatePath: ""},
false, true,
},
{"unsuccessful upgrade: invalid new paths (3/3)",
args{oldPVPath: oldFilePath, newPVKeyPath: "", newPVStatePath: ""},
false, true,
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
// need to re-write the file everytime because upgrading renames it
err := ioutil.WriteFile(oldFilePath, []byte(oldPrivvalContent), 0600)
require.NoError(t, err)
if tt.wantPanic {
require.Panics(t, func() { loadAndUpgrade(tt.args.oldPVPath, tt.args.newPVKeyPath, tt.args.newPVStatePath) })
} else {
err = loadAndUpgrade(tt.args.oldPVPath, tt.args.newPVKeyPath, tt.args.newPVStatePath)
if tt.wantErr {
assert.Error(t, err)
fmt.Println("ERR", err)
} else {
assert.NoError(t, err)
upgradedPV := privval.LoadFilePV(tt.args.newPVKeyPath, tt.args.newPVStatePath)
oldPV, err := privval.LoadOldFilePV(tt.args.oldPVPath + ".bak")
require.NoError(t, err)
assert.Equal(t, oldPV.Address, upgradedPV.Key.Address)
assert.Equal(t, oldPV.Address, upgradedPV.GetAddress())
assert.Equal(t, oldPV.PubKey, upgradedPV.Key.PubKey)
upv, err := upgradedPV.GetPubKey()
require.NoError(t, err)
assert.Equal(t, oldPV.PubKey, upv)
assert.Equal(t, oldPV.PrivKey, upgradedPV.Key.PrivKey)
assert.Equal(t, oldPV.LastHeight, upgradedPV.LastSignState.Height)
assert.Equal(t, oldPV.LastRound, upgradedPV.LastSignState.Round)
assert.Equal(t, oldPV.LastSignature, upgradedPV.LastSignState.Signature)
assert.Equal(t, oldPV.LastSignBytes, upgradedPV.LastSignState.SignBytes)
assert.Equal(t, oldPV.LastStep, upgradedPV.LastSignState.Step)
}
}
})
}
}
func initTmpOldFile(t *testing.T) string {
tmpfile, err := ioutil.TempFile("", "priv_validator_*.json")
require.NoError(t, err)
t.Logf("created test file %s", tmpfile.Name())
_, err = tmpfile.WriteString(oldPrivvalContent)
require.NoError(t, err)
return tmpfile.Name()
}

types/evidence.go  +9 -15

@@ -19,7 +19,7 @@ import (
const (
// MaxEvidenceBytes is a maximum size of any evidence (including amino overhead).
MaxEvidenceBytes int64 = 484
MaxEvidenceBytes int64 = 444
// An invalid field in the header from LunaticValidatorEvidence.
// Must be a function of the ABCI application state.
@@ -117,16 +117,15 @@ func MaxEvidencePerBlock(blockMaxBytes int64) (int64, int64) {
// DuplicateVoteEvidence contains evidence a validator signed two conflicting
// votes.
type DuplicateVoteEvidence struct {
PubKey crypto.PubKey
VoteA *Vote
VoteB *Vote
VoteA *Vote
VoteB *Vote
}
var _ Evidence = &DuplicateVoteEvidence{}
// NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given
// two conflicting votes. If one of the votes is nil, evidence returned is nil as well
func NewDuplicateVoteEvidence(pubkey crypto.PubKey, vote1 *Vote, vote2 *Vote) *DuplicateVoteEvidence {
func NewDuplicateVoteEvidence(vote1 *Vote, vote2 *Vote) *DuplicateVoteEvidence {
var voteA, voteB *Vote
if vote1 == nil || vote2 == nil {
return nil
@@ -139,9 +138,8 @@ func NewDuplicateVoteEvidence(pubkey crypto.PubKey, vote1 *Vote, vote2 *Vote) *D
voteB = vote1
}
return &DuplicateVoteEvidence{
PubKey: pubkey,
VoteA: voteA,
VoteB: voteB,
VoteA: voteA,
VoteB: voteB,
}
}
@@ -163,7 +161,7 @@ func (dve *DuplicateVoteEvidence) Time() time.Time {
// Address returns the address of the validator.
func (dve *DuplicateVoteEvidence) Address() []byte {
return dve.PubKey.Address()
return dve.VoteA.ValidatorAddress
}
// Hash returns the hash of the evidence.
@@ -247,9 +245,6 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
// ValidateBasic performs basic validation.
func (dve *DuplicateVoteEvidence) ValidateBasic() error {
if len(dve.PubKey.Bytes()) == 0 {
return errors.New("empty PubKey")
}
if dve.VoteA == nil || dve.VoteB == nil {
return fmt.Errorf("one or both of the votes are empty %v, %v", dve.VoteA, dve.VoteB)
}
@@ -424,9 +419,8 @@ OUTER_LOOP:
// immediately slashable (#F1).
if ev.H1.Commit.Round == ev.H2.Commit.Round {
evList = append(evList, &DuplicateVoteEvidence{
PubKey: val.PubKey,
VoteA: ev.H1.Commit.GetVote(i),
VoteB: ev.H2.Commit.GetVote(j),
VoteA: ev.H1.Commit.GetVote(i),
VoteB: ev.H2.Commit.GetVote(j),
})
} else {
// if H1.Round != H2.Round we need to run full detection procedure => not


types/evidence_test.go  +41 -9

@@ -10,7 +10,6 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto/tmhash"
tmrand "github.com/tendermint/tendermint/libs/rand"
)
@@ -108,15 +107,49 @@ func TestMaxEvidenceBytes(t *testing.T) {
blockID2 := makeBlockID(tmhash.Sum([]byte("blockhash2")), math.MaxInt64, tmhash.Sum([]byte("partshash")))
const chainID = "mychain"
ev := &DuplicateVoteEvidence{
PubKey: secp256k1.GenPrivKey().PubKey(), // use secp because it's pubkey is longer
VoteA: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID),
VoteB: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2),
VoteA: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID),
VoteB: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2),
}
bz, err := cdc.MarshalBinaryLengthPrefixed(ev)
require.NoError(t, err)
//TODO: Add other types of evidence to test and set MaxEvidenceBytes accordingly
// evl := &LunaticValidatorEvidence{
// Header: makeHeaderRandom(),
// Vote: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2),
// InvalidHeaderField: "",
// }
// evp := &PhantomValidatorEvidence{
// Header: makeHeaderRandom(),
// Vote: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2),
// LastHeightValidatorWasInSet: math.MaxInt64,
// }
// signedHeader := SignedHeader{Header: makeHeaderRandom(), Commit: randCommit(time.Now())}
// evc := &ConflictingHeadersEvidence{
// H1: &signedHeader,
// H2: &signedHeader,
// }
testCases := []struct {
testName string
evidence Evidence
}{
{"DuplicateVote", ev},
// {"LunaticValidatorEvidence", evl},
// {"PhantomValidatorEvidence", evp},
// {"ConflictingHeadersEvidence", evc},
}
for _, tt := range testCases {
bz, err := cdc.MarshalBinaryLengthPrefixed(tt.evidence)
require.NoError(t, err, tt.testName)
assert.LessOrEqual(t, MaxEvidenceBytes, int64(len(bz)), tt.testName)
}
assert.EqualValues(t, MaxEvidenceBytes, len(bz))
}
func randomDuplicatedVoteEvidence(t *testing.T) *DuplicateVoteEvidence {
@@ -160,10 +193,9 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) {
for _, tc := range testCases {
tc := tc
t.Run(tc.testName, func(t *testing.T) {
pk := secp256k1.GenPrivKey().PubKey()
vote1 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID)
vote2 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2)
ev := NewDuplicateVoteEvidence(pk, vote1, vote2)
ev := NewDuplicateVoteEvidence(vote1, vote2)
tc.malleateEvidence(ev)
assert.Equal(t, tc.expectErr, ev.ValidateBasic() != nil, "Validate Basic had an unexpected result")
})


types/protobuf_test.go  +2 -3

@@ -136,9 +136,8 @@ func TestABCIEvidence(t *testing.T) {
pubKey, err := val.GetPubKey()
require.NoError(t, err)
ev := &DuplicateVoteEvidence{
PubKey: pubKey,
VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID),
VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2),
VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID),
VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2),
}
abciEv := TM2PB.Evidence(
ev,


types/vote.go  +2 -2

@@ -31,12 +31,12 @@ type ErrVoteConflictingVotes struct {
}
func (err *ErrVoteConflictingVotes) Error() string {
return fmt.Sprintf("conflicting votes from validator %X", err.PubKey.Address())
return fmt.Sprintf("conflicting votes from validator %X", err.VoteA.ValidatorAddress)
}
func NewConflictingVoteError(val *Validator, vote1, vote2 *Vote) *ErrVoteConflictingVotes {
return &ErrVoteConflictingVotes{
NewDuplicateVoteEvidence(val.PubKey, vote1, vote2),
NewDuplicateVoteEvidence(vote1, vote2),
}
}

