
p2p: make NodeID and NetAddress public (#6583)

Branch: pull/6618/head
Sam Kleinman, 3 years ago (committed by GitHub)
Commit ae5f98881b
89 changed files with 1367 additions and 1305 deletions
  1. CHANGELOG_PENDING.md (+1, -0)
  2. cmd/tendermint/commands/testnet.go (+1, -1)
  3. internal/blockchain/v0/pool.go (+19, -20)
  4. internal/blockchain/v0/pool_test.go (+7, -8)
  5. internal/blockchain/v0/reactor.go (+2, -2)
  6. internal/blockchain/v0/reactor_test.go (+13, -13)
  7. internal/blockchain/v2/internal/behavior/peer_behaviour.go (+6, -8)
  8. internal/blockchain/v2/internal/behavior/reporter.go (+4, -3)
  9. internal/blockchain/v2/internal/behavior/reporter_test.go (+8, -8)
  10. internal/blockchain/v2/processor.go (+6, -7)
  11. internal/blockchain/v2/processor_test.go (+2, -3)
  12. internal/blockchain/v2/reactor.go (+5, -5)
  13. internal/blockchain/v2/reactor_test.go (+5, -5)
  14. internal/blockchain/v2/scheduler.go (+26, -27)
  15. internal/blockchain/v2/scheduler_test.go (+104, -105)
  16. internal/consensus/byzantine_test.go (+1, -1)
  17. internal/consensus/msgs.go (+1, -2)
  18. internal/consensus/msgs_test.go (+1, -2)
  19. internal/consensus/peer_state.go (+2, -3)
  20. internal/consensus/reactor.go (+4, -4)
  21. internal/consensus/reactor_test.go (+10, -10)
  22. internal/consensus/state.go (+9, -10)
  23. internal/consensus/types/height_vote_set.go (+6, -7)
  24. internal/evidence/reactor.go (+3, -3)
  25. internal/evidence/reactor_test.go (+12, -12)
  26. internal/mempool/ids.go (+6, -6)
  27. internal/mempool/ids_test.go (+2, -2)
  28. internal/mempool/tx.go (+3, -4)
  29. internal/mempool/v0/clist_mempool.go (+2, -3)
  30. internal/mempool/v0/reactor.go (+4, -4)
  31. internal/mempool/v0/reactor_test.go (+17, -17)
  32. internal/mempool/v1/reactor.go (+4, -4)
  33. internal/p2p/address.go (+4, -56)
  34. internal/p2p/address_test.go (+11, -10)
  35. internal/p2p/errors.go (+5, -12)
  36. internal/p2p/key.go (+4, -3)
  37. internal/p2p/mock/peer.go (+4, -3)
  38. internal/p2p/mocks/peer.go (+5, -4)
  39. internal/p2p/netaddress.go (+2, -413)
  40. internal/p2p/node_info.go (+8, -7)
  41. internal/p2p/p2p_test.go (+3, -2)
  42. internal/p2p/p2ptest/network.go (+14, -13)
  43. internal/p2p/p2ptest/require.go (+2, -1)
  44. internal/p2p/p2ptest/util.go (+2, -2)
  45. internal/p2p/peer.go (+3, -2)
  46. internal/p2p/peer_set.go (+7, -6)
  47. internal/p2p/peer_set_test.go (+3, -2)
  48. internal/p2p/peer_test.go (+5, -4)
  49. internal/p2p/peermanager.go (+49, -48)
  50. internal/p2p/peermanager_scoring_test.go (+3, -2)
  51. internal/p2p/peermanager_test.go (+130, -129)
  52. internal/p2p/pex/addrbook.go (+10, -9)
  53. internal/p2p/pex/addrbook_test.go (+9, -8)
  54. internal/p2p/pex/bench_test.go (+4, -4)
  55. internal/p2p/pex/known_address.go (+2, -1)
  56. internal/p2p/pex/pex_reactor.go (+55, -6)
  57. internal/p2p/pex/pex_reactor_test.go (+5, -3)
  58. internal/p2p/pex/reactor.go (+11, -10)
  59. internal/p2p/pex/reactor_test.go (+17, -16)
  60. internal/p2p/router.go (+20, -14)
  61. internal/p2p/router_test.go (+7, -6)
  62. internal/p2p/shim_test.go (+3, -2)
  63. internal/p2p/switch.go (+29, -12)
  64. internal/p2p/switch_test.go (+15, -5)
  65. internal/p2p/test_util.go (+8, -7)
  66. internal/p2p/transport.go (+12, -2)
  67. internal/p2p/transport_memory.go (+13, -12)
  68. internal/p2p/transport_memory_test.go (+2, -1)
  69. internal/p2p/transport_test.go (+7, -6)
  70. internal/statesync/block_queue.go (+1, -2)
  71. internal/statesync/block_queue_test.go (+7, -7)
  72. internal/statesync/chunks.go (+6, -6)
  73. internal/statesync/chunks_test.go (+16, -16)
  74. internal/statesync/dispatcher.go (+19, -19)
  75. internal/statesync/dispatcher_test.go (+5, -4)
  76. internal/statesync/reactor_test.go (+11, -11)
  77. internal/statesync/snapshots.go (+15, -15)
  78. internal/statesync/snapshots_test.go (+19, -19)
  79. internal/statesync/syncer.go (+4, -4)
  80. internal/statesync/syncer_test.go (+13, -14)
  81. node/node.go (+3, -3)
  82. node/setup.go (+5, -5)
  83. test/fuzz/p2p/addrbook/init-corpus/main.go (+6, -5)
  84. test/fuzz/p2p/pex/init-corpus/main.go (+7, -5)
  85. test/fuzz/p2p/pex/reactor_receive.go (+11, -8)
  86. types/errors_p2p.go (+33, -0)
  87. types/netaddress.go (+329, -0)
  88. types/netaddress_test.go (+1, -10)
  89. types/node_id.go (+67, -0)

CHANGELOG_PENDING.md (+1, -0)

@@ -31,6 +31,7 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- P2P Protocol
- Go API
- [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
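
A minimal sketch of what the newly exported type enables: code outside internal/p2p, such as the RPC layer, can now name the node-ID type directly (the hex ID below is invented; types.NodeID is string-backed, as the conversions throughout this diff show):

    package rpcexample

    import "github.com/tendermint/tendermint/types"

    // Previously this type was only reachable as p2p.NodeID inside internal/p2p.
    var exampleID types.NodeID = "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4"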


cmd/tendermint/commands/testnet.go (+1, -1)

@@ -278,7 +278,7 @@ func persistentPeersArray(config *cfg.Config) ([]string, error) {
if err != nil {
return []string{}, err
}
peers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
peers[i] = nodeKey.ID.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
}
return peers, nil
}
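
The hunk above replaces the package-level helper p2p.IDAddressString with a method on the exported ID type. A hedged sketch of the two call shapes (ID and address invented for illustration; the id@host:port output format follows the old helper's behavior):

    id := types.NodeID("f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4")

    // Before: addr := p2p.IDAddressString(nodeKey.ID, "10.0.0.1:26656")
    // After: the same formatting lives on the exported type.
    addr := id.AddressString("10.0.0.1:26656") // "f9baeaa...@10.0.0.1:26656"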


internal/blockchain/v0/pool.go (+19, -20)

@@ -9,7 +9,6 @@ import (
flow "github.com/tendermint/tendermint/internal/libs/flowrate"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
@@ -63,7 +62,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
Height int64
PeerID p2p.NodeID
PeerID types.NodeID
}
// BlockPool keeps track of the fast sync peers, block requests and block responses.
@@ -76,7 +75,7 @@ type BlockPool struct {
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[p2p.NodeID]*bpPeer
peers map[types.NodeID]*bpPeer
maxPeerHeight int64 // the biggest reported height
// atomic
@@ -90,7 +89,7 @@ type BlockPool struct {
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[p2p.NodeID]*bpPeer),
peers: make(map[types.NodeID]*bpPeer),
requesters: make(map[int64]*bpRequester),
height: start,
@@ -225,13 +224,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
pool.mtx.Lock()
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != p2p.NodeID("") {
if peerID != types.NodeID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
@@ -240,7 +239,7 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -287,7 +286,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
}
// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -308,14 +307,14 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64)
// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
pool.removePeer(peerID)
}
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
func (pool *BlockPool) removePeer(peerID types.NodeID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
@@ -396,14 +395,14 @@ func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
if !pool.IsRunning() {
return
}
@@ -437,7 +436,7 @@ type bpPeer struct {
height int64
base int64
pool *BlockPool
id p2p.NodeID
id types.NodeID
recvMonitor *flow.Monitor
timeout *time.Timer
@@ -445,7 +444,7 @@ type bpPeer struct {
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -510,10 +509,10 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
redoCh chan p2p.NodeID // redo may send multitime, add peerId to identify repeat
redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat
mtx tmsync.Mutex
peerID p2p.NodeID
peerID types.NodeID
block *types.Block
}
@@ -522,7 +521,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan p2p.NodeID, 1),
redoCh: make(chan types.NodeID, 1),
peerID: "",
block: nil,
@@ -537,7 +536,7 @@ func (bpr *bpRequester) OnStart() error {
}
// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -559,7 +558,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
return bpr.block
}
func (bpr *bpRequester) getPeerID() p2p.NodeID {
func (bpr *bpRequester) getPeerID() types.NodeID {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
@@ -581,7 +580,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
func (bpr *bpRequester) redo(peerID types.NodeID) {
select {
case bpr.redoCh <- peerID:
default:
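
Every BlockPool entry point above now takes a types.NodeID rather than p2p.NodeID. A minimal sketch of the plumbing (only compiles inside this v0 package, since peerError is unexported; channel capacities are arbitrary):

    requestsCh := make(chan BlockRequest, 1)
    errorsCh := make(chan peerError, 1)
    pool := NewBlockPool(1, requestsCh, errorsCh)

    // Peers are registered and evicted by the exported ID type.
    pool.SetPeerRange(types.NodeID("P1"), 1, 100)
    pool.RemovePeer(types.NodeID("P1"))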


internal/blockchain/v0/pool_test.go (+7, -8)

@@ -9,7 +9,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
@@ -20,7 +19,7 @@ func init() {
}
type testPeer struct {
id p2p.NodeID
id types.NodeID
base int64
height int64
inputChan chan inputData // make sure each peer's data is sequential
@@ -50,7 +49,7 @@ func (p testPeer) simulateInput(input inputData) {
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}
type testPeers map[p2p.NodeID]testPeer
type testPeers map[types.NodeID]testPeer
func (ps testPeers) start() {
for _, v := range ps {
@@ -67,7 +66,7 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
for i := 0; i < numPeers; i++ {
peerID := p2p.NodeID(tmrand.Str(12))
peerID := types.NodeID(tmrand.Str(12))
height := minHeight + mrand.Int63n(maxHeight-minHeight)
base := minHeight + int64(i)
if base > height {
@@ -183,7 +182,7 @@ func TestBlockPoolTimeout(t *testing.T) {
// Pull from channels
counter := 0
timedOut := map[p2p.NodeID]struct{}{}
timedOut := map[types.NodeID]struct{}{}
for {
select {
case err := <-errorsCh:
@@ -204,7 +203,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
peers := make(testPeers, 10)
for i := 0; i < 10; i++ {
peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
peerID := types.NodeID(fmt.Sprintf("%d", i+1))
height := int64(i + 1)
peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
}
@@ -228,10 +227,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
assert.EqualValues(t, 10, pool.MaxPeerHeight())
// remove not-existing peer
assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })
assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
// remove peer with biggest height
pool.RemovePeer(p2p.NodeID("10"))
pool.RemovePeer(types.NodeID("10"))
assert.EqualValues(t, 9, pool.MaxPeerHeight())
// remove all peers


internal/blockchain/v0/reactor.go (+2, -2)

@@ -66,7 +66,7 @@ type consensusReactor interface {
type peerError struct {
err error
peerID p2p.NodeID
peerID types.NodeID
}
func (e peerError) Error() string {
@@ -194,7 +194,7 @@ func (r *Reactor) OnStop() {
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
block := r.store.LoadBlock(msg.Height)
if block != nil {
blockProto, err := block.ToProto()


internal/blockchain/v0/reactor_test.go (+13, -13)

@@ -27,14 +27,14 @@ import (
type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
nodes []p2p.NodeID
nodes []types.NodeID
reactors map[p2p.NodeID]*Reactor
app map[p2p.NodeID]proxy.AppConns
reactors map[types.NodeID]*Reactor
app map[types.NodeID]proxy.AppConns
blockchainChannels map[p2p.NodeID]*p2p.Channel
peerChans map[p2p.NodeID]chan p2p.PeerUpdate
peerUpdates map[p2p.NodeID]*p2p.PeerUpdates
blockchainChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
fastSync bool
}
@@ -55,12 +55,12 @@ func setup(
rts := &reactorTestSuite{
logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]p2p.NodeID, 0, numNodes),
reactors: make(map[p2p.NodeID]*Reactor, numNodes),
app: make(map[p2p.NodeID]proxy.AppConns, numNodes),
blockchainChannels: make(map[p2p.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]proxy.AppConns, numNodes),
blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
fastSync: true,
}
@@ -89,7 +89,7 @@ func setup(
}
func (rts *reactorTestSuite) addNode(t *testing.T,
nodeID p2p.NodeID,
nodeID types.NodeID,
genDoc *types.GenesisDoc,
privVal types.PrivValidator,
maxBlockHeight int64,


internal/blockchain/v2/internal/behavior/peer_behaviour.go (+6, -8)

@@ -1,14 +1,12 @@
package behavior
import (
"github.com/tendermint/tendermint/internal/p2p"
)
import "github.com/tendermint/tendermint/types"
// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
peerID p2p.NodeID
peerID types.NodeID
reason interface{}
}
@@ -17,7 +15,7 @@ type badMessage struct {
}
// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior {
func BadMessage(peerID types.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}
@@ -26,7 +24,7 @@ type messageOutOfOrder struct {
}
// MessageOutOfOrder returns a messagOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior {
func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}
@@ -35,7 +33,7 @@ type consensusVote struct {
}
// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior {
func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}
@@ -44,6 +42,6 @@ type blockPart struct {
}
// BlockPart returns blockPart PeerBehavior.
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior {
func BlockPart(peerID types.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}

internal/blockchain/v2/internal/behavior/reporter.go (+4, -3)

@@ -5,6 +5,7 @@ import (
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// Reporter provides an interface for reactors to report the behavior
@@ -51,14 +52,14 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
// behavior in manufactured scenarios.
type MockReporter struct {
mtx tmsync.RWMutex
pb map[p2p.NodeID][]PeerBehavior
pb map[types.NodeID][]PeerBehavior
}
// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[p2p.NodeID][]PeerBehavior{},
pb: map[types.NodeID][]PeerBehavior{},
}
}
@@ -72,7 +73,7 @@ func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
}
// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID p2p.NodeID) []PeerBehavior {
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {
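
Together with the constructors in peer_behaviour.go above, behavior reporting is now keyed by the exported ID type. A short usage sketch (the behavior import alias is assumed; reactor_test code below imports it as bh):

    reporter := behavior.NewMockReporter()
    peerID := types.NodeID("P1")

    // Record a behavior and read it back, both keyed by types.NodeID.
    _ = reporter.Report(behavior.BadMessage(peerID, "malformed block response"))
    recorded := reporter.GetBehaviors(peerID) // one PeerBehavior for "P1"
    _ = recorded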


internal/blockchain/v2/internal/behavior/reporter_test.go (+8, -8)

@@ -5,13 +5,13 @@ import (
"testing"
bh "github.com/tendermint/tendermint/internal/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID p2p.NodeID = "MockPeer"
var peerID types.NodeID = "MockPeer"
pr := bh.NewMockReporter()
behaviors := pr.GetBehaviors(peerID)
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
}
type scriptItem struct {
peerID p2p.NodeID
peerID types.NodeID
behavior bh.PeerBehavior
}
@@ -76,10 +76,10 @@ func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
// freequencies that those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
var (
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
peerID types.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviors(t *testing.T) {
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
var (
behaviorScript = []struct {
peerID p2p.NodeID
peerID types.NodeID
behaviors []bh.PeerBehavior
}{
{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},


internal/blockchain/v2/processor.go (+6, -7)

@@ -3,7 +3,6 @@ package v2
import (
"fmt"
"github.com/tendermint/tendermint/internal/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -13,8 +12,8 @@ import (
type pcBlockVerificationFailure struct {
priorityNormal
height int64
firstPeerID p2p.NodeID
secondPeerID p2p.NodeID
firstPeerID types.NodeID
secondPeerID types.NodeID
}
func (e pcBlockVerificationFailure) String() string {
@@ -26,7 +25,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
priorityNormal
height int64
peerID p2p.NodeID
peerID types.NodeID
}
func (e pcBlockProcessed) String() string {
@@ -46,7 +45,7 @@ func (p pcFinished) Error() string {
type queueItem struct {
block *types.Block
peerID p2p.NodeID
peerID types.NodeID
}
type blockQueue map[int64]queueItem
@@ -95,7 +94,7 @@ func (state *pcState) synced() bool {
return len(state.queue) <= 1
}
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
if item, ok := state.queue[height]; ok {
panic(fmt.Sprintf(
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
@@ -110,7 +109,7 @@ func (state *pcState) height() int64 {
}
// purgePeer moves all unprocessed blocks from the queue
func (state *pcState) purgePeer(peerID p2p.NodeID) {
func (state *pcState) purgePeer(peerID types.NodeID) {
// what if height is less than state.height?
for height, item := range state.queue {
if item.peerID == peerID {


internal/blockchain/v2/processor_test.go (+2, -3)

@@ -5,7 +5,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/internal/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -40,7 +39,7 @@ func makeState(p *params) *pcState {
state := newPcState(context)
for _, item := range p.items {
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
}
state.blocksSynced = p.blocksSynced
@@ -48,7 +47,7 @@ func makeState(p *params) *pcState {
return state
}
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
return scBlockReceived{
peerID: peerID,
block: makePcBlock(height),


internal/blockchain/v2/reactor.go (+5, -5)

@@ -213,7 +213,7 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
priorityNormal
time time.Time
peerID p2p.NodeID
peerID types.NodeID
size int64
block *types.Block
}
@@ -227,7 +227,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
priorityNormal
time time.Time
peerID p2p.NodeID
peerID types.NodeID
height int64
}
@@ -240,7 +240,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
priorityNormal
time time.Time
peerID p2p.NodeID
peerID types.NodeID
base int64
height int64
}
@@ -253,7 +253,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
priorityNormal
peerID p2p.NodeID
peerID types.NodeID
}
func (resp bcAddNewPeer) String() string {
@@ -263,7 +263,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
priorityHigh
peerID p2p.NodeID
peerID types.NodeID
reason interface{}
}


internal/blockchain/v2/reactor_test.go (+5, -5)

@@ -33,11 +33,11 @@ import (
type mockPeer struct {
service.Service
id p2p.NodeID
id types.NodeID
}
func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
func (mp mockPeer) ID() types.NodeID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }
@@ -419,7 +419,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
@@ -431,7 +431,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
@@ -442,7 +442,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}


internal/blockchain/v2/scheduler.go (+26, -27)

@@ -8,7 +8,6 @@ import (
"sort"
"time"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -26,7 +25,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
priorityNormal
peerID p2p.NodeID
peerID types.NodeID
height int64
}
@@ -37,7 +36,7 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
priorityNormal
peerID p2p.NodeID
peerID types.NodeID
block *types.Block
}
@@ -48,7 +47,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
priorityHigh
peerID p2p.NodeID
peerID types.NodeID
reason error
}
@@ -59,7 +58,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
priorityHigh
peers []p2p.NodeID
peers []types.NodeID
}
func (e scPeersPruned) String() string {
@@ -126,7 +125,7 @@ func (e peerState) String() string {
}
type scPeer struct {
peerID p2p.NodeID
peerID types.NodeID
// initialized as New when peer is added, updated to Ready when statusUpdate is received,
// updated to Removed when peer is removed
@@ -143,7 +142,7 @@ func (p scPeer) String() string {
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}
func newScPeer(peerID p2p.NodeID) *scPeer {
func newScPeer(peerID types.NodeID) *scPeer {
return &scPeer{
peerID: peerID,
state: peerStateNew,
@@ -171,7 +170,7 @@ type scheduler struct {
// a map of peerID to scheduler specific peer struct `scPeer` used to keep
// track of peer specific state
peers map[p2p.NodeID]*scPeer
peers map[types.NodeID]*scPeer
peerTimeout time.Duration // maximum response time from a peer otherwise prune
minRecvRate int64 // minimum receive rate from peer otherwise prune
@@ -183,13 +182,13 @@ type scheduler struct {
blockStates map[int64]blockState
// a map of heights to the peer we are waiting a response from
pendingBlocks map[int64]p2p.NodeID
pendingBlocks map[int64]types.NodeID
// the time at which a block was put in blockStatePending
pendingTime map[int64]time.Time
// a map of heights to the peers that put the block in blockStateReceived
receivedBlocks map[int64]p2p.NodeID
receivedBlocks map[int64]types.NodeID
}
func (sc scheduler) String() string {
@@ -204,10 +203,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
syncTimeout: 60 * time.Second,
height: initHeight,
blockStates: make(map[int64]blockState),
peers: make(map[p2p.NodeID]*scPeer),
pendingBlocks: make(map[int64]p2p.NodeID),
peers: make(map[types.NodeID]*scPeer),
pendingBlocks: make(map[int64]types.NodeID),
pendingTime: make(map[int64]time.Time),
receivedBlocks: make(map[int64]p2p.NodeID),
receivedBlocks: make(map[int64]types.NodeID),
targetPending: 10, // TODO - pass as param
peerTimeout: 15 * time.Second, // TODO - pass as param
minRecvRate: 0, // int64(7680), TODO - pass as param
@@ -216,14 +215,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
return &sc
}
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
if _, ok := sc.peers[peerID]; !ok {
sc.peers[peerID] = newScPeer(peerID)
}
return sc.peers[peerID]
}
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
peer, ok := sc.peers[peerID]
if !ok {
return fmt.Errorf("couldn't find peer %s", peerID)
@@ -238,7 +237,7 @@ func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
return nil
}
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
func (sc *scheduler) removePeer(peerID types.NodeID) {
peer, ok := sc.peers[peerID]
if !ok {
return
@@ -298,7 +297,7 @@ func (sc *scheduler) addNewBlocks() {
}
}
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
peer := sc.ensurePeer(peerID)
if peer.state == peerStateRemoved {
@@ -333,8 +332,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
}
}
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
peers := make([]p2p.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
peers := make([]types.NodeID, 0)
for _, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -346,8 +345,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
return peers
}
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
prunable := make([]p2p.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
prunable := make([]types.NodeID, 0)
for peerID, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -366,7 +365,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}
// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -390,7 +389,7 @@ func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, n
return nil
}
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
state := sc.getStateAtHeight(height)
if state != blockStateNew {
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
@@ -472,7 +471,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
return min
}
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
var heights []int64
for height, pendingPeerID := range sc.pendingBlocks {
if pendingPeerID == peerID {
@@ -482,7 +481,7 @@ func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
return heights
}
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
peers := sc.getPeersWithHeight(height)
if len(peers) == 0 {
return "", fmt.Errorf("cannot find peer for height %d", height)
@@ -490,7 +489,7 @@ func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
// create a map from number of pending requests to a list
// of peers having that number of pending requests.
pendingFrom := make(map[int][]p2p.NodeID)
pendingFrom := make(map[int][]types.NodeID)
for _, peerID := range peers {
numPending := len(sc.pendingFrom(peerID))
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
@@ -509,7 +508,7 @@ func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
}
// PeerByID is a list of peers sorted by peerID.
type PeerByID []p2p.NodeID
type PeerByID []types.NodeID
func (peers PeerByID) Len() int {
return len(peers)
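
PeerByID exists so selectPeer can break ties among candidate peers in a deterministic order. A sketch of the sorted slice in use (assuming the usual sort.Interface Less and Swap methods, which this excerpt truncates):

    peers := []types.NodeID{"P3", "P1", "P2"}
    sort.Sort(PeerByID(peers))
    // peers is now {"P1", "P2", "P3"}, so peer selection is stable across runs.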


internal/blockchain/v2/scheduler_test.go (+104, -105)

@@ -10,7 +10,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -20,9 +19,9 @@ type scTestParams struct {
initHeight int64
height int64
allB []int64
pending map[int64]p2p.NodeID
pending map[int64]types.NodeID
pendingTime map[int64]time.Time
received map[int64]p2p.NodeID
received map[int64]types.NodeID
peerTimeout time.Duration
minRecvRate int64
targetPending int
@@ -41,7 +40,7 @@ func verifyScheduler(sc *scheduler) {
}
func newTestScheduler(params scTestParams) *scheduler {
peers := make(map[p2p.NodeID]*scPeer)
peers := make(map[types.NodeID]*scPeer)
var maxHeight int64
initHeight := params.initHeight
@@ -54,8 +53,8 @@ func newTestScheduler(params scTestParams) *scheduler {
}
for id, peer := range params.peers {
peer.peerID = p2p.NodeID(id)
peers[p2p.NodeID(id)] = peer
peer.peerID = types.NodeID(id)
peers[types.NodeID(id)] = peer
if maxHeight < peer.height {
maxHeight = peer.height
}
@@ -122,7 +121,7 @@ func TestScMaxHeights(t *testing.T) {
name: "one ready peer",
sc: scheduler{
height: 3,
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
},
wantMax: 6,
},
@@ -130,7 +129,7 @@ func TestScMaxHeights(t *testing.T) {
name: "ready and removed peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[types.NodeID]*scPeer{
"P1": {height: 4, state: peerStateReady},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -140,7 +139,7 @@ func TestScMaxHeights(t *testing.T) {
name: "removed peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[types.NodeID]*scPeer{
"P1": {height: 4, state: peerStateRemoved},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -150,7 +149,7 @@ func TestScMaxHeights(t *testing.T) {
name: "new peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[types.NodeID]*scPeer{
"P1": {base: -1, height: -1, state: peerStateNew},
"P2": {base: -1, height: -1, state: peerStateNew}},
},
@@ -160,7 +159,7 @@ func TestScMaxHeights(t *testing.T) {
name: "mixed peers",
sc: scheduler{
height: 1,
peers: map[p2p.NodeID]*scPeer{
peers: map[types.NodeID]*scPeer{
"P1": {height: -1, state: peerStateNew},
"P2": {height: 10, state: peerStateReady},
"P3": {height: 20, state: peerStateRemoved},
@@ -187,7 +186,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {
type args struct {
peerID p2p.NodeID
peerID types.NodeID
}
tests := []struct {
name string
@@ -244,7 +243,7 @@ func TestScTouchPeer(t *testing.T) {
now := time.Now()
type args struct {
peerID p2p.NodeID
peerID types.NodeID
time time.Time
}
@@ -316,13 +315,13 @@ func TestScPrunablePeers(t *testing.T) {
name string
fields scTestParams
args args
wantResult []p2p.NodeID
wantResult []types.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []p2p.NodeID{},
wantResult: []types.NodeID{},
},
{
name: "mixed peers",
@@ -341,7 +340,7 @@ func TestScPrunablePeers(t *testing.T) {
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
wantResult: []types.NodeID{"P4", "P5", "P6"},
},
}
@@ -361,7 +360,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {
type args struct {
peerID p2p.NodeID
peerID types.NodeID
}
tests := []struct {
name string
@@ -424,13 +423,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]types.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]p2p.NodeID{},
pending: map[int64]types.NodeID{},
},
},
{
@@ -438,13 +437,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.NodeID{1: "P1"},
received: map[int64]types.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
received: map[int64]p2p.NodeID{},
received: map[int64]types.NodeID{},
},
},
{
@@ -452,15 +451,15 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pending: map[int64]types.NodeID{1: "P1", 3: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]p2p.NodeID{},
received: map[int64]p2p.NodeID{},
pending: map[int64]types.NodeID{},
received: map[int64]types.NodeID{},
},
},
{
@@ -471,8 +470,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
@@ -481,8 +480,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]p2p.NodeID{3: "P2"},
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
pending: map[int64]types.NodeID{3: "P2"},
received: map[int64]types.NodeID{4: "P2", 5: "P2"},
},
},
}
@@ -501,7 +500,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {
type args struct {
peerID p2p.NodeID
peerID types.NodeID
base int64
height int64
}
@@ -622,25 +621,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
name string
fields scTestParams
args args
wantResult []p2p.NodeID
wantResult []types.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{height: 10},
wantResult: []p2p.NodeID{},
wantResult: []types.NodeID{},
},
{
name: "only new peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
args: args{height: 10},
wantResult: []p2p.NodeID{},
wantResult: []types.NodeID{},
},
{
name: "only Removed peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
args: args{height: 2},
wantResult: []p2p.NodeID{},
wantResult: []types.NodeID{},
},
{
name: "one Ready shorter peer",
@@ -649,7 +648,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 5},
wantResult: []p2p.NodeID{},
wantResult: []types.NodeID{},
},
{
name: "one Ready equal peer",
@@ -658,7 +657,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{"P1"},
wantResult: []types.NodeID{"P1"},
},
{
name: "one Ready higher peer",
@@ -668,7 +667,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{"P1"},
wantResult: []types.NodeID{"P1"},
},
{
name: "one Ready higher peer at base",
@ -678,7 +677,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{"P1"},
wantResult: []types.NodeID{"P1"},
},
{
name: "one Ready higher peer with higher base",
@ -688,7 +687,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []p2p.NodeID{},
wantResult: []types.NodeID{},
},
{
name: "multiple mixed peers",
@@ -703,7 +702,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{8, 9, 10, 11},
},
args: args{height: 8},
wantResult: []p2p.NodeID{"P2", "P5"},
wantResult: []types.NodeID{"P2", "P5"},
},
}
@@ -725,7 +724,7 @@ func TestScMarkPending(t *testing.T) {
now := time.Now()
type args struct {
peerID p2p.NodeID
peerID types.NodeID
height int64
tm time.Time
}
@@ -821,14 +820,14 @@ func TestScMarkPending(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]types.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
},
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
},
},
@@ -851,7 +850,7 @@ func TestScMarkReceived(t *testing.T) {
now := time.Now()
type args struct {
peerID p2p.NodeID
peerID types.NodeID
height int64
size int64
tm time.Time
@@ -891,7 +890,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
@@ -900,7 +899,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
wantErr: true,
},
@@ -909,13 +908,13 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{},
pending: map[int64]types.NodeID{},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{},
pending: map[int64]types.NodeID{},
},
wantErr: true,
},
@@ -924,14 +923,14 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
wantErr: true,
@@ -941,16 +940,16 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]types.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
received: map[int64]p2p.NodeID{2: "P1"},
received: map[int64]types.NodeID{2: "P1"},
},
},
}
@@ -991,7 +990,7 @@ func TestScMarkProcessed(t *testing.T) {
height: 2,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]types.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
targetPending: 1,
},
@@ -1009,15 +1008,15 @@ func TestScMarkProcessed(t *testing.T) {
height: 1,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]types.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
received: map[int64]p2p.NodeID{1: "P1"}},
received: map[int64]types.NodeID{1: "P1"}},
args: args{height: 1},
wantFields: scTestParams{
height: 2,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]types.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now}},
},
}
@@ -1101,7 +1100,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantResult: false,
@@ -1111,7 +1110,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantResult: false,
},
@@ -1122,7 +1121,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
peers: map[string]*scPeer{
"P1": {height: 4, state: peerStateReady}},
allB: []int64{4},
received: map[int64]p2p.NodeID{4: "P1"},
received: map[int64]types.NodeID{4: "P1"},
},
wantResult: true,
},
@@ -1131,7 +1130,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pending: map[int64]types.NodeID{2: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{2: now, 4: now},
},
wantResult: false,
@@ -1179,7 +1178,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1190,7 +1189,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1209,7 +1208,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.NodeID{2: "P1"},
pending: map[int64]types.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1239,7 +1238,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult p2p.NodeID
wantResult types.NodeID
wantError bool
}{
{
@@ -1307,7 +1306,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]p2p.NodeID{
pending: map[int64]types.NodeID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1323,7 +1322,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]p2p.NodeID{
pending: map[int64]types.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1392,7 +1391,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: p2p.NodeID("P1"),
peerID: types.NodeID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1433,7 +1432,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P2"},
pending: map[int64]types.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1444,7 +1443,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1455,7 +1454,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1477,7 +1476,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: p2p.NodeID("P1"),
peerID: types.NodeID("P1"),
height: 6,
}
@@ -1513,14 +1512,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P2"},
pending: map[int64]types.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P2"},
pending: map[int64]types.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1529,7 +1528,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1552,7 +1551,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: p2p.NodeID("P1"),
peerID: types.NodeID("P1"),
height: 6,
}
@@ -1579,7 +1578,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
@@ -1591,7 +1590,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1602,8 +1601,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1646,7 +1645,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1658,7 +1657,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.NodeID{6: "P1"},
pending: map[int64]types.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1670,7 +1669,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1681,8 +1680,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 5,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{5, 6, 7, 8},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: noOpEvent{},
@@ -1697,8 +1696,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
"P3": {height: 8, state: peerStateReady},
},
allB: []int64{5, 6, 7, 8},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
wantEvent: noOpEvent{},
@@ -1717,7 +1716,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
func TestScHandleAddNewPeer(t *testing.T) {
addP1 := bcAddNewPeer{
peerID: p2p.NodeID("P1"),
peerID: types.NodeID("P1"),
}
type args struct {
event bcAddNewPeer
@@ -1828,7 +1827,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
allB: []int64{1, 2, 3, 4, 5, 6, 7},
peerTimeout: time.Second},
args: args{event: pruneEv},
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
},
{
name: "mixed peers, finish after pruning",
@@ -1926,7 +1925,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 4, state: peerStateReady},
"P2": {height: 5, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5},
pending: map[int64]p2p.NodeID{
pending: map[int64]types.NodeID{
1: "P1", 2: "P1",
3: "P2",
},
@@ -1944,7 +1943,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P3": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.NodeID{
pending: map[int64]types.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -2106,7 +2105,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1"},
pending: map[int64]types.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: tick[1]},
height: 1,
},
@ -2118,7 +2117,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
height: 1,
},
@ -2130,7 +2129,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
height: 1,
},
@ -2142,9 +2141,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
pending: map[int64]types.NodeID{2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
received: map[int64]p2p.NodeID{1: "P1"},
received: map[int64]types.NodeID{1: "P1"},
height: 1,
},
},
@ -2155,9 +2154,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.NodeID{3: "P1"},
pending: map[int64]types.NodeID{3: "P1"},
pendingTime: map[int64]time.Time{3: tick[3]},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
received: map[int64]types.NodeID{1: "P1", 2: "P1"},
height: 1,
},
},
@ -2168,29 +2167,29 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
},
{ // processed block 1
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
wantEvent: noOpEvent{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{2, 3},
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
received: map[int64]types.NodeID{2: "P1", 3: "P1"},
height: 2,
},
},
{ // processed block 2
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
wantEvent: scFinishedEv{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{3},
received: map[int64]p2p.NodeID{3: "P1"},
received: map[int64]types.NodeID{3: "P1"},
height: 3,
},
},
@ -2206,7 +2205,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
@ -2217,7 +2216,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.NodeID{},
received: map[int64]types.NodeID{},
height: 1,
},
},


+ 1
- 1
internal/consensus/byzantine_test.go View File

@ -100,7 +100,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock
var bzNodeID p2p.NodeID
var bzNodeID types.NodeID
// Set the first state's reactor as the dedicated byzantine reactor and grab
// the NodeID that corresponds to the state so we can reference the reactor.


+ 1
- 2
internal/consensus/msgs.go View File

@ -5,7 +5,6 @@ import (
"fmt"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bits"
tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
@ -651,7 +650,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
}
pb = msgInfo{
Msg: walMsg,
PeerID: p2p.NodeID(msg.MsgInfo.PeerID),
PeerID: types.NodeID(msg.MsgInfo.PeerID),
}
case *tmcons.WALMessage_TimeoutInfo:


+ 1
- 2
internal/consensus/msgs_test.go View File

@ -14,7 +14,6 @@ import (
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/crypto/tmhash"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/bits"
"github.com/tendermint/tendermint/libs/bytes"
@ -249,7 +248,7 @@ func TestWALMsgProto(t *testing.T) {
Round: 1,
Part: &parts,
},
PeerID: p2p.NodeID("string"),
PeerID: types.NodeID("string"),
}, &tmcons.WALMessage{
Sum: &tmcons.WALMessage_MsgInfo{
MsgInfo: &tmcons.MsgInfo{


+ 2
- 3
internal/consensus/peer_state.go View File

@ -8,7 +8,6 @@ import (
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bits"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
@ -37,7 +36,7 @@ func (pss peerStateStats) String() string {
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
peerID p2p.NodeID
peerID types.NodeID
logger log.Logger
// NOTE: Modify below using setters, never directly.
@ -51,7 +50,7 @@ type PeerState struct {
}
// NewPeerState returns a new PeerState for the given node ID.
func NewPeerState(logger log.Logger, peerID p2p.NodeID) *PeerState {
func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState {
return &PeerState{
peerID: peerID,
logger: logger,


+ 4
- 4
internal/consensus/reactor.go View File

@ -107,7 +107,7 @@ type Reactor struct {
Metrics *Metrics
mtx tmsync.RWMutex
peers map[p2p.NodeID]*PeerState
peers map[types.NodeID]*PeerState
waitSync bool
stateCh *p2p.Channel
@ -143,7 +143,7 @@ func NewReactor(
r := &Reactor{
state: cs,
waitSync: waitSync,
peers: make(map[p2p.NodeID]*PeerState),
peers: make(map[types.NodeID]*PeerState),
Metrics: NopMetrics(),
stateCh: stateCh,
dataCh: dataCh,
@ -319,7 +319,7 @@ func (r *Reactor) StringIndented(indent string) string {
}
// GetPeerState returns PeerState for a given NodeID.
func (r *Reactor) GetPeerState(peerID p2p.NodeID) (*PeerState, bool) {
func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) {
r.mtx.RLock()
defer r.mtx.RUnlock()
@ -416,7 +416,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
}
}
func (r *Reactor) sendNewRoundStepMessage(peerID p2p.NodeID) {
func (r *Reactor) sendNewRoundStepMessage(peerID types.NodeID) {
rs := r.state.GetRoundState()
msg := makeRoundStepMessage(rs)
r.stateCh.Out <- p2p.Envelope{


+ 10
- 10
internal/consensus/reactor_test.go View File

@ -39,13 +39,13 @@ var (
type reactorTestSuite struct {
network *p2ptest.Network
states map[p2p.NodeID]*State
reactors map[p2p.NodeID]*Reactor
subs map[p2p.NodeID]types.Subscription
stateChannels map[p2p.NodeID]*p2p.Channel
dataChannels map[p2p.NodeID]*p2p.Channel
voteChannels map[p2p.NodeID]*p2p.Channel
voteSetBitsChannels map[p2p.NodeID]*p2p.Channel
states map[types.NodeID]*State
reactors map[types.NodeID]*Reactor
subs map[types.NodeID]types.Subscription
stateChannels map[types.NodeID]*p2p.Channel
dataChannels map[types.NodeID]*p2p.Channel
voteChannels map[types.NodeID]*p2p.Channel
voteSetBitsChannels map[types.NodeID]*p2p.Channel
}
func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
@ -59,9 +59,9 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
rts := &reactorTestSuite{
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
states: make(map[p2p.NodeID]*State),
reactors: make(map[p2p.NodeID]*Reactor, numNodes),
subs: make(map[p2p.NodeID]types.Subscription, numNodes),
states: make(map[types.NodeID]*State),
reactors: make(map[types.NodeID]*Reactor, numNodes),
subs: make(map[types.NodeID]types.Subscription, numNodes),
}
rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)


+ 9
- 10
internal/consensus/state.go View File

@ -17,7 +17,6 @@ import (
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/libs/fail"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
tmevents "github.com/tendermint/tendermint/libs/events"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
@ -44,8 +43,8 @@ var msgQueueSize = 1000
// msgs from the reactor which may update the state
type msgInfo struct {
Msg Message `json:"msg"`
PeerID p2p.NodeID `json:"peer_key"`
Msg Message `json:"msg"`
PeerID types.NodeID `json:"peer_key"`
}
// internally generated messages which may update the state
@ -471,7 +470,7 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) {
// TODO: should these return anything or let callers just use events?
// AddVote inputs a vote.
func (cs *State) AddVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err error) {
func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
} else {
@ -483,7 +482,7 @@ func (cs *State) AddVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err e
}
// SetProposal inputs a proposal.
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.NodeID) error {
func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
@ -496,7 +495,7 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID p2p.NodeID) error
}
// AddProposalBlockPart inputs a part of the proposal block.
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID p2p.NodeID) error {
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error {
if peerID == "" {
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
@ -513,7 +512,7 @@ func (cs *State) SetProposalAndBlock(
proposal *types.Proposal,
block *types.Block,
parts *types.PartSet,
peerID p2p.NodeID,
peerID types.NodeID,
) error {
if err := cs.SetProposal(proposal, peerID); err != nil {
@ -1827,7 +1826,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we time out of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.NodeID) (added bool, err error) {
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) {
height, round, part := msg.Height, msg.Round, msg.Part
// Blocks might be reused, so round mismatch is OK
@ -1925,7 +1924,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2p.NodeID)
}
// Attempt to add the vote. If it's a duplicate signature, dupeout the validator.
func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.NodeID) (bool, error) {
func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) {
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
@ -1973,7 +1972,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.NodeID) (bool, error) {
return added, nil
}
func (cs *State) addVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err error) {
func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
cs.Logger.Debug(
"adding vote",
"vote_height", vote.Height,


+ 6
- 7
internal/consensus/types/height_vote_set.go View File

@ -6,7 +6,6 @@ import (
"strings"
"sync"
"github.com/tendermint/tendermint/internal/p2p"
tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@ -44,9 +43,9 @@ type HeightVoteSet struct {
valSet *types.ValidatorSet
mtx sync.Mutex
round int32 // max tracked round
roundVoteSets map[int32]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[p2p.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
round int32 // max tracked round
roundVoteSets map[int32]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[types.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
}
func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
@ -64,7 +63,7 @@ func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) {
hvs.height = height
hvs.valSet = valSet
hvs.roundVoteSets = make(map[int32]RoundVoteSet)
hvs.peerCatchupRounds = make(map[p2p.NodeID][]int32)
hvs.peerCatchupRounds = make(map[types.NodeID][]int32)
hvs.addRound(0)
hvs.round = 0
@ -114,7 +113,7 @@ func (hvs *HeightVoteSet) addRound(round int32) {
// Duplicate votes return added=false, err=nil.
// By convention, peerID is "" if origin is self.
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.NodeID) (added bool, err error) {
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(vote.Type) {
@ -185,7 +184,7 @@ func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType
func (hvs *HeightVoteSet) SetPeerMaj23(
round int32,
voteType tmproto.SignedMsgType,
peerID p2p.NodeID,
peerID types.NodeID,
blockID types.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()


+ 3
- 3
internal/evidence/reactor.go View File

@ -62,7 +62,7 @@ type Reactor struct {
peerWG sync.WaitGroup
mtx tmsync.Mutex
peerRoutines map[p2p.NodeID]*tmsync.Closer
peerRoutines map[types.NodeID]*tmsync.Closer
}
// NewReactor returns a reference to a new evidence reactor, which implements the
@ -79,7 +79,7 @@ func NewReactor(
evidenceCh: evidenceCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
peerRoutines: make(map[p2p.NodeID]*tmsync.Closer),
peerRoutines: make(map[types.NodeID]*tmsync.Closer),
}
r.BaseService = *service.NewBaseService(logger, "Evidence", r)
@ -285,7 +285,7 @@ func (r *Reactor) processPeerUpdates() {
// that the peer has already received or may not be ready for.
//
// REF: https://github.com/tendermint/tendermint/issues/4727
func (r *Reactor) broadcastEvidenceLoop(peerID p2p.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Closer) {
var next *clist.CElement
defer func() {


+ 12
- 12
internal/evidence/reactor_test.go View File

@ -35,11 +35,11 @@ var (
type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
reactors map[p2p.NodeID]*evidence.Reactor
pools map[p2p.NodeID]*evidence.Pool
evidenceChannels map[p2p.NodeID]*p2p.Channel
peerUpdates map[p2p.NodeID]*p2p.PeerUpdates
peerChans map[p2p.NodeID]chan p2p.PeerUpdate
reactors map[types.NodeID]*evidence.Reactor
pools map[types.NodeID]*evidence.Pool
evidenceChannels map[types.NodeID]*p2p.Channel
peerUpdates map[types.NodeID]*p2p.PeerUpdates
peerChans map[types.NodeID]chan p2p.PeerUpdate
nodes []*p2ptest.Node
numStateStores int
}
@ -56,10 +56,10 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
numStateStores: numStateStores,
logger: log.TestingLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}),
reactors: make(map[p2p.NodeID]*evidence.Reactor, numStateStores),
pools: make(map[p2p.NodeID]*evidence.Pool, numStateStores),
peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numStateStores),
peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numStateStores),
reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores),
pools: make(map[types.NodeID]*evidence.Pool, numStateStores),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores),
}
chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)}
@ -124,7 +124,7 @@ func (rts *reactorTestSuite) start(t *testing.T) {
"network does not have expected number of nodes")
}
func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...p2p.NodeID) {
func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...types.NodeID) {
t.Helper()
fn := func(pool *evidence.Pool) {
@ -188,7 +188,7 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence
}
wg.Add(1)
go func(id p2p.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
go func(id types.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
}
wg.Wait()
}
@ -293,7 +293,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {
// primary. As a result, the primary will gossip all evidence to each secondary.
primary := rts.network.RandomNode()
secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1)
secondaryIDs := make([]p2p.NodeID, 0, cap(secondaries))
secondaryIDs := make([]types.NodeID, 0, cap(secondaries))
for id := range rts.network.Nodes {
if id == primary.NodeID {
continue


+ 6
- 6
internal/mempool/ids.go View File

@ -4,21 +4,21 @@ import (
"fmt"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// nolint: golint
// TODO: Rename type.
type MempoolIDs struct {
mtx tmsync.RWMutex
peerMap map[p2p.NodeID]uint16
peerMap map[types.NodeID]uint16
nextID uint16 // assumes that a node will never have over 65536 active peers
activeIDs map[uint16]struct{} // used to check if a given peerID key is used
}
func NewMempoolIDs() *MempoolIDs {
return &MempoolIDs{
peerMap: make(map[p2p.NodeID]uint16),
peerMap: make(map[types.NodeID]uint16),
// reserve UnknownPeerID for mempoolReactor.BroadcastTx
activeIDs: map[uint16]struct{}{UnknownPeerID: {}},
@ -28,7 +28,7 @@ func NewMempoolIDs() *MempoolIDs {
// ReserveForPeer searches for the next unused ID and assigns it to the provided
// peer.
func (ids *MempoolIDs) ReserveForPeer(peerID p2p.NodeID) {
func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()
@ -38,7 +38,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID p2p.NodeID) {
}
// Reclaim returns the ID reserved for the peer back to unused pool.
func (ids *MempoolIDs) Reclaim(peerID p2p.NodeID) {
func (ids *MempoolIDs) Reclaim(peerID types.NodeID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()
@ -50,7 +50,7 @@ func (ids *MempoolIDs) Reclaim(peerID p2p.NodeID) {
}
// GetForPeer returns an ID reserved for the peer.
func (ids *MempoolIDs) GetForPeer(peerID p2p.NodeID) uint16 {
func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 {
ids.mtx.RLock()
defer ids.mtx.RUnlock()
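The MempoolIDs registry above maps each peer's types.NodeID to a compact uint16. A minimal usage sketch, assuming access to the internal mempool package from within the repository (illustrative only; the hex node ID is an arbitrary valid value):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Any lowercase 40-character hex string is a valid node ID.
	peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
	if err != nil {
		panic(err)
	}

	ids := mempool.NewMempoolIDs()
	ids.ReserveForPeer(peerID)          // assigns the next unused uint16
	fmt.Println(ids.GetForPeer(peerID)) // compact ID stored per transaction
	ids.Reclaim(peerID)                 // returns the ID to the unused pool
}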


+ 2
- 2
internal/mempool/ids_test.go View File

@ -4,13 +4,13 @@ import (
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
func TestMempoolIDsBasic(t *testing.T) {
ids := NewMempoolIDs()
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
ids.ReserveForPeer(peerID)


+ 3
- 4
internal/mempool/tx.go View File

@ -3,7 +3,6 @@ package mempool
import (
"crypto/sha256"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
@ -25,9 +24,9 @@ func TxHashFromBytes(tx []byte) []byte {
type TxInfo struct {
// SenderID is the internal peer ID used in the mempool to identify the
// sender, storing two bytes with each transaction instead of 20 bytes for
// the p2p.NodeID.
// the types.NodeID.
SenderID uint16
// SenderNodeID is the actual p2p.NodeID of the sender.
SenderNodeID p2p.NodeID
// SenderNodeID is the actual types.NodeID of the sender.
SenderNodeID types.NodeID
}
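To make the two-byte versus twenty-byte trade-off concrete, a hedged sketch of populating a TxInfo; makeTxInfo and its arguments are hypothetical stand-ins for the surrounding reactor code:

package main

import (
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/types"
)

// makeTxInfo (hypothetical) pairs the compact mempool ID with the full
// node ID: the uint16 is stored with every transaction, while the 20-byte
// types.NodeID identifies the actual sender.
func makeTxInfo(ids *mempool.MempoolIDs, peerID types.NodeID) mempool.TxInfo {
	return mempool.TxInfo{
		SenderID:     ids.GetForPeer(peerID), // 2 bytes per tx
		SenderNodeID: peerID,                 // full 20-byte node ID
	}
}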

+ 2
- 3
internal/mempool/v0/clist_mempool.go View File

@ -12,7 +12,6 @@ import (
"github.com/tendermint/tendermint/internal/libs/clist"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/proxy"
@ -303,7 +302,7 @@ func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
func (mem *CListMempool) reqResCb(
tx []byte,
peerID uint16,
peerP2PID p2p.NodeID,
peerP2PID types.NodeID,
externalCb func(*abci.Response),
) func(res *abci.Response) {
return func(res *abci.Response) {
@ -382,7 +381,7 @@ func (mem *CListMempool) isFull(txSize int) error {
func (mem *CListMempool) resCbFirstTime(
tx []byte,
peerID uint16,
peerP2PID p2p.NodeID,
peerP2PID types.NodeID,
res *abci.Response,
) {
switch r := res.Value.(type) {


+ 4
- 4
internal/mempool/v0/reactor.go View File

@ -27,7 +27,7 @@ var (
// peer information. This should eventually be replaced with a message-oriented
// approach utilizing the p2p stack.
type PeerManager interface {
GetHeight(p2p.NodeID) int64
GetHeight(types.NodeID) int64
}
// Reactor implements a service that contains a mempool of txs that are broadcast
@ -54,7 +54,7 @@ type Reactor struct {
peerWG sync.WaitGroup
mtx tmsync.Mutex
peerRoutines map[p2p.NodeID]*tmsync.Closer
peerRoutines map[types.NodeID]*tmsync.Closer
}
// NewReactor returns a reference to a new reactor.
@ -75,7 +75,7 @@ func NewReactor(
mempoolCh: mempoolCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
peerRoutines: make(map[p2p.NodeID]*tmsync.Closer),
peerRoutines: make(map[types.NodeID]*tmsync.Closer),
}
r.BaseService = *service.NewBaseService(logger, "Mempool", r)
@ -299,7 +299,7 @@ func (r *Reactor) processPeerUpdates() {
}
}
func (r *Reactor) broadcastTxRoutine(peerID p2p.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
peerMempoolID := r.ids.GetForPeer(peerID)
var next *clist.CElement


+ 17
- 17
internal/mempool/v0/reactor_test.go View File

@ -25,15 +25,15 @@ type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
reactors map[p2p.NodeID]*Reactor
mempoolChnnels map[p2p.NodeID]*p2p.Channel
mempools map[p2p.NodeID]*CListMempool
kvstores map[p2p.NodeID]*kvstore.Application
reactors map[types.NodeID]*Reactor
mempoolChnnels map[types.NodeID]*p2p.Channel
mempools map[types.NodeID]*CListMempool
kvstores map[types.NodeID]*kvstore.Application
peerChans map[p2p.NodeID]chan p2p.PeerUpdate
peerUpdates map[p2p.NodeID]*p2p.PeerUpdates
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
nodes []p2p.NodeID
nodes []types.NodeID
}
func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite {
@ -42,12 +42,12 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac
rts := &reactorTestSuite{
logger: log.TestingLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
reactors: make(map[p2p.NodeID]*Reactor, numNodes),
mempoolChnnels: make(map[p2p.NodeID]*p2p.Channel, numNodes),
mempools: make(map[p2p.NodeID]*CListMempool, numNodes),
kvstores: make(map[p2p.NodeID]*kvstore.Application, numNodes),
peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes),
mempools: make(map[types.NodeID]*CListMempool, numNodes),
kvstores: make(map[types.NodeID]*kvstore.Application, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
}
chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)}
@ -118,7 +118,7 @@ func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) {
}
}
func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...p2p.NodeID) {
func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) {
t.Helper()
fn := func(pool *CListMempool) {
@ -149,7 +149,7 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...p2p
}
wg.Add(1)
func(nid p2p.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id)
func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id)
}
wg.Wait()
@ -313,7 +313,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {
nodeID := rts.nodes[0]
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
// ensure the reactor does not panic (i.e. exhaust active IDs)
@ -357,7 +357,7 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
// 0 is already reserved for UnknownPeerID
ids := mempool.NewMempoolIDs()
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
for i := 0; i < mempool.MaxActiveIDs-1; i++ {


+ 4
- 4
internal/mempool/v1/reactor.go View File

@ -26,7 +26,7 @@ var (
// peer information. This should eventually be replaced with a message-oriented
// approach utilizing the p2p stack.
type PeerManager interface {
GetHeight(p2p.NodeID) int64
GetHeight(types.NodeID) int64
}
// Reactor implements a service that contains a mempool of txs that are broadcast
@ -53,7 +53,7 @@ type Reactor struct {
peerWG sync.WaitGroup
mtx tmsync.Mutex
peerRoutines map[p2p.NodeID]*tmsync.Closer
peerRoutines map[types.NodeID]*tmsync.Closer
}
// NewReactor returns a reference to a new reactor.
@ -74,7 +74,7 @@ func NewReactor(
mempoolCh: mempoolCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
peerRoutines: make(map[p2p.NodeID]*tmsync.Closer),
peerRoutines: make(map[types.NodeID]*tmsync.Closer),
}
r.BaseService = *service.NewBaseService(logger, "Mempool", r)
@ -298,7 +298,7 @@ func (r *Reactor) processPeerUpdates() {
}
}
func (r *Reactor) broadcastTxRoutine(peerID p2p.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
peerMempoolID := r.ids.GetForPeer(peerID)
var memTx *WrappedTx


+ 4
- 56
internal/p2p/address.go View File

@ -2,7 +2,6 @@ package p2p
import (
"context"
"encoding/hex"
"errors"
"fmt"
"net"
@ -11,19 +10,10 @@ import (
"strconv"
"strings"
"github.com/tendermint/tendermint/crypto"
)
const (
// NodeIDByteLength is the length of a crypto.Address. Currently only 20.
// FIXME: support other length addresses?
NodeIDByteLength = crypto.AddressSize
"github.com/tendermint/tendermint/types"
)
var (
// reNodeID is a regexp for valid node IDs.
reNodeID = regexp.MustCompile(`^[0-9a-f]{40}$`)
// stringHasScheme tries to detect URLs with schemes. It looks for a : before a / (if any).
stringHasScheme = func(str string) bool {
return strings.Contains(str, "://")
@ -34,48 +24,6 @@ var (
reSchemeIsHost = regexp.MustCompile(`^[^/:]+:\d+(/|$)`)
)
// NodeID is a hex-encoded crypto.Address. It must be lowercased
// (for uniqueness) and of length 2*NodeIDByteLength.
type NodeID string
// NewNodeID returns a lowercased (normalized) NodeID, or errors if the
// node ID is invalid.
func NewNodeID(nodeID string) (NodeID, error) {
n := NodeID(strings.ToLower(nodeID))
return n, n.Validate()
}
// NodeIDFromPubKey creates a node ID from a given PubKey address.
func NodeIDFromPubKey(pubKey crypto.PubKey) NodeID {
return NodeID(hex.EncodeToString(pubKey.Address()))
}
// Bytes converts the node ID to its binary byte representation.
func (id NodeID) Bytes() ([]byte, error) {
bz, err := hex.DecodeString(string(id))
if err != nil {
return nil, fmt.Errorf("invalid node ID encoding: %w", err)
}
return bz, nil
}
// Validate validates the NodeID.
func (id NodeID) Validate() error {
switch {
case len(id) == 0:
return errors.New("empty node ID")
case len(id) != 2*NodeIDByteLength:
return fmt.Errorf("invalid node ID length %d, expected %d", len(id), 2*NodeIDByteLength)
case !reNodeID.MatchString(string(id)):
return fmt.Errorf("node ID can only contain lowercased hex digits")
default:
return nil
}
}
// NodeAddress is a node address URL. It differs from a transport Endpoint in
// that it contains the node's ID, and that the address hostname may be resolved
// into multiple IP addresses (and thus multiple endpoints).
@ -83,7 +31,7 @@ func (id NodeID) Validate() error {
// If the URL is opaque, i.e. of the form "scheme:opaque", then the opaque part
// is expected to contain a node ID.
type NodeAddress struct {
NodeID NodeID
NodeID types.NodeID
Protocol Protocol
Hostname string
Port uint16
@ -110,13 +58,13 @@ func ParseNodeAddress(urlString string) (NodeAddress, error) {
// Opaque URLs are expected to contain only a node ID.
if url.Opaque != "" {
address.NodeID = NodeID(url.Opaque)
address.NodeID = types.NodeID(url.Opaque)
return address, address.Validate()
}
// Otherwise, just parse a normal networked URL.
if url.User != nil {
address.NodeID = NodeID(strings.ToLower(url.User.Username()))
address.NodeID = types.NodeID(strings.ToLower(url.User.Username()))
}
address.Hostname = strings.ToLower(url.Hostname())
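With NodeID now living in types, parsing an address URL looks roughly like the sketch below (illustrative; it assumes the internal p2p import is reachable, and the node ID is an arbitrary valid hex string):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/p2p"
)

func main() {
	// The user part of the URL becomes the node ID (a types.NodeID after
	// this change); scheme, hostname, and port are parsed as usual.
	addr, err := p2p.ParseNodeAddress(
		"mconn://00112233445566778899aabbccddeeff00112233@127.0.0.1:26656")
	if err != nil {
		panic(err)
	}
	fmt.Println(addr.NodeID, addr.Protocol, addr.Hostname, addr.Port)
}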


+ 11
- 10
internal/p2p/address_test.go View File

@ -9,13 +9,14 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
func TestNewNodeID(t *testing.T) {
// Most tests are in TestNodeID_Validate, this just checks that it's validated.
testcases := []struct {
input string
expect p2p.NodeID
expect types.NodeID
ok bool
}{
{"", "", false},
@ -28,7 +29,7 @@ func TestNewNodeID(t *testing.T) {
for _, tc := range testcases {
tc := tc
t.Run(tc.input, func(t *testing.T) {
id, err := p2p.NewNodeID(tc.input)
id, err := types.NewNodeID(tc.input)
if !tc.ok {
require.Error(t, err)
} else {
@ -41,14 +42,14 @@ func TestNewNodeID(t *testing.T) {
func TestNewNodeIDFromPubKey(t *testing.T) {
privKey := ed25519.GenPrivKeyFromSecret([]byte("foo"))
nodeID := p2p.NodeIDFromPubKey(privKey.PubKey())
require.Equal(t, p2p.NodeID("045f5600654182cfeaccfe6cb19f0642e8a59898"), nodeID)
nodeID := types.NodeIDFromPubKey(privKey.PubKey())
require.Equal(t, types.NodeID("045f5600654182cfeaccfe6cb19f0642e8a59898"), nodeID)
require.NoError(t, nodeID.Validate())
}
func TestNodeID_Bytes(t *testing.T) {
testcases := []struct {
nodeID p2p.NodeID
nodeID types.NodeID
expect []byte
ok bool
}{
@ -74,7 +75,7 @@ func TestNodeID_Bytes(t *testing.T) {
func TestNodeID_Validate(t *testing.T) {
testcases := []struct {
nodeID p2p.NodeID
nodeID types.NodeID
ok bool
}{
{"", false},
@ -99,7 +100,7 @@ func TestNodeID_Validate(t *testing.T) {
func TestParseNodeAddress(t *testing.T) {
user := "00112233445566778899aabbccddeeff00112233"
id := p2p.NodeID(user)
id := types.NodeID(user)
testcases := []struct {
url string
@ -201,7 +202,7 @@ func TestParseNodeAddress(t *testing.T) {
}
func TestNodeAddress_Resolve(t *testing.T) {
id := p2p.NodeID("00112233445566778899aabbccddeeff00112233")
id := types.NodeID("00112233445566778899aabbccddeeff00112233")
testcases := []struct {
address p2p.NodeAddress
@ -285,7 +286,7 @@ func TestNodeAddress_Resolve(t *testing.T) {
}
func TestNodeAddress_String(t *testing.T) {
id := p2p.NodeID("00112233445566778899aabbccddeeff00112233")
id := types.NodeID("00112233445566778899aabbccddeeff00112233")
user := string(id)
testcases := []struct {
address p2p.NodeAddress
@ -348,7 +349,7 @@ func TestNodeAddress_String(t *testing.T) {
}
func TestNodeAddress_Validate(t *testing.T) {
id := p2p.NodeID("00112233445566778899aabbccddeeff00112233")
id := types.NodeID("00112233445566778899aabbccddeeff00112233")
testcases := []struct {
address p2p.NodeAddress
ok bool


+ 5
- 12
internal/p2p/errors.go View File

@ -3,6 +3,8 @@ package p2p
import (
"fmt"
"net"
"github.com/tendermint/tendermint/types"
)
// ErrFilterTimeout indicates that a filter operation timed out.
@ -18,7 +20,7 @@ type ErrRejected struct {
addr NetAddress
conn net.Conn
err error
id NodeID
id types.NodeID
isAuthFailure bool
isDuplicate bool
isFiltered bool
@ -99,7 +101,7 @@ func (e ErrRejected) IsSelf() bool { return e.isSelf }
// ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known
// ID.
type ErrSwitchDuplicatePeerID struct {
ID NodeID
ID types.NodeID
}
func (e ErrSwitchDuplicatePeerID) Error() string {
@ -127,7 +129,7 @@ func (e ErrSwitchConnectToSelf) Error() string {
type ErrSwitchAuthenticationFailure struct {
Dialed *NetAddress
Got NodeID
Got types.NodeID
}
func (e ErrSwitchAuthenticationFailure) Error() string {
@ -164,15 +166,6 @@ func (e ErrNetAddressInvalid) Error() string {
return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err)
}
type ErrNetAddressLookup struct {
Addr string
Err error
}
func (e ErrNetAddressLookup) Error() string {
return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err)
}
// ErrCurrentlyDialingOrExistingAddress indicates that we're currently
// dialing this address or it belongs to an existing peer.
type ErrCurrentlyDialingOrExistingAddress struct {


+ 4
- 3
internal/p2p/key.go View File

@ -7,6 +7,7 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
tmjson "github.com/tendermint/tendermint/libs/json"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
)
//------------------------------------------------------------------------------
@ -17,7 +18,7 @@ import (
// It contains the nodes private key for authentication.
type NodeKey struct {
// Canonical ID - hex-encoded pubkey's address (IDByteLength bytes)
ID NodeID `json:"id"`
ID types.NodeID `json:"id"`
// Private key
PrivKey crypto.PrivKey `json:"priv_key"`
}
@ -64,7 +65,7 @@ func LoadOrGenNodeKey(filePath string) (NodeKey, error) {
func GenNodeKey() NodeKey {
privKey := ed25519.GenPrivKey()
return NodeKey{
ID: NodeIDFromPubKey(privKey.PubKey()),
ID: types.NodeIDFromPubKey(privKey.PubKey()),
PrivKey: privKey,
}
}
@ -80,6 +81,6 @@ func LoadNodeKey(filePath string) (NodeKey, error) {
if err != nil {
return NodeKey{}, err
}
nodeKey.ID = NodeIDFromPubKey(nodeKey.PubKey())
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
return nodeKey, nil
}
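A small sketch of the reworked key flow: the generated key's ID now comes from the public types helper rather than a p2p-local function (illustrative only):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/types"
)

func main() {
	nodeKey := p2p.GenNodeKey()
	// The ID is the hex-encoded address of the public key, so recomputing
	// it via types.NodeIDFromPubKey must agree with the stored ID.
	fmt.Println(nodeKey.ID == types.NodeIDFromPubKey(nodeKey.PubKey())) // true
}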

+ 4
- 3
internal/p2p/mock/peer.go View File

@ -6,12 +6,13 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
type Peer struct {
*service.BaseService
ip net.IP
id p2p.NodeID
id types.NodeID
addr *p2p.NetAddress
kv map[string]interface{}
Outbound, Persistent bool
@ -24,7 +25,7 @@ func NewPeer(ip net.IP) *Peer {
if ip == nil {
_, netAddr = p2p.CreateRoutableAddr()
} else {
netAddr = p2p.NewNetAddressIPPort(ip, 26656)
netAddr = types.NewNetAddressIPPort(ip, 26656)
}
nodeKey := p2p.GenNodeKey()
netAddr.ID = nodeKey.ID
@ -51,7 +52,7 @@ func (mp *Peer) NodeInfo() p2p.NodeInfo {
}
}
func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp *Peer) ID() p2p.NodeID { return mp.id }
func (mp *Peer) ID() types.NodeID { return mp.id }
func (mp *Peer) IsOutbound() bool { return mp.Outbound }
func (mp *Peer) IsPersistent() bool { return mp.Persistent }
func (mp *Peer) Get(key string) interface{} {


+ 5
- 4
internal/p2p/mocks/peer.go View File

@ -5,6 +5,7 @@ package mocks
import (
conn "github.com/tendermint/tendermint/internal/p2p/conn"
log "github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
mock "github.com/stretchr/testify/mock"
@ -54,14 +55,14 @@ func (_m *Peer) Get(_a0 string) interface{} {
}
// ID provides a mock function with given fields:
func (_m *Peer) ID() p2p.NodeID {
func (_m *Peer) ID() types.NodeID {
ret := _m.Called()
var r0 p2p.NodeID
if rf, ok := ret.Get(0).(func() p2p.NodeID); ok {
var r0 types.NodeID
if rf, ok := ret.Get(0).(func() types.NodeID); ok {
r0 = rf()
} else {
r0 = ret.Get(0).(p2p.NodeID)
r0 = ret.Get(0).(types.NodeID)
}
return r0


+ 2
- 413
internal/p2p/netaddress.go View File

@ -5,418 +5,7 @@
package p2p
import (
"errors"
"flag"
"fmt"
"net"
"strconv"
"strings"
"time"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
// EmptyNetAddress defines the string representation of an empty NetAddress
const EmptyNetAddress = "<nil-NetAddress>"
// NetAddress defines information about a peer on the network
// including its ID, IP address, and port.
type NetAddress struct {
ID NodeID `json:"id"`
IP net.IP `json:"ip"`
Port uint16 `json:"port"`
}
// IDAddressString returns id@hostPort. It strips the leading
// protocol from protocolHostPort if it exists.
func IDAddressString(id NodeID, protocolHostPort string) string {
hostPort := removeProtocolIfDefined(protocolHostPort)
return fmt.Sprintf("%s@%s", id, hostPort)
}
// NewNetAddress returns a new NetAddress using the provided TCP
// address. When testing, any non-TCP net.Addr results in using
// 0.0.0.0:0. In a normal run, any non-TCP net.Addr causes a panic.
// Panics if the ID is invalid.
// TODO: socks proxies?
func NewNetAddress(id NodeID, addr net.Addr) *NetAddress {
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
if flag.Lookup("test.v") == nil { // normal run
panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
} else { // in testing
netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0)
netAddr.ID = id
return netAddr
}
}
if err := id.Validate(); err != nil {
panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
}
ip := tcpAddr.IP
port := uint16(tcpAddr.Port)
na := NewNetAddressIPPort(ip, port)
na.ID = id
return na
}
// NewNetAddressString returns a new NetAddress using the provided address in
// the form of "ID@IP:Port".
// Also resolves the host if host is not an IP.
// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
func NewNetAddressString(addr string) (*NetAddress, error) {
addrWithoutProtocol := removeProtocolIfDefined(addr)
spl := strings.Split(addrWithoutProtocol, "@")
if len(spl) != 2 {
return nil, ErrNetAddressNoID{addr}
}
id, err := NewNodeID(spl[0])
if err != nil {
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
}
if err := id.Validate(); err != nil {
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
}
addrWithoutProtocol = spl[1]
// get host and port
host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
if err != nil {
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
}
if len(host) == 0 {
return nil, ErrNetAddressInvalid{
addrWithoutProtocol,
errors.New("host is empty")}
}
ip := net.ParseIP(host)
if ip == nil {
ips, err := net.LookupIP(host)
if err != nil {
return nil, ErrNetAddressLookup{host, err}
}
ip = ips[0]
}
port, err := strconv.ParseUint(portStr, 10, 16)
if err != nil {
return nil, ErrNetAddressInvalid{portStr, err}
}
na := NewNetAddressIPPort(ip, uint16(port))
na.ID = id
return na, nil
}
// NewNetAddressStrings returns an array of NetAddresses built using
// the provided strings.
func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
netAddrs := make([]*NetAddress, 0)
errs := make([]error, 0)
for _, addr := range addrs {
netAddr, err := NewNetAddressString(addr)
if err != nil {
errs = append(errs, err)
} else {
netAddrs = append(netAddrs, netAddr)
}
}
return netAddrs, errs
}
// NewNetAddressIPPort returns a new NetAddress using the provided IP
// and port number.
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
return &NetAddress{
IP: ip,
Port: port,
}
}
// NetAddressFromProto converts a Protobuf PexAddress into a native struct.
// FIXME: Remove this when legacy PEX reactor is removed.
func NetAddressFromProto(pb tmp2p.PexAddress) (*NetAddress, error) {
ip := net.ParseIP(pb.IP)
if ip == nil {
return nil, fmt.Errorf("invalid IP address %v", pb.IP)
}
if pb.Port >= 1<<16 {
return nil, fmt.Errorf("invalid port number %v", pb.Port)
}
return &NetAddress{
ID: NodeID(pb.ID),
IP: ip,
Port: uint16(pb.Port),
}, nil
}
// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice.
// FIXME: Remove this when legacy PEX reactor is removed.
func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*NetAddress, error) {
nas := make([]*NetAddress, 0, len(pbs))
for _, pb := range pbs {
na, err := NetAddressFromProto(pb)
if err != nil {
return nil, err
}
nas = append(nas, na)
}
return nas, nil
}
// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice.
// FIXME: Remove this when legacy PEX reactor is removed.
func NetAddressesToProto(nas []*NetAddress) []tmp2p.PexAddress {
pbs := make([]tmp2p.PexAddress, 0, len(nas))
for _, na := range nas {
if na != nil {
pbs = append(pbs, na.ToProto())
}
}
return pbs
}
// ToProto converts a NetAddress to a Protobuf PexAddress.
// FIXME: Remove this when legacy PEX reactor is removed.
func (na *NetAddress) ToProto() tmp2p.PexAddress {
return tmp2p.PexAddress{
ID: string(na.ID),
IP: na.IP.String(),
Port: uint32(na.Port),
}
}
// Equals reports whether na and other are the same addresses,
// including their ID, IP, and Port.
func (na *NetAddress) Equals(other interface{}) bool {
if o, ok := other.(*NetAddress); ok {
return na.String() == o.String()
}
return false
}
// Same returns true if na has the same non-empty ID or DialString as other.
func (na *NetAddress) Same(other interface{}) bool {
if o, ok := other.(*NetAddress); ok {
if na.DialString() == o.DialString() {
return true
}
if na.ID != "" && na.ID == o.ID {
return true
}
}
return false
}
// String representation: <ID>@<IP>:<PORT>
func (na *NetAddress) String() string {
if na == nil {
return EmptyNetAddress
}
addrStr := na.DialString()
if na.ID != "" {
addrStr = IDAddressString(na.ID, addrStr)
}
return addrStr
}
func (na *NetAddress) DialString() string {
if na == nil {
return "<nil-NetAddress>"
}
return net.JoinHostPort(
na.IP.String(),
strconv.FormatUint(uint64(na.Port), 10),
)
}
// Dial calls net.Dial on the address.
func (na *NetAddress) Dial() (net.Conn, error) {
conn, err := net.Dial("tcp", na.DialString())
if err != nil {
return nil, err
}
return conn, nil
}
// DialTimeout calls net.DialTimeout on the address.
func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
if err != nil {
return nil, err
}
return conn, nil
}
// Routable returns true if the address is routable.
func (na *NetAddress) Routable() bool {
if err := na.Valid(); err != nil {
return false
}
// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
na.RFC4193() || na.RFC4843() || na.Local())
}
// For IPv4 these are either a zero or an all-bits-set address. For IPv6 a
// zero address or one that matches the RFC3849 documentation address format.
func (na *NetAddress) Valid() error {
if err := na.ID.Validate(); err != nil {
return fmt.Errorf("invalid ID: %w", err)
}
if na.IP == nil {
return errors.New("no IP")
}
if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
return errors.New("invalid IP")
}
return nil
}
// HasID returns true if the address has an ID.
// NOTE: It does not check whether the ID is valid or not.
func (na *NetAddress) HasID() bool {
return string(na.ID) != ""
}
// Endpoint converts the address to an MConnection endpoint.
func (na *NetAddress) Endpoint() Endpoint {
return Endpoint{
Protocol: MConnProtocol,
IP: na.IP,
Port: na.Port,
}
}
// Local returns true if it is a local address.
func (na *NetAddress) Local() bool {
return na.IP.IsLoopback() || zero4.Contains(na.IP)
}
// ReachabilityTo checks whether o can be reached from na.
func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
const (
Unreachable = 0
Default = iota
Teredo
Ipv6Weak
Ipv4
Ipv6Strong
)
switch {
case !na.Routable():
return Unreachable
case na.RFC4380():
switch {
case !o.Routable():
return Default
case o.RFC4380():
return Teredo
case o.IP.To4() != nil:
return Ipv4
default: // ipv6
return Ipv6Weak
}
case na.IP.To4() != nil:
if o.Routable() && o.IP.To4() != nil {
return Ipv4
}
return Default
default: /* ipv6 */
var tunneled bool
// Is our v6 tunneled?
if o.RFC3964() || o.RFC6052() || o.RFC6145() {
tunneled = true
}
switch {
case !o.Routable():
return Default
case o.RFC4380():
return Teredo
case o.IP.To4() != nil:
return Ipv4
case tunneled:
// only prioritize ipv6 if we aren't tunneling it.
return Ipv6Weak
}
return Ipv6Strong
}
}
// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
// RFC3964: IPv6 6to4 (2002::/16)
// RFC4193: IPv6 unique local (FC00::/7)
// RFC4380: IPv6 Teredo tunneling (2001::/32)
// RFC4843: IPv6 ORCHID: (2001:10::/28)
// RFC4862: IPv6 Autoconfig (FE80::/64)
// RFC6052: IPv6 well known prefix (64:FF9B::/96)
// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
var (
// onionCatNet defines the IPv6 address block used to support Tor.
// bitcoind encodes a .onion address as a 16 byte number by decoding the
// address prior to the .onion (i.e. the key hash) base32 into a ten
// byte number. It then stores the first 6 bytes of the address as
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
//
// This is the same range used by OnionCat, which is part of the
// RFC4193 unique local IPv6 range.
//
// In summary the format is:
// { magic 6 bytes, 10 bytes base32 decode of key hash }
onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
)
// ipNet returns a net.IPNet struct given the passed IP address string, number
// of one bits to include at the start of the mask, and the total number of bits
// for the mask.
func ipNet(ip string, ones, bits int) net.IPNet {
return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
}
func (na *NetAddress) RFC1918() bool {
return rfc1918_10.Contains(na.IP) ||
rfc1918_192.Contains(na.IP) ||
rfc1918_172.Contains(na.IP)
}
func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) }
func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) }
func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) }
func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) }
func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) }
func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) }
func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) }
func removeProtocolIfDefined(addr string) string {
if strings.Contains(addr, "://") {
return strings.Split(addr, "://")[1]
}
return addr
}
type NetAddress = types.NetAddress
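Since NetAddress is now a type alias, p2p.NetAddress and types.NetAddress are the same type, so existing references keep compiling. A short sketch (illustrative only):

package main

import (
	"fmt"
	"net"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// The alias makes these interchangeable: a *types.NetAddress can be
	// assigned to a *p2p.NetAddress without any conversion.
	var na *p2p.NetAddress = types.NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 26656)
	fmt.Println(na.String()) // 127.0.0.1:26656 (no node ID set)
}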

+ 8
- 7
internal/p2p/node_info.go View File

@ -7,6 +7,7 @@ import (
"github.com/tendermint/tendermint/libs/bytes"
tmstrings "github.com/tendermint/tendermint/libs/strings"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
@ -52,8 +53,8 @@ type NodeInfo struct {
ProtocolVersion ProtocolVersion `json:"protocol_version"`
// Authenticate
NodeID NodeID `json:"id"` // authenticated identifier
ListenAddr string `json:"listen_addr"` // accepting incoming
NodeID types.NodeID `json:"id"` // authenticated identifier
ListenAddr string `json:"listen_addr"` // accepting incoming
// Check compatibility.
// Channels are HexBytes so easier to read as JSON
@ -73,7 +74,7 @@ type NodeInfoOther struct {
}
// ID returns the node's peer ID.
func (info NodeInfo) ID() NodeID {
func (info NodeInfo) ID() types.NodeID {
return info.NodeID
}
@ -95,7 +96,7 @@ func (info NodeInfo) Validate() error {
// ID is already validated.
// Validate ListenAddr.
_, err := NewNetAddressString(IDAddressString(info.ID(), info.ListenAddr))
_, err := types.NewNetAddressString(info.ID().AddressString(info.ListenAddr))
if err != nil {
return err
}
@ -185,8 +186,8 @@ OUTER_LOOP:
// ListenAddr. Note that the ListenAddr is not authenticated and
// may not match the address actually dialed if it's an outbound peer.
func (info NodeInfo) NetAddress() (*NetAddress, error) {
idAddr := IDAddressString(info.ID(), info.ListenAddr)
return NewNetAddressString(idAddr)
idAddr := info.ID().AddressString(info.ListenAddr)
return types.NewNetAddressString(idAddr)
}
func (info NodeInfo) ToProto() *tmp2p.NodeInfo {
@ -222,7 +223,7 @@ func NodeInfoFromProto(pb *tmp2p.NodeInfo) (NodeInfo, error) {
Block: pb.ProtocolVersion.Block,
App: pb.ProtocolVersion.App,
},
NodeID: NodeID(pb.NodeID),
NodeID: types.NodeID(pb.NodeID),
ListenAddr: pb.ListenAddr,
Network: pb.Network,
Version: pb.Version,


+ 3
- 2
internal/p2p/p2p_test.go View File

@ -6,6 +6,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// Common setup for P2P tests.
@ -22,7 +23,7 @@ var (
}
selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd})
selfID = p2p.NodeIDFromPubKey(selfKey.PubKey())
selfID = types.NodeIDFromPubKey(selfKey.PubKey())
selfInfo = p2p.NodeInfo{
NodeID: selfID,
ListenAddr: "0.0.0.0:0",
@ -31,7 +32,7 @@ var (
}
peerKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0x84, 0xd7, 0x01, 0xbf, 0x83, 0x20, 0x1c, 0xfe})
peerID = p2p.NodeIDFromPubKey(peerKey.PubKey())
peerID = types.NodeIDFromPubKey(peerKey.PubKey())
peerInfo = p2p.NodeInfo{
NodeID: peerID,
ListenAddr: "0.0.0.0:0",


+ 14
- 13
internal/p2p/p2ptest/network.go View File

@ -14,13 +14,14 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
Nodes map[p2p.NodeID]*Node
Nodes map[types.NodeID]*Node
logger log.Logger
memoryNetwork *p2p.MemoryNetwork
@ -51,7 +52,7 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network {
opts.setDefaults()
logger := log.TestingLogger()
network := &Network{
Nodes: map[p2p.NodeID]*Node{},
Nodes: map[types.NodeID]*Node{},
logger: logger,
memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
}
@ -71,7 +72,7 @@ func (n *Network) Start(t *testing.T) {
// Set up a list of node addresses to dial, and a peer update subscription
// for each node.
dialQueue := []p2p.NodeAddress{}
subs := map[p2p.NodeID]*p2p.PeerUpdates{}
subs := map[types.NodeID]*p2p.PeerUpdates{}
for _, node := range n.Nodes {
dialQueue = append(dialQueue, node.NodeAddress)
subs[node.NodeID] = node.PeerManager.Subscribe()
@ -124,8 +125,8 @@ func (n *Network) Start(t *testing.T) {
}
// NodeIDs returns the network's node IDs.
func (n *Network) NodeIDs() []p2p.NodeID {
ids := []p2p.NodeID{}
func (n *Network) NodeIDs() []types.NodeID {
ids := []types.NodeID{}
for id := range n.Nodes {
ids = append(ids, id)
}
@ -139,8 +140,8 @@ func (n *Network) MakeChannels(
chDesc p2p.ChannelDescriptor,
messageType proto.Message,
size int,
) map[p2p.NodeID]*p2p.Channel {
channels := map[p2p.NodeID]*p2p.Channel{}
) map[types.NodeID]*p2p.Channel {
channels := map[types.NodeID]*p2p.Channel{}
for _, node := range n.Nodes {
channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size)
}
@ -155,8 +156,8 @@ func (n *Network) MakeChannelsNoCleanup(
chDesc p2p.ChannelDescriptor,
messageType proto.Message,
size int,
) map[p2p.NodeID]*p2p.Channel {
channels := map[p2p.NodeID]*p2p.Channel{}
) map[types.NodeID]*p2p.Channel {
channels := map[types.NodeID]*p2p.Channel{}
for _, node := range n.Nodes {
channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size)
}
@ -173,7 +174,7 @@ func (n *Network) RandomNode() *Node {
}
// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id p2p.NodeID) []*Node {
func (n *Network) Peers(id types.NodeID) []*Node {
peers := make([]*Node, 0, len(n.Nodes)-1)
for _, peer := range n.Nodes {
if peer.NodeID != id {
@ -185,7 +186,7 @@ func (n *Network) Peers(id p2p.NodeID) []*Node {
// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(t *testing.T, id p2p.NodeID) {
func (n *Network) Remove(t *testing.T, id types.NodeID) {
require.Contains(t, n.Nodes, id)
node := n.Nodes[id]
delete(n.Nodes, id)
@ -213,7 +214,7 @@ func (n *Network) Remove(t *testing.T, id p2p.NodeID) {
// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
NodeID p2p.NodeID
NodeID types.NodeID
NodeInfo p2p.NodeInfo
NodeAddress p2p.NodeAddress
PrivKey crypto.PrivKey
@ -227,7 +228,7 @@ type Node struct {
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
privKey := ed25519.GenPrivKey()
nodeID := p2p.NodeIDFromPubKey(privKey.PubKey())
nodeID := types.NodeIDFromPubKey(privKey.PubKey())
nodeInfo := p2p.NodeInfo{
NodeID: nodeID,
ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.


+ 2
- 1
internal/p2p/p2ptest/require.go View File

@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// RequireEmpty requires that the given channel is empty.
@ -83,7 +84,7 @@ func RequireSend(t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) {
func RequireSendReceive(
t *testing.T,
channel *p2p.Channel,
peerID p2p.NodeID,
peerID types.NodeID,
send proto.Message,
receive proto.Message,
) {


+ 2
- 2
internal/p2p/p2ptest/util.go View File

@ -2,13 +2,13 @@ package p2ptest
import (
gogotypes "github.com/gogo/protobuf/types"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// Message is a simple message containing a string-typed Value field.
type Message = gogotypes.StringValue
func NodeInSlice(id p2p.NodeID, ids []p2p.NodeID) bool {
func NodeInSlice(id types.NodeID, ids []types.NodeID) bool {
for _, n := range ids {
if id == n {
return true


+ 3
- 2
internal/p2p/peer.go View File

@ -11,6 +11,7 @@ import (
"github.com/tendermint/tendermint/libs/cmap"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
//go:generate mockery --case underscore --name Peer
@ -22,7 +23,7 @@ type Peer interface {
service.Service
FlushStop()
ID() NodeID // peer's cryptographic ID
ID() types.NodeID // peer's cryptographic ID
RemoteIP() net.IP // remote IP of the connection
RemoteAddr() net.Addr // remote address of the connection
@ -202,7 +203,7 @@ func (p *peer) OnStop() {
// Implements Peer
// ID returns the peer's ID - the hex encoded hash of its pubkey.
func (p *peer) ID() NodeID {
func (p *peer) ID() types.NodeID {
return p.nodeInfo.ID()
}
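
With ID() returning the public type, reactors can key per-peer state directly by types.NodeID; a hedged sketch (peerHeights is hypothetical and not part of this change):

package p2p

import (
    "sync"

    "github.com/tendermint/tendermint/types"
)

// peerHeights tracks a hypothetical per-peer value keyed by the public ID type.
type peerHeights struct {
    mtx     sync.Mutex
    heights map[types.NodeID]int64
}

func (ph *peerHeights) observe(p Peer, height int64) {
    ph.mtx.Lock()
    defer ph.mtx.Unlock()
    if ph.heights == nil {
        ph.heights = make(map[types.NodeID]int64)
    }
    ph.heights[p.ID()] = height // p.ID() is a types.NodeID after this change
}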


+7 -6 internal/p2p/peer_set.go

@ -4,13 +4,14 @@ import (
"net"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/types"
)
// IPeerSet has an (immutable) subset of the methods of PeerSet.
type IPeerSet interface {
Has(key NodeID) bool
Has(key types.NodeID) bool
HasIP(ip net.IP) bool
Get(key NodeID) Peer
Get(key types.NodeID) Peer
List() []Peer
Size() int
}
@ -21,7 +22,7 @@ type IPeerSet interface {
// Iteration over the peers is super fast and thread-safe.
type PeerSet struct {
mtx tmsync.Mutex
lookup map[NodeID]*peerSetItem
lookup map[types.NodeID]*peerSetItem
list []Peer
}
@ -33,7 +34,7 @@ type peerSetItem struct {
// NewPeerSet creates a new peerSet whose backing list has an initial capacity of 256 items.
func NewPeerSet() *PeerSet {
return &PeerSet{
lookup: make(map[NodeID]*peerSetItem),
lookup: make(map[types.NodeID]*peerSetItem),
list: make([]Peer, 0, 256),
}
}
@ -58,7 +59,7 @@ func (ps *PeerSet) Add(peer Peer) error {
// Has returns true if the set contains the peer referred to by this
// peerKey, otherwise false.
func (ps *PeerSet) Has(peerKey NodeID) bool {
func (ps *PeerSet) Has(peerKey types.NodeID) bool {
ps.mtx.Lock()
_, ok := ps.lookup[peerKey]
ps.mtx.Unlock()
@ -88,7 +89,7 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool {
// Get looks up a peer by the provided peerKey. Returns nil if peer is not
// found.
func (ps *PeerSet) Get(peerKey NodeID) Peer {
func (ps *PeerSet) Get(peerKey types.NodeID) Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item, ok := ps.lookup[peerKey]


+3 -2 internal/p2p/peer_set_test.go

@ -8,13 +8,14 @@ import (
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
// mockPeer for testing the PeerSet
type mockPeer struct {
service.BaseService
ip net.IP
id NodeID
id types.NodeID
}
func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
@ -22,7 +23,7 @@ func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) NodeInfo() NodeInfo { return NodeInfo{} }
func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} }
func (mp *mockPeer) ID() NodeID { return mp.id }
func (mp *mockPeer) ID() types.NodeID { return mp.id }
func (mp *mockPeer) IsOutbound() bool { return false }
func (mp *mockPeer) IsPersistent() bool { return true }
func (mp *mockPeer) Get(s string) interface{} { return s }
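
With the mock above, the retyped PeerSet API exercises end to end; a compact sketch (test name and literal ID are hypothetical, assumes this file's imports):

func TestPeerSet_PublicNodeID_Example(t *testing.T) {
    id := types.NodeID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") // 40 hex chars
    ps := NewPeerSet()
    assert.NoError(t, ps.Add(&mockPeer{id: id}))
    assert.True(t, ps.Has(id))
    assert.Equal(t, id, ps.Get(id).ID())
}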


+5 -4 internal/p2p/peer_test.go

@ -15,6 +15,7 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/config"
tmconn "github.com/tendermint/tendermint/internal/p2p/conn"
@ -82,7 +83,7 @@ func createOutboundPeerAndPerformHandshake(
{ID: testCh, Priority: 1},
}
pk := ed25519.GenPrivKey()
ourNodeInfo := testNodeInfo(NodeIDFromPubKey(pk.PubKey()), "host_peer")
ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer")
transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{})
reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
pc, err := testOutboundPeerConn(transport, addr, config, false, pk)
@ -149,8 +150,8 @@ func (rp *remotePeer) Addr() *NetAddress {
return rp.addr
}
func (rp *remotePeer) ID() NodeID {
return NodeIDFromPubKey(rp.PrivKey.PubKey())
func (rp *remotePeer) ID() types.NodeID {
return types.NodeIDFromPubKey(rp.PrivKey.PubKey())
}
func (rp *remotePeer) Start() {
@ -163,7 +164,7 @@ func (rp *remotePeer) Start() {
golog.Fatalf("net.Listen tcp :0: %+v", e)
}
rp.listener = l
rp.addr = NewNetAddress(NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr())
rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr())
if rp.channels == nil {
rp.channels = []byte{testCh}
}
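
The constructor pairs an ID with any TCP net.Addr; a self-contained sketch mirroring Start() above (names are illustrative):

package main

import (
    "fmt"
    "net"

    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/types"
)

func main() {
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }
    defer l.Close()

    // Build an id@host:port address from a fresh listener, as Start() does above.
    id := types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
    addr := types.NewNetAddress(id, l.Addr())
    fmt.Println(addr)
}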


+49 -48 internal/p2p/peermanager.go

@ -16,6 +16,7 @@ import (
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
const (
@ -46,7 +47,7 @@ const (
// PeerUpdate is a peer update event sent via PeerUpdates.
type PeerUpdate struct {
NodeID NodeID
NodeID types.NodeID
Status PeerStatus
}
@ -105,7 +106,7 @@ type PeerManagerOptions struct {
// to. These will be scored higher than other peers, and if
// MaxConnectedUpgrade is non-zero any lower-scored peers will be evicted if
// necessary to make room for these.
PersistentPeers []NodeID
PersistentPeers []types.NodeID
// MaxPeers is the maximum number of peers to track information about, i.e.
// store in the peer store. When exceeded, the lowest-scored unconnected peers
@ -147,15 +148,15 @@ type PeerManagerOptions struct {
// PeerScores sets fixed scores for specific peers. It is mainly used
// for testing. A score of 0 is ignored.
PeerScores map[NodeID]PeerScore
PeerScores map[types.NodeID]PeerScore
// PrivatePeerIDs defines a set of NodeID objects which the PEX reactor will
// consider private and never gossip.
PrivatePeers map[NodeID]struct{}
PrivatePeers map[types.NodeID]struct{}
// persistentPeers provides fast PersistentPeers lookups. It is built
// by optimize().
persistentPeers map[NodeID]bool
persistentPeers map[types.NodeID]bool
}
// Validate validates the options.
@ -209,7 +210,7 @@ func (o *PeerManagerOptions) Validate() error {
// isPersistent checks whether a peer is in PersistentPeers. It will panic
// if called before optimize().
func (o *PeerManagerOptions) isPersistent(id NodeID) bool {
func (o *PeerManagerOptions) isPersistent(id types.NodeID) bool {
if o.persistentPeers == nil {
panic("isPersistentPeer() called before optimize()")
}
@ -220,7 +221,7 @@ func (o *PeerManagerOptions) isPersistent(id NodeID) bool {
// separate method instead of memoizing during calls to avoid dealing with
// concurrency and mutex overhead.
func (o *PeerManagerOptions) optimize() {
o.persistentPeers = make(map[NodeID]bool, len(o.PersistentPeers))
o.persistentPeers = make(map[types.NodeID]bool, len(o.PersistentPeers))
for _, p := range o.PersistentPeers {
o.persistentPeers[p] = true
}
@ -270,7 +271,7 @@ func (o *PeerManagerOptions) optimize() {
// - EvictNext: pick peer from evict, mark as evicting.
// - Disconnected: unmark connected, upgrading[from]=to, evict, evicting.
type PeerManager struct {
selfID NodeID
selfID types.NodeID
options PeerManagerOptions
rand *rand.Rand
dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes
@ -281,16 +282,16 @@ type PeerManager struct {
mtx sync.Mutex
store *peerStore
subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
dialing map[NodeID]bool // peers being dialed (DialNext → Dialed/DialFail)
upgrading map[NodeID]NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
connected map[NodeID]bool // connected peers (Dialed/Accepted → Disconnected)
ready map[NodeID]bool // ready peers (Ready → Disconnected)
evict map[NodeID]bool // peers scheduled for eviction (Connected → EvictNext)
evicting map[NodeID]bool // peers being evicted (EvictNext → Disconnected)
dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail)
upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail)
connected map[types.NodeID]bool // connected peers (Dialed/Accepted → Disconnected)
ready map[types.NodeID]bool // ready peers (Ready → Disconnected)
evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext)
evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected)
}
// NewPeerManager creates a new peer manager.
func NewPeerManager(selfID NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) {
func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) {
if selfID == "" {
return nil, errors.New("self ID not given")
}
@ -314,12 +315,12 @@ func NewPeerManager(selfID NodeID, peerDB dbm.DB, options PeerManagerOptions) (*
closeCh: make(chan struct{}),
store: store,
dialing: map[NodeID]bool{},
upgrading: map[NodeID]NodeID{},
connected: map[NodeID]bool{},
ready: map[NodeID]bool{},
evict: map[NodeID]bool{},
evicting: map[NodeID]bool{},
dialing: map[types.NodeID]bool{},
upgrading: map[types.NodeID]types.NodeID{},
connected: map[types.NodeID]bool{},
ready: map[types.NodeID]bool{},
evict: map[types.NodeID]bool{},
evicting: map[types.NodeID]bool{},
subscriptions: map[*PeerUpdates]*PeerUpdates{},
}
if err = peerManager.configurePeers(); err != nil {
@ -339,7 +340,7 @@ func (m *PeerManager) configurePeers() error {
return err
}
configure := map[NodeID]bool{}
configure := map[types.NodeID]bool{}
for _, id := range m.options.PersistentPeers {
configure[id] = true
}
@ -364,7 +365,7 @@ func (m *PeerManager) configurePeer(peer peerInfo) peerInfo {
}
// newPeerInfo creates a peerInfo for a new peer.
func (m *PeerManager) newPeerInfo(id NodeID) peerInfo {
func (m *PeerManager) newPeerInfo(id types.NodeID) peerInfo {
peerInfo := peerInfo{
ID: id,
AddressInfo: map[NodeAddress]*peerAddressInfo{},
@ -568,7 +569,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
delete(m.dialing, address.NodeID)
var upgradeFromPeer NodeID
var upgradeFromPeer types.NodeID
for from, to := range m.upgrading {
if to == address.NodeID {
delete(m.upgrading, from)
@ -639,7 +640,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error {
// that, we'll need to get the remote address after all, but as noted above that
// can't be the remote endpoint since that will usually have the wrong port
// number.
func (m *PeerManager) Accepted(peerID NodeID) error {
func (m *PeerManager) Accepted(peerID types.NodeID) error {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -662,7 +663,7 @@ func (m *PeerManager) Accepted(peerID NodeID) error {
// If all connections slots are full, but we allow upgrades (and we checked
// above that we have upgrade capacity), then we can look for a lower-scored
// peer to replace and if found accept the connection anyway and evict it.
var upgradeFromPeer NodeID
var upgradeFromPeer types.NodeID
if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
upgradeFromPeer = m.findUpgradeCandidate(peer.ID, peer.Score())
if upgradeFromPeer == "" {
@ -687,7 +688,7 @@ func (m *PeerManager) Accepted(peerID NodeID) error {
// peer must already be marked as connected. This is separate from Dialed() and
// Accepted() to allow the router to set up its internal queues before reactors
// start sending messages.
func (m *PeerManager) Ready(peerID NodeID) {
func (m *PeerManager) Ready(peerID types.NodeID) {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -702,7 +703,7 @@ func (m *PeerManager) Ready(peerID NodeID) {
// EvictNext returns the next peer to evict (i.e. disconnect). If no evictable
// peers are found, the call will block until one becomes available.
func (m *PeerManager) EvictNext(ctx context.Context) (NodeID, error) {
func (m *PeerManager) EvictNext(ctx context.Context) (types.NodeID, error) {
for {
id, err := m.TryEvictNext()
if err != nil || id != "" {
@ -718,7 +719,7 @@ func (m *PeerManager) EvictNext(ctx context.Context) (NodeID, error) {
// TryEvictNext is equivalent to EvictNext, but immediately returns an empty
// node ID if no evictable peers are found.
func (m *PeerManager) TryEvictNext() (NodeID, error) {
func (m *PeerManager) TryEvictNext() (types.NodeID, error) {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -754,7 +755,7 @@ func (m *PeerManager) TryEvictNext() (NodeID, error) {
// Disconnected unmarks a peer as connected, allowing it to be dialed or
// accepted again as appropriate.
func (m *PeerManager) Disconnected(peerID NodeID) {
func (m *PeerManager) Disconnected(peerID types.NodeID) {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -784,7 +785,7 @@ func (m *PeerManager) Disconnected(peerID NodeID) {
//
// FIXME: This will cause the peer manager to immediately try to reconnect to
// the peer, which is probably not always what we want.
func (m *PeerManager) Errored(peerID NodeID, err error) {
func (m *PeerManager) Errored(peerID types.NodeID, err error) {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -799,7 +800,7 @@ func (m *PeerManager) Errored(peerID NodeID, err error) {
//
// FIXME: This is fairly naïve and only returns the addresses of the
// highest-ranked peers.
func (m *PeerManager) Advertise(peerID NodeID, limit uint16) []NodeAddress {
func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -924,7 +925,7 @@ func (m *PeerManager) Close() {
// Addresses returns all known addresses for a peer, primarily for testing.
// The order is arbitrary.
func (m *PeerManager) Addresses(peerID NodeID) []NodeAddress {
func (m *PeerManager) Addresses(peerID types.NodeID) []NodeAddress {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -938,11 +939,11 @@ func (m *PeerManager) Addresses(peerID NodeID) []NodeAddress {
}
// Peers returns all known peers, primarily for testing. The order is arbitrary.
func (m *PeerManager) Peers() []NodeID {
func (m *PeerManager) Peers() []types.NodeID {
m.mtx.Lock()
defer m.mtx.Unlock()
peers := []NodeID{}
peers := []types.NodeID{}
for _, peer := range m.store.Ranked() {
peers = append(peers, peer.ID)
}
@ -950,11 +951,11 @@ func (m *PeerManager) Peers() []NodeID {
}
// Scores returns the peer scores for all known peers, primarily for testing.
func (m *PeerManager) Scores() map[NodeID]PeerScore {
func (m *PeerManager) Scores() map[types.NodeID]PeerScore {
m.mtx.Lock()
defer m.mtx.Unlock()
scores := map[NodeID]PeerScore{}
scores := map[types.NodeID]PeerScore{}
for _, peer := range m.store.Ranked() {
scores[peer.ID] = peer.Score()
}
@ -962,7 +963,7 @@ func (m *PeerManager) Scores() map[NodeID]PeerScore {
}
// Status returns the status for a peer, primarily for testing.
func (m *PeerManager) Status(id NodeID) PeerStatus {
func (m *PeerManager) Status(id types.NodeID) PeerStatus {
m.mtx.Lock()
defer m.mtx.Unlock()
switch {
@ -977,7 +978,7 @@ func (m *PeerManager) Status(id NodeID) PeerStatus {
// to make room for the given peer. Returns an empty ID if none is found.
// If the peer is already being upgraded to, we return that same upgrade.
// The caller must hold the mutex lock.
func (m *PeerManager) findUpgradeCandidate(id NodeID, score PeerScore) NodeID {
func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) types.NodeID {
for from, to := range m.upgrading {
if to == id {
return from
@ -1033,7 +1034,7 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration
// FIXME: This is a temporary workaround to share state between the consensus
// and mempool reactors, carried over from the legacy P2P stack. Reactors should
// not have dependencies on each other, instead tracking this themselves.
func (m *PeerManager) GetHeight(peerID NodeID) int64 {
func (m *PeerManager) GetHeight(peerID types.NodeID) int64 {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -1046,7 +1047,7 @@ func (m *PeerManager) GetHeight(peerID NodeID) int64 {
// FIXME: This is a temporary workaround to share state between the consensus
// and mempool reactors, carried over from the legacy P2P stack. Reactors should
// not have dependencies on each other, instead tracking this themselves.
func (m *PeerManager) SetHeight(peerID NodeID, height int64) error {
func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error {
m.mtx.Lock()
defer m.mtx.Unlock()
@ -1067,7 +1068,7 @@ func (m *PeerManager) SetHeight(peerID NodeID, height int64) error {
// (without fsync, since we can afford to lose recent writes).
type peerStore struct {
db dbm.DB
peers map[NodeID]*peerInfo
peers map[types.NodeID]*peerInfo
ranked []*peerInfo // cache for Ranked(), nil invalidates cache
}
@ -1086,7 +1087,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) {
// loadPeers loads all peers from the database into memory.
func (s *peerStore) loadPeers() error {
peers := map[NodeID]*peerInfo{}
peers := map[types.NodeID]*peerInfo{}
start, end := keyPeerInfoRange()
iter, err := s.db.Iterator(start, end)
@ -1117,7 +1118,7 @@ func (s *peerStore) loadPeers() error {
// Get fetches a peer. The boolean indicates whether the peer existed or not.
// The returned peer info is a copy, and can be mutated at will.
func (s *peerStore) Get(id NodeID) (peerInfo, bool) {
func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) {
peer, ok := s.peers[id]
return peer.Copy(), ok
}
@ -1155,7 +1156,7 @@ func (s *peerStore) Set(peer peerInfo) error {
}
// Delete deletes a peer, or does nothing if it does not exist.
func (s *peerStore) Delete(id NodeID) error {
func (s *peerStore) Delete(id types.NodeID) error {
if _, ok := s.peers[id]; !ok {
return nil
}
@ -1213,7 +1214,7 @@ func (s *peerStore) Size() int {
// peerInfo contains peer information stored in a peerStore.
type peerInfo struct {
ID NodeID
ID types.NodeID
AddressInfo map[NodeAddress]*peerAddressInfo
LastConnected time.Time
@ -1229,7 +1230,7 @@ type peerInfo struct {
// erroring if the data is invalid.
func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
p := &peerInfo{
ID: NodeID(msg.ID),
ID: types.NodeID(msg.ID),
AddressInfo: map[NodeAddress]*peerAddressInfo{},
}
if msg.LastConnected != nil {
@ -1366,7 +1367,7 @@ const (
)
// keyPeerInfo generates a peerInfo database key.
func keyPeerInfo(id NodeID) []byte {
func keyPeerInfo(id types.NodeID) []byte {
key, err := orderedcode.Append(nil, prefixPeerInfo, string(id))
if err != nil {
panic(err)
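
Putting the retyped surface together, a minimal construction sketch in the spirit of the tests below (note that internal/p2p is only importable from inside this repository; the literal IDs are illustrative):

package main

import (
    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/types"
)

func main() {
    selfID := types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
    aID := types.NodeID("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa") // 40 hex chars
    bID := types.NodeID("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb")

    peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
        PersistentPeers: []types.NodeID{aID},
        PeerScores:      map[types.NodeID]p2p.PeerScore{bID: 1},
    })
    if err != nil {
        panic(err)
    }
    defer peerManager.Close()

    // Addresses, like IDs, now carry the public type.
    if _, err := peerManager.Add(p2p.NodeAddress{Protocol: "memory", NodeID: aID}); err != nil {
        panic(err)
    }
}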


+3 -2 internal/p2p/peermanager_scoring_test.go

@ -7,13 +7,14 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
func TestPeerScoring(t *testing.T) {
// copied from p2p_test shared variables
selfKey := ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd})
selfID := NodeIDFromPubKey(selfKey.PubKey())
selfID := types.NodeIDFromPubKey(selfKey.PubKey())
// create a mock peer manager
db := dbm.NewMemDB()
@ -22,7 +23,7 @@ func TestPeerScoring(t *testing.T) {
defer peerManager.Close()
// create a fake node
id := NodeID(strings.Repeat("a1", 20))
id := types.NodeID(strings.Repeat("a1", 20))
added, err := peerManager.Add(NodeAddress{NodeID: id, Protocol: "memory"})
require.NoError(t, err)
require.True(t, added)


+130 -129 internal/p2p/peermanager_test.go

@ -12,6 +12,7 @@ import (
dbm "github.com/tendermint/tm-db"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// FIXME: We should probably have some randomized property-based tests for the
@ -21,7 +22,7 @@ import (
// tests.
func TestPeerManagerOptions_Validate(t *testing.T) {
nodeID := p2p.NodeID("00112233445566778899aabbccddeeff00112233")
nodeID := types.NodeID("00112233445566778899aabbccddeeff00112233")
testcases := map[string]struct {
options p2p.PeerManagerOptions
@ -31,24 +32,24 @@ func TestPeerManagerOptions_Validate(t *testing.T) {
// PersistentPeers
"valid PersistentPeers NodeID": {p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{"00112233445566778899aabbccddeeff00112233"},
PersistentPeers: []types.NodeID{"00112233445566778899aabbccddeeff00112233"},
}, true},
"invalid PersistentPeers NodeID": {p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{"foo"},
PersistentPeers: []types.NodeID{"foo"},
}, false},
"uppercase PersistentPeers NodeID": {p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{"00112233445566778899AABBCCDDEEFF00112233"},
PersistentPeers: []types.NodeID{"00112233445566778899AABBCCDDEEFF00112233"},
}, false},
"PersistentPeers at MaxConnected": {p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{nodeID, nodeID, nodeID},
PersistentPeers: []types.NodeID{nodeID, nodeID, nodeID},
MaxConnected: 3,
}, true},
"PersistentPeers above MaxConnected": {p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{nodeID, nodeID, nodeID},
PersistentPeers: []types.NodeID{nodeID, nodeID, nodeID},
MaxConnected: 2,
}, false},
"PersistentPeers above MaxConnected below MaxConnectedUpgrade": {p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{nodeID, nodeID, nodeID},
PersistentPeers: []types.NodeID{nodeID, nodeID, nodeID},
MaxConnected: 2,
MaxConnectedUpgrade: 2,
}, false},
@ -114,7 +115,7 @@ func TestNewPeerManager(t *testing.T) {
// Invalid options should error.
_, err = p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{"foo"},
PersistentPeers: []types.NodeID{"foo"},
})
require.Error(t, err)
@ -128,19 +129,19 @@ func TestNewPeerManager(t *testing.T) {
}
func TestNewPeerManager_Persistence(t *testing.T) {
aID := p2p.NodeID(strings.Repeat("a", 40))
aID := types.NodeID(strings.Repeat("a", 40))
aAddresses := []p2p.NodeAddress{
{Protocol: "tcp", NodeID: aID, Hostname: "127.0.0.1", Port: 26657, Path: "/path"},
{Protocol: "memory", NodeID: aID},
}
bID := p2p.NodeID(strings.Repeat("b", 40))
bID := types.NodeID(strings.Repeat("b", 40))
bAddresses := []p2p.NodeAddress{
{Protocol: "tcp", NodeID: bID, Hostname: "b10c::1", Port: 26657, Path: "/path"},
{Protocol: "memory", NodeID: bID},
}
cID := p2p.NodeID(strings.Repeat("c", 40))
cID := types.NodeID(strings.Repeat("c", 40))
cAddresses := []p2p.NodeAddress{
{Protocol: "tcp", NodeID: cID, Hostname: "host.domain", Port: 80},
{Protocol: "memory", NodeID: cID},
@ -149,8 +150,8 @@ func TestNewPeerManager_Persistence(t *testing.T) {
// Create an initial peer manager and add the peers.
db := dbm.NewMemDB()
peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{aID},
PeerScores: map[p2p.NodeID]p2p.PeerScore{bID: 1},
PersistentPeers: []types.NodeID{aID},
PeerScores: map[types.NodeID]p2p.PeerScore{bID: 1},
})
require.NoError(t, err)
defer peerManager.Close()
@ -164,7 +165,7 @@ func TestNewPeerManager_Persistence(t *testing.T) {
require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID))
require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID))
require.ElementsMatch(t, cAddresses, peerManager.Addresses(cID))
require.Equal(t, map[p2p.NodeID]p2p.PeerScore{
require.Equal(t, map[types.NodeID]p2p.PeerScore{
aID: p2p.PeerScorePersistent,
bID: 1,
cID: 0,
@ -176,8 +177,8 @@ func TestNewPeerManager_Persistence(t *testing.T) {
// peers, but they should have updated scores from the new PersistentPeers
// configuration.
peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{bID},
PeerScores: map[p2p.NodeID]p2p.PeerScore{cID: 1},
PersistentPeers: []types.NodeID{bID},
PeerScores: map[types.NodeID]p2p.PeerScore{cID: 1},
})
require.NoError(t, err)
defer peerManager.Close()
@ -185,7 +186,7 @@ func TestNewPeerManager_Persistence(t *testing.T) {
require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID))
require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID))
require.ElementsMatch(t, cAddresses, peerManager.Addresses(cID))
require.Equal(t, map[p2p.NodeID]p2p.PeerScore{
require.Equal(t, map[types.NodeID]p2p.PeerScore{
aID: 0,
bID: p2p.PeerScorePersistent,
cID: 1,
@ -193,8 +194,8 @@ func TestNewPeerManager_Persistence(t *testing.T) {
}
func TestNewPeerManager_SelfIDChange(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
db := dbm.NewMemDB()
peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
@ -206,23 +207,23 @@ func TestNewPeerManager_SelfIDChange(t *testing.T) {
added, err = peerManager.Add(b)
require.NoError(t, err)
require.True(t, added)
require.ElementsMatch(t, []p2p.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
peerManager.Close()
// If we change our selfID to one of the peers in the peer store, it
// should be removed from the store.
peerManager, err = p2p.NewPeerManager(a.NodeID, db, p2p.PeerManagerOptions{})
require.NoError(t, err)
require.Equal(t, []p2p.NodeID{b.NodeID}, peerManager.Peers())
require.Equal(t, []types.NodeID{b.NodeID}, peerManager.Peers())
}
func TestPeerManager_Add(t *testing.T) {
aID := p2p.NodeID(strings.Repeat("a", 40))
bID := p2p.NodeID(strings.Repeat("b", 40))
cID := p2p.NodeID(strings.Repeat("c", 40))
aID := types.NodeID(strings.Repeat("a", 40))
bID := types.NodeID(strings.Repeat("b", 40))
cID := types.NodeID(strings.Repeat("c", 40))
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PersistentPeers: []p2p.NodeID{aID, cID},
PersistentPeers: []types.NodeID{aID, cID},
MaxPeers: 2,
MaxConnected: 2,
})
@ -260,7 +261,7 @@ func TestPeerManager_Add(t *testing.T) {
Protocol: "tcp", NodeID: cID, Hostname: "localhost"})
require.NoError(t, err)
require.True(t, added)
require.ElementsMatch(t, []p2p.NodeID{aID, cID}, peerManager.Peers())
require.ElementsMatch(t, []types.NodeID{aID, cID}, peerManager.Peers())
// Adding an invalid address should error.
_, err = peerManager.Add(p2p.NodeAddress{Path: "foo"})
@ -272,7 +273,7 @@ func TestPeerManager_Add(t *testing.T) {
}
func TestPeerManager_DialNext(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -295,7 +296,7 @@ func TestPeerManager_DialNext(t *testing.T) {
}
func TestPeerManager_DialNext_Retry(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
options := p2p.PeerManagerOptions{
MinRetryTime: 100 * time.Millisecond,
@ -341,7 +342,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) {
}
func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -368,8 +369,8 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) {
})
require.NoError(t, err)
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
// Add and dial a.
added, err := peerManager.Add(a)
@ -406,7 +407,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options)
require.NoError(t, err)
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
// Add a, dial it, and mark it a failure. This will start a retry timer.
added, err := peerManager.Add(a)
@ -429,7 +430,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) {
}
func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -457,9 +458,9 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) {
}
func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
@ -493,21 +494,21 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) {
}
func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("d", 40))}
e := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("e", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}
e := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("e", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{
PeerScores: map[types.NodeID]p2p.PeerScore{
a.NodeID: 0,
b.NodeID: 1,
c.NodeID: 2,
d.NodeID: 3,
e.NodeID: 0,
},
PersistentPeers: []p2p.NodeID{c.NodeID, d.NodeID},
PersistentPeers: []types.NodeID{c.NodeID, d.NodeID},
MaxConnected: 2,
MaxConnectedUpgrade: 1,
})
@ -577,12 +578,12 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) {
}
func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
MaxConnected: 1,
MaxConnectedUpgrade: 2,
})
@ -616,11 +617,11 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) {
}
func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
aID := p2p.NodeID(strings.Repeat("a", 40))
aID := types.NodeID(strings.Repeat("a", 40))
a := p2p.NodeAddress{Protocol: "memory", NodeID: aID}
aTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: aID, Hostname: "localhost"}
bID := p2p.NodeID(strings.Repeat("b", 40))
bID := types.NodeID(strings.Repeat("b", 40))
b := p2p.NodeAddress{Protocol: "memory", NodeID: bID}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
@ -661,8 +662,8 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) {
}
func TestPeerManager_TryDialNext_Multiple(t *testing.T) {
aID := p2p.NodeID(strings.Repeat("a", 40))
bID := p2p.NodeID(strings.Repeat("b", 40))
aID := types.NodeID(strings.Repeat("a", 40))
bID := types.NodeID(strings.Repeat("b", 40))
addresses := []p2p.NodeAddress{
{Protocol: "memory", NodeID: aID},
{Protocol: "memory", NodeID: bID},
@ -698,9 +699,9 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) {
func TestPeerManager_DialFailed(t *testing.T) {
// DialFailed is tested through other tests; here we just check a few basic
// things, e.g. reporting unknown addresses.
aID := p2p.NodeID(strings.Repeat("a", 40))
aID := types.NodeID(strings.Repeat("a", 40))
a := p2p.NodeAddress{Protocol: "memory", NodeID: aID}
bID := p2p.NodeID(strings.Repeat("b", 40))
bID := types.NodeID(strings.Repeat("b", 40))
b := p2p.NodeAddress{Protocol: "memory", NodeID: bID}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
@ -730,16 +731,16 @@ func TestPeerManager_DialFailed(t *testing.T) {
// DialFailed on an unknown peer shouldn't error or add it.
require.NoError(t, peerManager.DialFailed(b))
require.Equal(t, []p2p.NodeID{aID}, peerManager.Peers())
require.Equal(t, []types.NodeID{aID}, peerManager.Peers())
}
func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
MaxConnected: 1,
MaxConnectedUpgrade: 2,
})
@ -780,8 +781,8 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) {
}
func TestPeerManager_Dialed_Connected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -819,8 +820,8 @@ func TestPeerManager_Dialed_Self(t *testing.T) {
}
func TestPeerManager_Dialed_MaxConnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
@ -847,15 +848,15 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) {
}
func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("d", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
MaxConnectedUpgrade: 1,
PeerScores: map[p2p.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1},
})
require.NoError(t, err)
@ -888,7 +889,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) {
}
func TestPeerManager_Dialed_Unknown(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -898,14 +899,14 @@ func TestPeerManager_Dialed_Unknown(t *testing.T) {
}
func TestPeerManager_Dialed_Upgrade(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
MaxConnectedUpgrade: 2,
PeerScores: map[p2p.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1},
})
require.NoError(t, err)
@ -940,15 +941,15 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) {
}
func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("d", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
MaxConnectedUpgrade: 1,
PeerScores: map[p2p.NodeID]p2p.PeerScore{
PeerScores: map[types.NodeID]p2p.PeerScore{
a.NodeID: 3,
b.NodeID: 2,
c.NodeID: 10,
@ -994,14 +995,14 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) {
}
func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
MaxConnectedUpgrade: 1,
PeerScores: map[p2p.NodeID]p2p.PeerScore{
PeerScores: map[types.NodeID]p2p.PeerScore{
a.NodeID: 1,
b.NodeID: 2,
c.NodeID: 3,
@ -1041,10 +1042,10 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) {
}
func TestPeerManager_Accepted(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("d", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1063,7 +1064,7 @@ func TestPeerManager_Accepted(t *testing.T) {
// Accepting a connection from an unknown peer should work and register it.
require.NoError(t, peerManager.Accepted(b.NodeID))
require.ElementsMatch(t, []p2p.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
// Accepting a connection from a peer that's being dialed should work, and
// should cause the dial to fail.
@ -1088,9 +1089,9 @@ func TestPeerManager_Accepted(t *testing.T) {
}
func TestPeerManager_Accepted_MaxConnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 2,
@ -1116,13 +1117,13 @@ func TestPeerManager_Accepted_MaxConnected(t *testing.T) {
}
func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("d", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{
PeerScores: map[types.NodeID]p2p.PeerScore{
c.NodeID: 1,
d.NodeID: 2,
},
@ -1162,12 +1163,12 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) {
}
func TestPeerManager_Accepted_Upgrade(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{
PeerScores: map[types.NodeID]p2p.PeerScore{
b.NodeID: 1,
c.NodeID: 1,
},
@ -1205,12 +1206,12 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) {
}
func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{
PeerScores: map[types.NodeID]p2p.PeerScore{
b.NodeID: 1,
c.NodeID: 1,
},
@ -1252,8 +1253,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) {
}
func TestPeerManager_Ready(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1288,7 +1289,7 @@ func TestPeerManager_Ready(t *testing.T) {
// See TryEvictNext for most tests, this just tests blocking behavior.
func TestPeerManager_EvictNext(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1321,7 +1322,7 @@ func TestPeerManager_EvictNext(t *testing.T) {
}
func TestPeerManager_EvictNext_WakeOnError(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1347,13 +1348,13 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) {
}
func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
MaxConnectedUpgrade: 1,
PeerScores: map[p2p.NodeID]p2p.PeerScore{b.NodeID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
})
require.NoError(t, err)
@ -1385,13 +1386,13 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) {
}
func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MaxConnected: 1,
MaxConnectedUpgrade: 1,
PeerScores: map[p2p.NodeID]p2p.PeerScore{b.NodeID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1},
})
require.NoError(t, err)
@ -1416,7 +1417,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) {
require.Equal(t, a.NodeID, evict)
}
func TestPeerManager_TryEvictNext(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1452,7 +1453,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) {
}
func TestPeerManager_Disconnected(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1506,7 +1507,7 @@ func TestPeerManager_Disconnected(t *testing.T) {
}
func TestPeerManager_Errored(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1542,7 +1543,7 @@ func TestPeerManager_Errored(t *testing.T) {
}
func TestPeerManager_Subscribe(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1603,7 +1604,7 @@ func TestPeerManager_Subscribe(t *testing.T) {
}
func TestPeerManager_Subscribe_Close(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1630,7 +1631,7 @@ func TestPeerManager_Subscribe_Close(t *testing.T) {
func TestPeerManager_Subscribe_Broadcast(t *testing.T) {
t.Cleanup(leaktest.Check(t))
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err)
@ -1674,7 +1675,7 @@ func TestPeerManager_Close(t *testing.T) {
// leaktest will check that spawned goroutines are closed.
t.Cleanup(leaktest.CheckTimeout(t, 1*time.Second))
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
MinRetryTime: 10 * time.Second,
@ -1700,23 +1701,23 @@ func TestPeerManager_Close(t *testing.T) {
}
func TestPeerManager_Advertise(t *testing.T) {
aID := p2p.NodeID(strings.Repeat("a", 40))
aID := types.NodeID(strings.Repeat("a", 40))
aTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: aID, Hostname: "127.0.0.1", Port: 26657, Path: "/path"}
aMem := p2p.NodeAddress{Protocol: "memory", NodeID: aID}
bID := p2p.NodeID(strings.Repeat("b", 40))
bID := types.NodeID(strings.Repeat("b", 40))
bTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: bID, Hostname: "b10c::1", Port: 26657, Path: "/path"}
bMem := p2p.NodeAddress{Protocol: "memory", NodeID: bID}
cID := p2p.NodeID(strings.Repeat("c", 40))
cID := types.NodeID(strings.Repeat("c", 40))
cTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: cID, Hostname: "host.domain", Port: 80}
cMem := p2p.NodeAddress{Protocol: "memory", NodeID: cID}
dID := p2p.NodeID(strings.Repeat("d", 40))
dID := types.NodeID(strings.Repeat("d", 40))
// Create an initial peer manager and add the peers.
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{
PeerScores: map[p2p.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1},
PeerScores: map[types.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1},
})
require.NoError(t, err)
defer peerManager.Close()
@ -1760,8 +1761,8 @@ func TestPeerManager_Advertise(t *testing.T) {
}
func TestPeerManager_SetHeight_GetHeight(t *testing.T) {
a := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))}
db := dbm.NewMemDB()
peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
@ -1780,17 +1781,17 @@ func TestPeerManager_SetHeight_GetHeight(t *testing.T) {
require.EqualValues(t, 3, peerManager.GetHeight(a.NodeID))
// Setting a height should add an unknown node.
require.Equal(t, []p2p.NodeID{a.NodeID}, peerManager.Peers())
require.Equal(t, []types.NodeID{a.NodeID}, peerManager.Peers())
require.NoError(t, peerManager.SetHeight(b.NodeID, 7))
require.EqualValues(t, 7, peerManager.GetHeight(b.NodeID))
require.ElementsMatch(t, []p2p.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
// The heights should not be persisted.
peerManager.Close()
peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{})
require.NoError(t, err)
require.ElementsMatch(t, []p2p.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers())
require.Zero(t, peerManager.GetHeight(a.NodeID))
require.Zero(t, peerManager.GetHeight(b.NodeID))
}

+10 -9 internal/p2p/pex/addrbook.go

@ -21,6 +21,7 @@ import (
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
const (
@ -59,7 +60,7 @@ type AddrBook interface {
PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress
// Mark address
MarkGood(p2p.NodeID)
MarkGood(types.NodeID)
MarkAttempt(*p2p.NetAddress)
MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list
// Add bad peers back to addrBook
@ -89,9 +90,9 @@ type addrBook struct {
// accessed concurrently
mtx tmsync.Mutex
ourAddrs map[string]struct{}
privateIDs map[p2p.NodeID]struct{}
addrLookup map[p2p.NodeID]*knownAddress // new & old
badPeers map[p2p.NodeID]*knownAddress // blacklisted peers
privateIDs map[types.NodeID]struct{}
addrLookup map[types.NodeID]*knownAddress // new & old
badPeers map[types.NodeID]*knownAddress // blacklisted peers
bucketsOld []map[string]*knownAddress
bucketsNew []map[string]*knownAddress
nOld int
@ -120,9 +121,9 @@ func mustNewHasher() hash.Hash64 {
func NewAddrBook(filePath string, routabilityStrict bool) AddrBook {
am := &addrBook{
ourAddrs: make(map[string]struct{}),
privateIDs: make(map[p2p.NodeID]struct{}),
addrLookup: make(map[p2p.NodeID]*knownAddress),
badPeers: make(map[p2p.NodeID]*knownAddress),
privateIDs: make(map[types.NodeID]struct{}),
addrLookup: make(map[types.NodeID]*knownAddress),
badPeers: make(map[types.NodeID]*knownAddress),
filePath: filePath,
routabilityStrict: routabilityStrict,
}
@ -201,7 +202,7 @@ func (a *addrBook) AddPrivateIDs(ids []string) {
defer a.mtx.Unlock()
for _, id := range ids {
a.privateIDs[p2p.NodeID(id)] = struct{}{}
a.privateIDs[types.NodeID(id)] = struct{}{}
}
}
@ -319,7 +320,7 @@ func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
// MarkGood implements AddrBook - it marks the peer as good and
// moves it into an "old" bucket.
func (a *addrBook) MarkGood(id p2p.NodeID) {
func (a *addrBook) MarkGood(id types.NodeID) {
a.mtx.Lock()
defer a.mtx.Unlock()
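
A short sketch of the retyped AddrBook surface (the file path and literal IDs are illustrative; pex is likewise internal to this repository):

package main

import (
    "github.com/tendermint/tendermint/internal/p2p/pex"
    "github.com/tendermint/tendermint/types"
)

func main() {
    book := pex.NewAddrBook("/tmp/addrbook.json", true) // routabilityStrict = true

    // Private IDs still enter as strings and are converted internally.
    book.AddPrivateIDs([]string{"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"})

    // Marking a peer good is now keyed by the public ID type.
    book.MarkGood(types.NodeID("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"))
}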


+9 -8 internal/p2p/pex/addrbook_test.go

@ -18,6 +18,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
)
// FIXME These tests should not rely on .(*addrBook) assertions
@ -194,9 +195,9 @@ func randIPv4Address(t *testing.T) *p2p.NetAddress {
mrand.Intn(255),
)
port := mrand.Intn(65535-1) + 1
id := p2p.NodeID(hex.EncodeToString(tmrand.Bytes(p2p.NodeIDByteLength)))
idAddr := p2p.IDAddressString(id, fmt.Sprintf("%v:%v", ip, port))
addr, err := p2p.NewNetAddressString(idAddr)
id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength)))
idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port))
addr, err := types.NewNetAddressString(idAddr)
assert.Nil(t, err, "error generating rand network address")
if addr.Routable() {
return addr
@ -579,13 +580,13 @@ func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) {
// to ensure we aren't in a case that got probabilistically ignored
numOverrideAttempts := 10
peerRealAddr, err := p2p.NewNetAddressString(peerID + "@" + peerRealIP)
peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP)
require.Nil(t, err)
peerOverrideAttemptAddr, err := p2p.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP)
peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP)
require.Nil(t, err)
src, err := p2p.NewNetAddressString(SrcAddr)
src, err := types.NewNetAddressString(SrcAddr)
require.Nil(t, err)
book := NewAddrBook(fname, true)
@ -649,7 +650,7 @@ func TestAddrBookGroupKey(t *testing.T) {
for i, tc := range testCases {
nip := net.ParseIP(tc.ip)
key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), false)
key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false)
assert.Equal(t, tc.expKey, key, "#%d", i)
}
@ -679,7 +680,7 @@ func TestAddrBookGroupKey(t *testing.T) {
for i, tc := range testCases {
nip := net.ParseIP(tc.ip)
key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), true)
key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true)
assert.Equal(t, tc.expKey, key, "#%d", i)
}
}
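A sketch of the relocated helper: what was the free function p2p.IDAddressString(id, addr) is now a method on types.NodeID (the values below are illustrative):

id := types.NodeID("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef")
s := id.AddressString("127.0.0.1:26656") // "<id>@127.0.0.1:26656"
na, err := types.NewNetAddressString(s)  // parses back into a *types.NetAddress
if err != nil {
	panic(err)
}
_ = na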


internal/p2p/pex/bench_test.go (+4 -4)

@ -3,15 +3,15 @@ package pex
import (
"testing"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
func BenchmarkAddrBook_hash(b *testing.B) {
book := &addrBook{
ourAddrs: make(map[string]struct{}),
privateIDs: make(map[p2p.NodeID]struct{}),
addrLookup: make(map[p2p.NodeID]*knownAddress),
badPeers: make(map[p2p.NodeID]*knownAddress),
privateIDs: make(map[types.NodeID]struct{}),
addrLookup: make(map[types.NodeID]*knownAddress),
badPeers: make(map[types.NodeID]*knownAddress),
filePath: "",
routabilityStrict: true,
}


internal/p2p/pex/known_address.go (+2 -1)

@ -4,6 +4,7 @@ import (
"time"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// knownAddress tracks information about a known network address
@ -30,7 +31,7 @@ func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress {
}
}
func (ka *knownAddress) ID() p2p.NodeID {
func (ka *knownAddress) ID() types.NodeID {
return ka.Addr.ID
}


internal/p2p/pex/pex_reactor.go (+55 -6)

@ -3,6 +3,7 @@ package pex
import (
"errors"
"fmt"
"net"
"sync"
"time"
@ -15,6 +16,7 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
type Peer = p2p.Peer
@ -97,7 +99,7 @@ type Reactor struct {
attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
// seed/crawled mode fields
crawlPeerInfos map[p2p.NodeID]crawlPeerInfo
crawlPeerInfos map[types.NodeID]crawlPeerInfo
}
func (r *Reactor) minReceiveRequestInterval() time.Duration {
@ -137,7 +139,7 @@ func NewReactor(b AddrBook, config *ReactorConfig) *Reactor {
ensurePeersPeriod: defaultEnsurePeersPeriod,
requestsSent: cmap.NewCMap(),
lastReceivedRequests: cmap.NewCMap(),
crawlPeerInfos: make(map[p2p.NodeID]crawlPeerInfo),
crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo),
}
r.BaseReactor = *p2p.NewBaseReactor("PEX", r)
return r
@ -289,7 +291,7 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) {
case *tmp2p.PexResponse:
// If we asked for addresses, add them to the book
addrs, err := p2p.NetAddressesFromProto(msg.Addresses)
addrs, err := NetAddressesFromProto(msg.Addresses)
if err != nil {
r.Switch.StopPeerForError(src, err)
r.book.MarkBad(src.SocketAddr(), defaultBanTime)
@ -411,7 +413,7 @@ func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
// SendAddrs sends addrs to the peer.
func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: p2p.NetAddressesToProto(netAddrs)}))
p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)}))
}
// SetEnsurePeersPeriod sets period to ensure peers connected.
@ -477,7 +479,7 @@ func (r *Reactor) ensurePeers() {
// NOTE: range here is [10, 90]. Too high?
newBias := tmmath.MinInt(out, 8)*10 + 10
toDial := make(map[p2p.NodeID]*p2p.NetAddress)
toDial := make(map[types.NodeID]*p2p.NetAddress)
// Try maxAttempts times to pick numToDial addresses to dial
maxAttempts := numToDial * 3
@ -615,7 +617,7 @@ func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err e
numOnline = lSeeds - len(errs)
for _, err := range errs {
switch e := err.(type) {
case p2p.ErrNetAddressLookup:
case types.ErrNetAddressLookup:
r.Logger.Error("Connecting to seed failed", "err", e)
default:
return 0, nil, fmt.Errorf("seed node configuration has error: %w", e)
@ -812,3 +814,50 @@ func decodeMsg(bz []byte) (proto.Message, error) {
return nil, fmt.Errorf("unknown message: %T", msg)
}
}
//-----------------------------------------------------------------------------
// address converters
// NetAddressFromProto converts a Protobuf PexAddress into a native struct.
func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) {
ip := net.ParseIP(pb.IP)
if ip == nil {
return nil, fmt.Errorf("invalid IP address %v", pb.IP)
}
if pb.Port >= 1<<16 {
return nil, fmt.Errorf("invalid port number %v", pb.Port)
}
return &types.NetAddress{
ID: types.NodeID(pb.ID),
IP: ip,
Port: uint16(pb.Port),
}, nil
}
// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice.
func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) {
nas := make([]*types.NetAddress, 0, len(pbs))
for _, pb := range pbs {
na, err := NetAddressFromProto(pb)
if err != nil {
return nil, err
}
nas = append(nas, na)
}
return nas, nil
}
// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice.
func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress {
pbs := make([]tmp2p.PexAddress, 0, len(nas))
for _, na := range nas {
if na != nil {
pbs = append(pbs, tmp2p.PexAddress{
ID: string(na.ID),
IP: na.IP.String(),
Port: uint32(na.Port),
})
}
}
return pbs
}
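A minimal round-trip sketch of the converters above, assuming it runs inside the pex package (the address literal is illustrative):

na, err := types.NewNetAddressString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")
if err != nil {
	panic(err)
}
pbs := NetAddressesToProto([]*types.NetAddress{na}) // wire representation
back, err := NetAddressesFromProto(pbs)             // native structs again
if err != nil {
	panic(err)
}
_ = back[0].ID == na.ID // true: the round trip preserves the node ID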

internal/p2p/pex/pex_reactor_test.go (+5 -3)

@ -18,6 +18,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/mock"
"github.com/tendermint/tendermint/libs/log"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
var (
@ -128,7 +129,7 @@ func TestPEXReactorReceive(t *testing.T) {
size := book.Size()
na, err := peer.NodeInfo().NetAddress()
require.NoError(t, err)
msg := mustEncode(&tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{na.ToProto()}})
msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})})
r.Receive(PexChannel, peer, msg)
assert.Equal(t, size+1, book.Size())
@ -185,7 +186,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
assert.True(t, r.requestsSent.Has(id))
assert.True(t, sw.Peers().Has(peer.ID()))
msg := mustEncode(&tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{peer.SocketAddr().ToProto()}})
msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})})
// receive some addrs. should clear the request
r.Receive(PexChannel, peer, msg)
@ -457,7 +458,8 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
size := book.Size()
na, err := peer.NodeInfo().NetAddress()
require.NoError(t, err)
msg := mustEncode(&tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{na.ToProto()}})
msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})})
pexR.Receive(PexChannel, peer, msg)
assert.Equal(t, size, book.Size())


internal/p2p/pex/reactor.go (+11 -10)

@ -12,6 +12,7 @@ import (
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/libs/service"
protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
var (
@ -77,7 +78,7 @@ type ReactorV2 struct {
closeCh chan struct{}
// list of available peers to loop through and send peer requests to
availablePeers map[p2p.NodeID]struct{}
availablePeers map[types.NodeID]struct{}
mtx sync.RWMutex
@ -85,12 +86,12 @@ type ReactorV2 struct {
// to. This prevents the sending of spurious responses.
// NOTE: If a node never responds, it will remain in this map until a
// peer-down status update is sent
requestsSent map[p2p.NodeID]struct{}
requestsSent map[types.NodeID]struct{}
// lastReceivedRequests keeps track of when peers send a request to prevent
// peers from sending requests too often (as defined by
// minReceiveRequestInterval).
lastReceivedRequests map[p2p.NodeID]time.Time
lastReceivedRequests map[types.NodeID]time.Time
// the time when another request will be sent
nextRequestTime time.Time
@ -119,9 +120,9 @@ func NewReactorV2(
pexCh: pexCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
availablePeers: make(map[p2p.NodeID]struct{}),
requestsSent: make(map[p2p.NodeID]struct{}),
lastReceivedRequests: make(map[p2p.NodeID]time.Time),
availablePeers: make(map[types.NodeID]struct{}),
requestsSent: make(map[types.NodeID]struct{}),
lastReceivedRequests: make(map[types.NodeID]time.Time),
}
r.BaseService = *service.NewBaseService(logger, "PEX", r)
@ -418,7 +419,7 @@ func (r *ReactorV2) sendRequestForPeers() {
r.nextRequestTime = time.Now().Add(noAvailablePeersWaitPeriod)
return
}
var peerID p2p.NodeID
var peerID types.NodeID
// use range to get a random peer.
for peerID = range r.availablePeers {
@ -492,7 +493,7 @@ func (r *ReactorV2) calculateNextRequestTime() {
r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio))
}
func (r *ReactorV2) markPeerRequest(peer p2p.NodeID) error {
func (r *ReactorV2) markPeerRequest(peer types.NodeID) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok {
@ -505,7 +506,7 @@ func (r *ReactorV2) markPeerRequest(peer p2p.NodeID) error {
return nil
}
func (r *ReactorV2) markPeerResponse(peer p2p.NodeID) error {
func (r *ReactorV2) markPeerResponse(peer types.NodeID) error {
r.mtx.Lock()
defer r.mtx.Unlock()
// check if a request to this peer was sent
@ -522,7 +523,7 @@ func (r *ReactorV2) markPeerResponse(peer p2p.NodeID) error {
// all addresses must use a MCONN protocol for the peer to be considered part of the
// legacy p2p pex system
func (r *ReactorV2) isLegacyPeer(peer p2p.NodeID) bool {
func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool {
for _, addr := range r.peerManager.Addresses(peer) {
if addr.Protocol != p2p.MConnProtocol {
return false
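The random pick in sendRequestForPeers above leans on Go's unspecified map iteration order; the same idiom in isolation (the map literal is illustrative):

availablePeers := map[types.NodeID]struct{}{"aa": {}, "bb": {}}
var peerID types.NodeID
for peerID = range availablePeers {
	break // the first ranged key is effectively a random pick
}
_ = peerID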


internal/p2p/pex/reactor_test.go (+17 -16)

@ -15,6 +15,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/pex"
"github.com/tendermint/tendermint/libs/log"
proto "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
const (
@ -326,14 +327,14 @@ type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
reactors map[p2p.NodeID]*pex.ReactorV2
pexChannels map[p2p.NodeID]*p2p.Channel
reactors map[types.NodeID]*pex.ReactorV2
pexChannels map[types.NodeID]*p2p.Channel
peerChans map[p2p.NodeID]chan p2p.PeerUpdate
peerUpdates map[p2p.NodeID]*p2p.PeerUpdates
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
nodes []p2p.NodeID
mocks []p2p.NodeID
nodes []types.NodeID
mocks []types.NodeID
total int
opts testOptions
}
@ -369,10 +370,10 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
rts := &reactorTestSuite{
logger: log.TestingLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(t, networkOpts),
reactors: make(map[p2p.NodeID]*pex.ReactorV2, realNodes),
pexChannels: make(map[p2p.NodeID]*p2p.Channel, opts.TotalNodes),
peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
reactors: make(map[types.NodeID]*pex.ReactorV2, realNodes),
pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
total: opts.TotalNodes,
opts: opts,
}
@ -464,7 +465,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
func (r *reactorTestSuite) listenFor(
t *testing.T,
node p2p.NodeID,
node types.NodeID,
conditional func(msg p2p.Envelope) bool,
assertion func(t *testing.T, msg p2p.Envelope) bool,
waitPeriod time.Duration,
@ -788,7 +789,7 @@ func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto
return addresses
}
func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (p2p.NodeID, p2p.NodeID) {
func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) {
require.NotEqual(t, first, second)
require.Less(t, first, r.total)
require.Less(t, second, r.total)
@ -806,12 +807,12 @@ func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
}
}
func newNodeID(t *testing.T, id string) p2p.NodeID {
nodeID, err := p2p.NewNodeID(strings.Repeat(id, 2*p2p.NodeIDByteLength))
func newNodeID(t *testing.T, id string) types.NodeID {
nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength))
require.NoError(t, err)
return nodeID
}
func randomNodeID(t *testing.T) p2p.NodeID {
return p2p.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
func randomNodeID(t *testing.T) types.NodeID {
return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
}

internal/p2p/router.go (+20 -14)

@ -16,6 +16,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
const queueBufferDefault = 4096
@ -25,8 +26,8 @@ type ChannelID uint16
// Envelope contains a message with sender/receiver routing info.
type Envelope struct {
From NodeID // sender (empty if outbound)
To NodeID // receiver (empty if inbound)
From types.NodeID // sender (empty if outbound)
To types.NodeID // receiver (empty if inbound)
Broadcast bool // send to all connected peers (ignores To)
Message proto.Message // message payload
@ -51,7 +52,7 @@ type Envelope struct {
// It should possibly also allow reactors to request explicit actions, e.g.
// disconnection or banning, in addition to doing this based on aggregates.
type PeerError struct {
NodeID NodeID
NodeID types.NodeID
Err error
}
@ -156,7 +157,7 @@ type RouterOptions struct {
// but this occurs after the handshake is complete. Filter by
// IP address to filter before the handshake. Functions should
// return an error to reject the peer.
FilterPeerByID func(context.Context, NodeID) error
FilterPeerByID func(context.Context, types.NodeID) error
// DialSleep controls the amount of time that the router
// sleeps between dialing peers. If not set, a default value
@ -257,7 +258,7 @@ type Router struct {
stopCh chan struct{} // signals Router shutdown
peerMtx sync.RWMutex
peerQueues map[NodeID]queue // outbound messages per peer for all channels
peerQueues map[types.NodeID]queue // outbound messages per peer for all channels
queueFactory func(int) queue
// FIXME: We don't strictly need to use a mutex for this if we seal the
@ -302,7 +303,7 @@ func NewRouter(
stopCh: make(chan struct{}),
channelQueues: map[ChannelID]queue{},
channelMessages: map[ChannelID]proto.Message{},
peerQueues: map[NodeID]queue{},
peerQueues: map[types.NodeID]queue{},
}
router.BaseService = service.NewBaseService(logger, "router", router)
@ -509,7 +510,7 @@ func (r *Router) filterPeersIP(ctx context.Context, ip net.IP, port uint16) erro
return r.options.FilterPeerByIP(ctx, ip, port)
}
func (r *Router) filterPeersID(ctx context.Context, id NodeID) error {
func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
if r.options.FilterPeerByID == nil {
return nil
}
@ -717,7 +718,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
go r.routePeer(address.NodeID, conn)
}
func (r *Router) getOrMakeQueue(peerID NodeID) queue {
func (r *Router) getOrMakeQueue(peerID types.NodeID) queue {
r.peerMtx.Lock()
defer r.peerMtx.Unlock()
@ -782,7 +783,12 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection,
// handshakePeer handshakes with a peer, validating the peer's information. If
// expectID is given, we check that the peer's info matches it.
func (r *Router) handshakePeer(ctx context.Context, conn Connection, expectID NodeID) (NodeInfo, crypto.PubKey, error) {
func (r *Router) handshakePeer(
ctx context.Context,
conn Connection,
expectID types.NodeID,
) (NodeInfo, crypto.PubKey, error) {
if r.options.HandshakeTimeout > 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, r.options.HandshakeTimeout)
@ -797,9 +803,9 @@ func (r *Router) handshakePeer(ctx context.Context, conn Connection, expectID No
if err = peerInfo.Validate(); err != nil {
return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err)
}
if NodeIDFromPubKey(peerKey) != peerInfo.NodeID {
if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID {
return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
peerInfo.NodeID, NodeIDFromPubKey(peerKey))
peerInfo.NodeID, types.NodeIDFromPubKey(peerKey))
}
if expectID != "" && expectID != peerInfo.NodeID {
return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q",
@ -817,7 +823,7 @@ func (r *Router) runWithPeerMutex(fn func() error) error {
// routePeer routes inbound and outbound messages between a peer and the reactor
// channels. It will close the given connection and send queue when done, or if
// they are closed elsewhere it will cause this method to shut down and return.
func (r *Router) routePeer(peerID NodeID, conn Connection) {
func (r *Router) routePeer(peerID types.NodeID, conn Connection) {
r.metrics.Peers.Add(1)
r.peerManager.Ready(peerID)
@ -866,7 +872,7 @@ func (r *Router) routePeer(peerID NodeID, conn Connection) {
// receivePeer receives inbound messages from a peer, deserializes them and
// passes them on to the appropriate channel.
func (r *Router) receivePeer(peerID NodeID, conn Connection) error {
func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error {
for {
chID, bz, err := conn.ReceiveMessage()
if err != nil {
@ -917,7 +923,7 @@ func (r *Router) receivePeer(peerID NodeID, conn Connection) error {
}
// sendPeer sends queued messages to a peer.
func (r *Router) sendPeer(peerID NodeID, conn Connection, peerQueue queue) error {
func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) error {
for {
start := time.Now().UTC()
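The key check in handshakePeer above holds because a node's ID is derived deterministically from its public key; a small sketch (the key generation is illustrative):

privKey := ed25519.GenPrivKey()
id := types.NodeIDFromPubKey(privKey.PubKey()) // 40-char hex types.NodeID
_ = id // a peer advertising a NodeID that differs from this derivation is rejected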


internal/p2p/router_test.go (+7 -6)

@ -24,6 +24,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/mocks"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
func echoReactor(channel *p2p.Channel) {
@ -139,7 +140,7 @@ func TestRouter_Channel(t *testing.T) {
// We should be able to send on the channel, even though there are no peers.
p2ptest.RequireSend(t, channel, p2p.Envelope{
To: p2p.NodeID(strings.Repeat("a", 40)),
To: types.NodeID(strings.Repeat("a", 40)),
Message: &p2ptest.Message{Value: "foo"},
})
@ -181,7 +182,7 @@ func TestRouter_Channel_SendReceive(t *testing.T) {
// Sending to an unknown peer should be dropped.
p2ptest.RequireSend(t, a, p2p.Envelope{
To: p2p.NodeID(strings.Repeat("a", 40)),
To: types.NodeID(strings.Repeat("a", 40)),
Message: &p2ptest.Message{Value: "a"},
})
p2ptest.RequireEmpty(t, a, b, c)
@ -520,7 +521,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) {
func TestRouter_DialPeers(t *testing.T) {
testcases := map[string]struct {
dialID p2p.NodeID
dialID types.NodeID
peerInfo p2p.NodeInfo
peerKey crypto.PubKey
dialErr error
@ -621,9 +622,9 @@ func TestRouter_DialPeers(t *testing.T) {
func TestRouter_DialPeers_Parallel(t *testing.T) {
t.Cleanup(leaktest.Check(t))
a := p2p.NodeAddress{Protocol: "mock", NodeID: p2p.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "mock", NodeID: p2p.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "mock", NodeID: p2p.NodeID(strings.Repeat("c", 40))}
a := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("a", 40))}
b := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("b", 40))}
c := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("c", 40))}
// Set up a mock transport that returns a connection that blocks during the
// handshake. It should dial all peers in parallel.


internal/p2p/shim_test.go (+3 -2)

@ -12,6 +12,7 @@ import (
p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks"
"github.com/tendermint/tendermint/libs/log"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
"github.com/tendermint/tendermint/types"
)
var (
@ -77,10 +78,10 @@ func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite {
return rts
}
func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, p2p.NodeID) {
func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) {
t.Helper()
peerID := p2p.NodeID(id)
peerID := types.NodeID(id)
peer := &p2pmocks.Peer{}
peer.On("ID").Return(peerID)


internal/p2p/switch.go (+29 -12)

@ -16,6 +16,7 @@ import (
"github.com/tendermint/tendermint/libs/cmap"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
const (
@ -56,7 +57,7 @@ type AddrBook interface {
AddPrivateIDs([]string)
AddOurAddress(*NetAddress)
OurAddress(*NetAddress) bool
MarkGood(NodeID)
MarkGood(types.NodeID)
RemoveAddress(*NetAddress)
HasAddress(*NetAddress) bool
Save()
@ -107,7 +108,7 @@ type Switch struct {
addrBook AddrBook
// peers addresses with whom we'll maintain constant connection
persistentPeersAddrs []*NetAddress
unconditionalPeerIDs map[NodeID]struct{}
unconditionalPeerIDs map[types.NodeID]struct{}
transport Transport
@ -153,7 +154,7 @@ func NewSwitch(
metrics: NopMetrics(),
transport: transport,
persistentPeersAddrs: make([]*NetAddress, 0),
unconditionalPeerIDs: make(map[NodeID]struct{}),
unconditionalPeerIDs: make(map[types.NodeID]struct{}),
filterTimeout: defaultFilterTimeout,
conns: NewConnSet(),
}
@ -352,7 +353,7 @@ func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
return
}
func (sw *Switch) IsPeerUnconditional(id NodeID) bool {
func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool {
_, ok := sw.unconditionalPeerIDs[id]
return ok
}
@ -517,7 +518,7 @@ func (sw *Switch) DialPeersAsync(peers []string) error {
}
// return first non-ErrNetAddressLookup error
for _, err := range errs {
if _, ok := err.(ErrNetAddressLookup); ok {
if _, ok := err.(types.ErrNetAddressLookup); ok {
continue
}
return err
@ -621,7 +622,7 @@ func (sw *Switch) AddPersistentPeers(addrs []string) error {
}
// return first non-ErrNetAddressLookup error
for _, err := range errs {
if _, ok := err.(ErrNetAddressLookup); ok {
if _, ok := err.(types.ErrNetAddressLookup); ok {
continue
}
return err
@ -633,11 +634,11 @@ func (sw *Switch) AddPersistentPeers(addrs []string) error {
func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error {
sw.Logger.Info("Adding unconditional peer ids", "ids", ids)
for i, id := range ids {
err := NodeID(id).Validate()
err := types.NodeID(id).Validate()
if err != nil {
return fmt.Errorf("wrong ID #%d: %w", i, err)
}
sw.unconditionalPeerIDs[NodeID(id)] = struct{}{}
sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{}
}
return nil
}
@ -645,7 +646,7 @@ func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error {
func (sw *Switch) AddPrivatePeerIDs(ids []string) error {
validIDs := make([]string, 0, len(ids))
for i, id := range ids {
err := NodeID(id).Validate()
err := types.NodeID(id).Validate()
if err != nil {
return fmt.Errorf("wrong ID #%d: %w", i, err)
}
@ -855,7 +856,7 @@ func (sw *Switch) addOutboundPeerWithConfig(
return nil
}
func (sw *Switch) handshakePeer(c Connection, expectPeerID NodeID) (NodeInfo, crypto.PubKey, error) {
func (sw *Switch) handshakePeer(c Connection, expectPeerID types.NodeID) (NodeInfo, crypto.PubKey, error) {
// Moved from transport and hardcoded until legacy P2P stack removal.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
@ -879,7 +880,7 @@ func (sw *Switch) handshakePeer(c Connection, expectPeerID NodeID) (NodeInfo, cr
// For outgoing conns, ensure connection key matches dialed key.
if expectPeerID != "" {
peerID := NodeIDFromPubKey(peerKey)
peerID := types.NodeIDFromPubKey(peerKey)
if expectPeerID != peerID {
return peerInfo, peerKey, ErrRejected{
conn: c.(*mConnConnection).conn,
@ -896,7 +897,7 @@ func (sw *Switch) handshakePeer(c Connection, expectPeerID NodeID) (NodeInfo, cr
if sw.nodeInfo.ID() == peerInfo.ID() {
return peerInfo, peerKey, ErrRejected{
addr: *NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()),
addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()),
conn: c.(*mConnConnection).conn,
id: peerInfo.ID(),
isSelf: true,
@ -1037,3 +1038,19 @@ func (sw *Switch) addPeer(p Peer) error {
return nil
}
// NewNetAddressStrings returns an array of NetAddresses built using
// the provided strings.
func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) {
netAddrs := make([]*NetAddress, 0)
errs := make([]error, 0)
for _, addr := range addrs {
netAddr, err := types.NewNetAddressString(addr)
if err != nil {
errs = append(errs, err)
} else {
netAddrs = append(netAddrs, netAddr)
}
}
return netAddrs, errs
}
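A sketch of the partitioning behavior (addresses illustrative; the test in switch_test.go below exercises the same path): invalid entries are collected as errors without blocking the valid ones.

addrs, errs := NewNetAddressStrings([]string{
	"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656", // parses
	"127.0.0.1:26656", // missing node ID, collected in errs
})
// len(addrs) == 1, len(errs) == 1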

internal/p2p/switch_test.go (+15 -5)

@ -24,6 +24,7 @@ import (
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
var (
@ -38,7 +39,7 @@ func init() {
}
type PeerMessage struct {
PeerID NodeID
PeerID types.NodeID
Bytes []byte
Counter int
}
@ -242,7 +243,7 @@ func TestSwitchPeerFilter(t *testing.T) {
rp.Start()
t.Cleanup(rp.Stop)
c, err := sw.transport.Dial(ctx, rp.Addr().Endpoint())
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
@ -299,7 +300,7 @@ func TestSwitchPeerFilterTimeout(t *testing.T) {
rp.Start()
defer rp.Stop()
c, err := sw.transport.Dial(ctx, rp.Addr().Endpoint())
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
@ -335,7 +336,7 @@ func TestSwitchPeerFilterDuplicate(t *testing.T) {
rp.Start()
defer rp.Stop()
c, err := sw.transport.Dial(ctx, rp.Addr().Endpoint())
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
@ -390,7 +391,7 @@ func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
rp.Start()
defer rp.Stop()
c, err := sw.transport.Dial(ctx, rp.Addr().Endpoint())
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
@ -870,3 +871,12 @@ func BenchmarkSwitchBroadcast(b *testing.B) {
b.Logf("success: %v, failure: %v", numSuccess, numFailure)
}
func TestNewNetAddressStrings(t *testing.T) {
addrs, errs := NewNetAddressStrings([]string{
"127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"})
assert.Len(t, errs, 1)
assert.Equal(t, 2, len(addrs))
}

internal/p2p/test_util.go (+8 -7)

@ -9,6 +9,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p/conn"
@ -46,7 +47,7 @@ func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
mrand.Int()%256,
mrand.Int()%256,
mrand.Int()%256)
netAddr, err = NewNetAddressString(addr)
netAddr, err = types.NewNetAddressString(addr)
if err != nil {
panic(err)
}
@ -171,8 +172,8 @@ func MakeSwitch(
nodeKey := GenNodeKey()
nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i))
addr, err := NewNetAddressString(
IDAddressString(nodeKey.ID, nodeInfo.ListenAddr),
addr, err := types.NewNetAddressString(
nodeKey.ID.AddressString(nodeInfo.ListenAddr),
)
if err != nil {
panic(err)
@ -187,7 +188,7 @@ func MakeSwitch(
sw.SetLogger(swLogger)
sw.SetNodeKey(nodeKey)
if err := t.Listen(addr.Endpoint()); err != nil {
if err := t.Listen(NewEndpoint(addr)); err != nil {
panic(err)
}
@ -226,11 +227,11 @@ func testPeerConn(
//----------------------------------------------------------------
// rand node info
func testNodeInfo(id NodeID, name string) NodeInfo {
func testNodeInfo(id types.NodeID, name string) NodeInfo {
return testNodeInfoWithNetwork(id, name, "testing")
}
func testNodeInfoWithNetwork(id NodeID, name, network string) NodeInfo {
func testNodeInfoWithNetwork(id types.NodeID, name, network string) NodeInfo {
return NodeInfo{
ProtocolVersion: defaultProtocolVersion,
NodeID: id,
@ -271,7 +272,7 @@ func (book *AddrBookMock) OurAddress(addr *NetAddress) bool {
_, ok := book.OurAddrs[addr.String()]
return ok
}
func (book *AddrBookMock) MarkGood(NodeID) {}
func (book *AddrBookMock) MarkGood(types.NodeID) {}
func (book *AddrBookMock) HasAddress(addr *NetAddress) bool {
_, ok := book.Addrs[addr.String()]
return ok


internal/p2p/transport.go (+12 -2)

@ -8,6 +8,7 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/types"
)
//go:generate mockery --case underscore --name Transport|Connection
@ -145,8 +146,17 @@ type Endpoint struct {
Path string
}
// NewEndpoint constructs an Endpoint from a types.NetAddress structure.
func NewEndpoint(na *types.NetAddress) Endpoint {
return Endpoint{
Protocol: MConnProtocol,
IP: na.IP,
Port: na.Port,
}
}
// NodeAddress converts the endpoint into a NodeAddress for the given node ID.
func (e Endpoint) NodeAddress(nodeID NodeID) NodeAddress {
func (e Endpoint) NodeAddress(nodeID types.NodeID) NodeAddress {
address := NodeAddress{
NodeID: nodeID,
Protocol: e.Protocol,
@ -165,7 +175,7 @@ func (e Endpoint) String() string {
// assume that path is a node ID (to handle opaque URLs of the form
// scheme:id).
if e.IP == nil {
if nodeID, err := NewNodeID(e.Path); err == nil {
if nodeID, err := types.NewNodeID(e.Path); err == nil {
return e.NodeAddress(nodeID).String()
}
}
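A hypothetical helper showing the new call pattern around NewEndpoint (it replaces the old addr.Endpoint() call sites; a sketch only, assuming it lives in this package alongside Transport and Connection):

// dialNetAddress dials the MConn endpoint derived from a types.NetAddress.
func dialNetAddress(ctx context.Context, t Transport, na *types.NetAddress) (Connection, error) {
	return t.Dial(ctx, NewEndpoint(na))
}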


internal/p2p/transport_memory.go (+13 -12)

@ -12,6 +12,7 @@ import (
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
const (
@ -27,7 +28,7 @@ type MemoryNetwork struct {
logger log.Logger
mtx sync.RWMutex
transports map[NodeID]*MemoryTransport
transports map[types.NodeID]*MemoryTransport
bufferSize int
}
@ -36,14 +37,14 @@ func NewMemoryNetwork(logger log.Logger, bufferSize int) *MemoryNetwork {
return &MemoryNetwork{
bufferSize: bufferSize,
logger: logger,
transports: map[NodeID]*MemoryTransport{},
transports: map[types.NodeID]*MemoryTransport{},
}
}
// CreateTransport creates a new memory transport endpoint with the given node
// ID and immediately begins listening on the address "memory:<id>". It panics
// if the node ID is already in use (which is fine, since this is for tests).
func (n *MemoryNetwork) CreateTransport(nodeID NodeID) *MemoryTransport {
func (n *MemoryNetwork) CreateTransport(nodeID types.NodeID) *MemoryTransport {
t := newMemoryTransport(n, nodeID)
n.mtx.Lock()
@ -56,14 +57,14 @@ func (n *MemoryNetwork) CreateTransport(nodeID NodeID) *MemoryTransport {
}
// GetTransport looks up a transport in the network, returning nil if not found.
func (n *MemoryNetwork) GetTransport(id NodeID) *MemoryTransport {
func (n *MemoryNetwork) GetTransport(id types.NodeID) *MemoryTransport {
n.mtx.RLock()
defer n.mtx.RUnlock()
return n.transports[id]
}
// RemoveTransport removes a transport from the network and closes it.
func (n *MemoryNetwork) RemoveTransport(id NodeID) {
func (n *MemoryNetwork) RemoveTransport(id types.NodeID) {
n.mtx.Lock()
t, ok := n.transports[id]
delete(n.transports, id)
@ -91,7 +92,7 @@ func (n *MemoryNetwork) Size() int {
type MemoryTransport struct {
logger log.Logger
network *MemoryNetwork
nodeID NodeID
nodeID types.NodeID
bufferSize int
acceptCh chan *MemoryConnection
@ -101,7 +102,7 @@ type MemoryTransport struct {
// newMemoryTransport creates a new MemoryTransport. This is for internal use by
// MemoryNetwork, use MemoryNetwork.CreateTransport() instead.
func newMemoryTransport(network *MemoryNetwork, nodeID NodeID) *MemoryTransport {
func newMemoryTransport(network *MemoryNetwork, nodeID types.NodeID) *MemoryTransport {
return &MemoryTransport{
logger: network.logger.With("local", nodeID),
network: network,
@ -162,7 +163,7 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti
return nil, err
}
nodeID, err := NewNodeID(endpoint.Path)
nodeID, err := types.NewNodeID(endpoint.Path)
if err != nil {
return nil, err
}
@ -203,8 +204,8 @@ func (t *MemoryTransport) Close() error {
// MemoryConnection is an in-memory connection between two transport endpoints.
type MemoryConnection struct {
logger log.Logger
localID NodeID
remoteID NodeID
localID types.NodeID
remoteID types.NodeID
receiveCh <-chan memoryMessage
sendCh chan<- memoryMessage
@ -224,8 +225,8 @@ type memoryMessage struct {
// newMemoryConnection creates a new MemoryConnection.
func newMemoryConnection(
logger log.Logger,
localID NodeID,
remoteID NodeID,
localID types.NodeID,
remoteID types.NodeID,
receiveCh <-chan memoryMessage,
sendCh chan<- memoryMessage,
closer *tmsync.Closer,


internal/p2p/transport_memory_test.go (+2 -1)

@ -8,6 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
// Transports are mainly tested by common tests in transport_test.go, we
@ -20,7 +21,7 @@ func init() {
network = p2p.NewMemoryNetwork(log.TestingLogger(), 1)
}
i := byte(network.Size())
nodeID, err := p2p.NewNodeID(hex.EncodeToString(bytes.Repeat([]byte{i<<4 + i}, 20)))
nodeID, err := types.NewNodeID(hex.EncodeToString(bytes.Repeat([]byte{i<<4 + i}, 20)))
require.NoError(t, err)
transport := network.CreateTransport(nodeID)


internal/p2p/transport_test.go (+7 -6)

@ -14,6 +14,7 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/types"
)
// transportFactory is used to set up transports for tests.
@ -242,7 +243,7 @@ func TestConnection_Handshake(t *testing.T) {
// A handshake should pass the given keys and NodeInfo.
aKey := ed25519.GenPrivKey()
aInfo := p2p.NodeInfo{
NodeID: p2p.NodeIDFromPubKey(aKey.PubKey()),
NodeID: types.NodeIDFromPubKey(aKey.PubKey()),
ProtocolVersion: p2p.NewProtocolVersion(1, 2, 3),
ListenAddr: "listenaddr",
Network: "network",
@ -255,7 +256,7 @@ func TestConnection_Handshake(t *testing.T) {
},
}
bKey := ed25519.GenPrivKey()
bInfo := p2p.NodeInfo{NodeID: p2p.NodeIDFromPubKey(bKey.PubKey())}
bInfo := p2p.NodeInfo{NodeID: types.NodeIDFromPubKey(bKey.PubKey())}
errCh := make(chan error, 1)
go func() {
@ -437,7 +438,7 @@ func TestEndpoint_NodeAddress(t *testing.T) {
ip4 = []byte{1, 2, 3, 4}
ip4in6 = net.IPv4(1, 2, 3, 4)
ip6 = []byte{0xb1, 0x0c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}
id = p2p.NodeID("00112233445566778899aabbccddeeff00112233")
id = types.NodeID("00112233445566778899aabbccddeeff00112233")
)
testcases := []struct {
@ -492,7 +493,7 @@ func TestEndpoint_String(t *testing.T) {
ip4 = []byte{1, 2, 3, 4}
ip4in6 = net.IPv4(1, 2, 3, 4)
ip6 = []byte{0xb1, 0x0c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01}
nodeID = p2p.NodeID("00112233445566778899aabbccddeeff00112233")
nodeID = types.NodeID("00112233445566778899aabbccddeeff00112233")
)
testcases := []struct {
@ -618,13 +619,13 @@ func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p.
errCh := make(chan error, 1)
go func() {
privKey := ed25519.GenPrivKey()
nodeInfo := p2p.NodeInfo{NodeID: p2p.NodeIDFromPubKey(privKey.PubKey())}
nodeInfo := p2p.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
_, _, err := ba.Handshake(ctx, nodeInfo, privKey)
errCh <- err
}()
privKey := ed25519.GenPrivKey()
nodeInfo := p2p.NodeInfo{NodeID: p2p.NodeIDFromPubKey(privKey.PubKey())}
nodeInfo := p2p.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())}
_, _, err := ab.Handshake(ctx, nodeInfo, privKey)
require.NoError(t, err)


internal/statesync/block_queue.go (+1 -2)

@ -6,13 +6,12 @@ import (
"sync"
"time"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
type lightBlockResponse struct {
block *types.LightBlock
peer p2p.NodeID
peer types.NodeID
}
// a block queue is used for asynchronously fetching and verifying light blocks


internal/statesync/block_queue_test.go (+7 -7)

@ -9,8 +9,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/types"
)
var (
@ -22,7 +22,7 @@ var (
)
func TestBlockQueueBasic(t *testing.T) {
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
@ -69,7 +69,7 @@ loop:
// Test with spurious failures and retries
func TestBlockQueueWithFailures(t *testing.T) {
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 200)
@ -119,7 +119,7 @@ func TestBlockQueueWithFailures(t *testing.T) {
// Test that when all the blocks are retrieved, the queue still holds on to
// its workers and in the event of failure can still fetch the failed block
func TestBlockQueueBlocks(t *testing.T) {
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 2)
expectedHeight := startHeight
@ -166,7 +166,7 @@ loop:
}
func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) {
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
defer queue.close()
@ -191,7 +191,7 @@ loop:
// Test a scenario where more blocks are needed than just the stop height because
// we haven't found a block with a small enough time.
func TestBlockQueueStopTime(t *testing.T) {
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)
queue := newBlockQueue(startHeight, stopHeight, stopTime, 1)
@ -233,7 +233,7 @@ func TestBlockQueueStopTime(t *testing.T) {
}
}
func mockLBResp(t *testing.T, peer p2p.NodeID, height int64, time time.Time) lightBlockResponse {
func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse {
return lightBlockResponse{
block: mockLB(t, height, time, factory.MakeBlockID()),
peer: peer,


internal/statesync/chunks.go (+6 -6)

@ -10,7 +10,7 @@ import (
"time"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// errDone is returned by chunkQueue.Next() when all chunks have been returned.
@ -22,7 +22,7 @@ type chunk struct {
Format uint32
Index uint32
Chunk []byte
Sender p2p.NodeID
Sender types.NodeID
}
// chunkQueue manages chunks for a state sync process, ordering them if requested. It acts as an
@ -33,7 +33,7 @@ type chunkQueue struct {
snapshot *snapshot // if this is nil, the queue has been closed
dir string // temp dir for on-disk chunk storage
chunkFiles map[uint32]string // path to temporary chunk file
chunkSenders map[uint32]p2p.NodeID // the peer who sent the given chunk
chunkSenders map[uint32]types.NodeID // the peer who sent the given chunk
chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate()
chunkReturned map[uint32]bool // chunks returned via Next()
waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival
@ -54,7 +54,7 @@ func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) {
snapshot: snapshot,
dir: dir,
chunkFiles: make(map[uint32]string, snapshot.Chunks),
chunkSenders: make(map[uint32]p2p.NodeID, snapshot.Chunks),
chunkSenders: make(map[uint32]types.NodeID, snapshot.Chunks),
chunkAllocated: make(map[uint32]bool, snapshot.Chunks),
chunkReturned: make(map[uint32]bool, snapshot.Chunks),
waiters: make(map[uint32][]chan<- uint32),
@ -188,7 +188,7 @@ func (q *chunkQueue) discard(index uint32) error {
// DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to
// discard already returned chunks, this can be done via Discard().
func (q *chunkQueue) DiscardSender(peerID p2p.NodeID) error {
func (q *chunkQueue) DiscardSender(peerID types.NodeID) error {
q.Lock()
defer q.Unlock()
@ -208,7 +208,7 @@ func (q *chunkQueue) DiscardSender(peerID p2p.NodeID) error {
// GetSender returns the sender of the chunk with the given index, or empty if
// not found.
func (q *chunkQueue) GetSender(index uint32) p2p.NodeID {
func (q *chunkQueue) GetSender(index uint32) types.NodeID {
q.Lock()
defer q.Unlock()
return q.chunkSenders[index]
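A sketch of the sender bookkeeping (field values illustrative, queue built by newChunkQueue): the queue records who supplied each chunk, so one bad sender's unreturned chunks can be dropped in a single call.

_, _ = queue.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}, Sender: types.NodeID("aa")})
_ = queue.GetSender(0)                      // -> types.NodeID("aa")
_ = queue.DiscardSender(types.NodeID("aa")) // drops "aa"'s unreturned chunks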


internal/statesync/chunks_test.go (+16 -16)

@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
func setupChunkQueue(t *testing.T) (*chunkQueue, func()) {
@ -274,7 +274,7 @@ func TestChunkQueue_DiscardSender(t *testing.T) {
defer teardown()
// Allocate and add all chunks to the queue
senders := []p2p.NodeID{p2p.NodeID("a"), p2p.NodeID("b"), p2p.NodeID("c")}
senders := []types.NodeID{types.NodeID("a"), types.NodeID("b"), types.NodeID("c")}
for i := uint32(0); i < queue.Size(); i++ {
_, err := queue.Allocate()
require.NoError(t, err)
@ -295,14 +295,14 @@ func TestChunkQueue_DiscardSender(t *testing.T) {
}
// Discarding an unknown sender should do nothing
err := queue.DiscardSender(p2p.NodeID("x"))
err := queue.DiscardSender(types.NodeID("x"))
require.NoError(t, err)
_, err = queue.Allocate()
assert.Equal(t, errDone, err)
// Discarding sender b should discard chunk 4, but not chunk 1 which has already been
// returned.
err = queue.DiscardSender(p2p.NodeID("b"))
err = queue.DiscardSender(types.NodeID("b"))
require.NoError(t, err)
index, err := queue.Allocate()
require.NoError(t, err)
@ -315,8 +315,8 @@ func TestChunkQueue_GetSender(t *testing.T) {
queue, teardown := setupChunkQueue(t)
defer teardown()
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
_, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: peerAID})
require.NoError(t, err)
@ -354,7 +354,7 @@ func TestChunkQueue_Next(t *testing.T) {
}()
assert.Empty(t, chNext)
_, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.NodeID("b")})
_, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: types.NodeID("b")})
require.NoError(t, err)
select {
case <-chNext:
@ -362,17 +362,17 @@ func TestChunkQueue_Next(t *testing.T) {
default:
}
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.NodeID("a")})
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: types.NodeID("a")})
require.NoError(t, err)
assert.Equal(t,
&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.NodeID("a")},
&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: types.NodeID("a")},
<-chNext)
assert.Equal(t,
&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.NodeID("b")},
&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: types.NodeID("b")},
<-chNext)
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.NodeID("e")})
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: types.NodeID("e")})
require.NoError(t, err)
select {
case <-chNext:
@ -380,19 +380,19 @@ func TestChunkQueue_Next(t *testing.T) {
default:
}
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.NodeID("c")})
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: types.NodeID("c")})
require.NoError(t, err)
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.NodeID("d")})
_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: types.NodeID("d")})
require.NoError(t, err)
assert.Equal(t,
&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.NodeID("c")},
&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: types.NodeID("c")},
<-chNext)
assert.Equal(t,
&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.NodeID("d")},
&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: types.NodeID("d")},
<-chNext)
assert.Equal(t,
&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.NodeID("e")},
&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: types.NodeID("e")},
<-chNext)
_, ok := <-chNext


internal/statesync/dispatcher.go (+19 -19)

@ -31,7 +31,7 @@ type dispatcher struct {
timeout time.Duration
mtx sync.Mutex
calls map[p2p.NodeID]chan *types.LightBlock
calls map[types.NodeID]chan *types.LightBlock
running bool
}
@ -40,12 +40,12 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat
availablePeers: newPeerList(),
timeout: timeout,
requestCh: requestCh,
calls: make(map[p2p.NodeID]chan *types.LightBlock),
calls: make(map[types.NodeID]chan *types.LightBlock),
running: true,
}
}
func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, p2p.NodeID, error) {
func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) {
d.mtx.Lock()
outgoingCalls := len(d.calls)
d.mtx.Unlock()
@ -95,7 +95,7 @@ func (d *dispatcher) start() {
d.running = true
}
func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer p2p.NodeID) (*types.LightBlock, error) {
func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) {
// dispatch the request to the peer
callCh, err := d.dispatch(peer, height)
if err != nil {
@ -119,7 +119,7 @@ func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer p2p.Node
// respond allows the underlying process which receives requests on the
// requestCh to respond with the respective light block
func (d *dispatcher) respond(lb *proto.LightBlock, peer p2p.NodeID) error {
func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error {
d.mtx.Lock()
defer d.mtx.Unlock()
@ -149,11 +149,11 @@ func (d *dispatcher) respond(lb *proto.LightBlock, peer p2p.NodeID) error {
return nil
}
func (d *dispatcher) addPeer(peer p2p.NodeID) {
func (d *dispatcher) addPeer(peer types.NodeID) {
d.availablePeers.Append(peer)
}
func (d *dispatcher) removePeer(peer p2p.NodeID) {
func (d *dispatcher) removePeer(peer types.NodeID) {
d.mtx.Lock()
defer d.mtx.Unlock()
if _, ok := d.calls[peer]; ok {
@ -165,7 +165,7 @@ func (d *dispatcher) removePeer(peer p2p.NodeID) {
// dispatch takes a peer and allocates it a channel so long as it's not already
// busy and the receiving channel is still running. It then dispatches the message
func (d *dispatcher) dispatch(peer p2p.NodeID, height int64) (chan *types.LightBlock, error) {
func (d *dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) {
d.mtx.Lock()
defer d.mtx.Unlock()
ch := make(chan *types.LightBlock, 1)
@ -195,7 +195,7 @@ func (d *dispatcher) dispatch(peer p2p.NodeID, height int64) (chan *types.LightB
// release appends the peer back to the list and deletes the allocated call so
// that a new call can be made to that peer
func (d *dispatcher) release(peer p2p.NodeID) {
func (d *dispatcher) release(peer types.NodeID) {
d.mtx.Lock()
defer d.mtx.Unlock()
if call, ok := d.calls[peer]; ok {
@ -213,7 +213,7 @@ func (d *dispatcher) release(peer p2p.NodeID) {
// TODO: This should probably be moved over to the light package but as we're
// not yet officially supporting p2p light clients we'll leave this here for now.
type blockProvider struct {
peer p2p.NodeID
peer types.NodeID
chainID string
timeout time.Duration
dispatcher *dispatcher
@ -255,14 +255,14 @@ func (p *blockProvider) String() string { return string(p.peer) }
// retrieving blocks over all the peers the reactor is connected to
type peerlist struct {
mtx sync.Mutex
peers []p2p.NodeID
waiting []chan p2p.NodeID
peers []types.NodeID
waiting []chan types.NodeID
}
func newPeerList() *peerlist {
return &peerlist{
peers: make([]p2p.NodeID, 0),
waiting: make([]chan p2p.NodeID, 0),
peers: make([]types.NodeID, 0),
waiting: make([]chan types.NodeID, 0),
}
}
@ -272,12 +272,12 @@ func (l *peerlist) Len() int {
return len(l.peers)
}
func (l *peerlist) Pop() p2p.NodeID {
func (l *peerlist) Pop() types.NodeID {
l.mtx.Lock()
if len(l.peers) == 0 {
// if we don't have any peers in the list we block until a peer is
// appended
wait := make(chan p2p.NodeID, 1)
wait := make(chan types.NodeID, 1)
l.waiting = append(l.waiting, wait)
// unlock whilst waiting so that the list can be appended to
l.mtx.Unlock()
@ -291,7 +291,7 @@ func (l *peerlist) Pop() p2p.NodeID {
return peer
}
func (l *peerlist) Append(peer p2p.NodeID) {
func (l *peerlist) Append(peer types.NodeID) {
l.mtx.Lock()
defer l.mtx.Unlock()
if len(l.waiting) > 0 {
@ -304,7 +304,7 @@ func (l *peerlist) Append(peer p2p.NodeID) {
}
}
func (l *peerlist) Remove(peer p2p.NodeID) {
func (l *peerlist) Remove(peer types.NodeID) {
l.mtx.Lock()
defer l.mtx.Unlock()
for i, p := range l.peers {
@ -315,7 +315,7 @@ func (l *peerlist) Remove(peer p2p.NodeID) {
}
}
func (l *peerlist) Peers() []p2p.NodeID {
func (l *peerlist) Peers() []types.NodeID {
l.mtx.Lock()
defer l.mtx.Unlock()
return l.peers
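A sketch of the blocking hand-off (ID illustrative): Pop parks the caller on a waiting channel until Append delivers a peer.

l := newPeerList()
go func() { l.Append(types.NodeID("aa")) }()
peer := l.Pop() // blocks until the goroutine's Append hands over "aa"
_ = peer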


internal/statesync/dispatcher_test.go (+5 -4)

@ -13,6 +13,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
"github.com/tendermint/tendermint/types"
)
func TestDispatcherBasic(t *testing.T) {
@ -98,7 +99,7 @@ func TestPeerListBasic(t *testing.T) {
}
assert.Equal(t, half, peerList.Len())
peerList.Remove(p2p.NodeID("lp"))
peerList.Remove(types.NodeID("lp"))
assert.Equal(t, half, peerList.Len())
peerList.Remove(peerSet[half])
@ -170,10 +171,10 @@ func handleRequests(t *testing.T, d *dispatcher, ch chan p2p.Envelope, closeCh c
}
}
func createPeerSet(num int) []p2p.NodeID {
peers := make([]p2p.NodeID, num)
func createPeerSet(num int) []types.NodeID {
peers := make([]types.NodeID, num)
for i := 0; i < num; i++ {
peers[i], _ = p2p.NewNodeID(strings.Repeat(fmt.Sprintf("%d", i), 2*p2p.NodeIDByteLength))
peers[i], _ = types.NewNodeID(strings.Repeat(fmt.Sprintf("%d", i), 2*types.NodeIDByteLength))
}
return peers
}

+ 11
- 11
internal/statesync/reactor_test.go View File

@ -167,7 +167,7 @@ func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) {
rts := setup(t, nil, nil, nil, 2)
rts.chunkInCh <- p2p.Envelope{
From: p2p.NodeID("aa"),
From: types.NodeID("aa"),
Message: &ssproto.SnapshotsRequest{},
}
@ -175,7 +175,7 @@ func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) {
require.Error(t, response.Err)
require.Empty(t, rts.chunkOutCh)
require.Contains(t, response.Err.Error(), "received unknown message")
require.Equal(t, p2p.NodeID("aa"), response.NodeID)
require.Equal(t, types.NodeID("aa"), response.NodeID)
}
func TestReactor_ChunkRequest(t *testing.T) {
@ -221,7 +221,7 @@ func TestReactor_ChunkRequest(t *testing.T) {
rts := setup(t, conn, nil, nil, 2)
rts.chunkInCh <- p2p.Envelope{
From: p2p.NodeID("aa"),
From: types.NodeID("aa"),
Message: tc.request,
}
@ -238,7 +238,7 @@ func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) {
rts := setup(t, nil, nil, nil, 2)
rts.snapshotInCh <- p2p.Envelope{
From: p2p.NodeID("aa"),
From: types.NodeID("aa"),
Message: &ssproto.ChunkRequest{},
}
@ -246,7 +246,7 @@ func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) {
require.Error(t, response.Err)
require.Empty(t, rts.snapshotOutCh)
require.Contains(t, response.Err.Error(), "received unknown message")
require.Equal(t, p2p.NodeID("aa"), response.NodeID)
require.Equal(t, types.NodeID("aa"), response.NodeID)
}
func TestReactor_SnapshotsRequest(t *testing.T) {
@ -298,7 +298,7 @@ func TestReactor_SnapshotsRequest(t *testing.T) {
rts := setup(t, conn, nil, nil, 100)
rts.snapshotInCh <- p2p.Envelope{
From: p2p.NodeID("aa"),
From: types.NodeID("aa"),
Message: &ssproto.SnapshotsRequest{},
}
@ -351,7 +351,7 @@ func TestReactor_LightBlockResponse(t *testing.T) {
rts.stateStore.On("LoadValidators", height).Return(vals, nil)
rts.blockInCh <- p2p.Envelope{
From: p2p.NodeID("aa"),
From: types.NodeID("aa"),
Message: &ssproto.LightBlockRequest{
Height: 10,
},
@ -360,7 +360,7 @@ func TestReactor_LightBlockResponse(t *testing.T) {
select {
case response := <-rts.blockOutCh:
require.Equal(t, p2p.NodeID("aa"), response.To)
require.Equal(t, types.NodeID("aa"), response.To)
res, ok := response.Message.(*ssproto.LightBlockResponse)
require.True(t, ok)
receivedLB, err := types.LightBlockFromProto(res.LightBlock)
@ -374,11 +374,11 @@ func TestReactor_LightBlockResponse(t *testing.T) {
func TestReactor_Dispatcher(t *testing.T) {
rts := setup(t, nil, nil, nil, 2)
rts.peerUpdateCh <- p2p.PeerUpdate{
NodeID: p2p.NodeID("aa"),
NodeID: types.NodeID("aa"),
Status: p2p.PeerStatusUp,
}
rts.peerUpdateCh <- p2p.PeerUpdate{
NodeID: p2p.NodeID("bb"),
NodeID: types.NodeID("bb"),
Status: p2p.PeerStatusUp,
}
@ -437,7 +437,7 @@ func TestReactor_Backfill(t *testing.T) {
peers := []string{"a", "b", "c", "d"}
for _, peer := range peers {
rts.peerUpdateCh <- p2p.PeerUpdate{
NodeID: p2p.NodeID(peer),
NodeID: types.NodeID(peer),
Status: p2p.PeerStatusUp,
}
}


internal/statesync/snapshots.go (+15 -15)

@ -10,7 +10,7 @@ import (
"time"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// snapshotKey is a snapshot key used for lookups.
@ -47,16 +47,16 @@ type snapshotPool struct {
tmsync.Mutex
snapshots map[snapshotKey]*snapshot
snapshotPeers map[snapshotKey]map[p2p.NodeID]p2p.NodeID
snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID
// indexes for fast searches
formatIndex map[uint32]map[snapshotKey]bool
heightIndex map[uint64]map[snapshotKey]bool
peerIndex map[p2p.NodeID]map[snapshotKey]bool
peerIndex map[types.NodeID]map[snapshotKey]bool
// blacklists for rejected items
formatBlacklist map[uint32]bool
peerBlacklist map[p2p.NodeID]bool
peerBlacklist map[types.NodeID]bool
snapshotBlacklist map[snapshotKey]bool
}
@ -65,12 +65,12 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool {
return &snapshotPool{
stateProvider: stateProvider,
snapshots: make(map[snapshotKey]*snapshot),
snapshotPeers: make(map[snapshotKey]map[p2p.NodeID]p2p.NodeID),
snapshotPeers: make(map[snapshotKey]map[types.NodeID]types.NodeID),
formatIndex: make(map[uint32]map[snapshotKey]bool),
heightIndex: make(map[uint64]map[snapshotKey]bool),
peerIndex: make(map[p2p.NodeID]map[snapshotKey]bool),
peerIndex: make(map[types.NodeID]map[snapshotKey]bool),
formatBlacklist: make(map[uint32]bool),
peerBlacklist: make(map[p2p.NodeID]bool),
peerBlacklist: make(map[types.NodeID]bool),
snapshotBlacklist: make(map[snapshotKey]bool),
}
}
@ -79,7 +79,7 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool {
// snapshots. It returns true if this was a new, non-blacklisted snapshot. The
// snapshot height is verified using the light client, and the expected app hash
// is set for the snapshot.
func (p *snapshotPool) Add(peerID p2p.NodeID, snapshot *snapshot) (bool, error) {
func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
@ -105,7 +105,7 @@ func (p *snapshotPool) Add(peerID p2p.NodeID, snapshot *snapshot) (bool, error)
}
if p.snapshotPeers[key] == nil {
p.snapshotPeers[key] = make(map[p2p.NodeID]p2p.NodeID)
p.snapshotPeers[key] = make(map[types.NodeID]types.NodeID)
}
p.snapshotPeers[key][peerID] = peerID
@ -142,7 +142,7 @@ func (p *snapshotPool) Best() *snapshot {
}
// GetPeer returns a random peer for a snapshot, if any.
func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.NodeID {
func (p *snapshotPool) GetPeer(snapshot *snapshot) types.NodeID {
peers := p.GetPeers(snapshot)
if len(peers) == 0 {
return ""
@ -151,13 +151,13 @@ func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.NodeID {
}
// GetPeers returns the peers for a snapshot.
func (p *snapshotPool) GetPeers(snapshot *snapshot) []p2p.NodeID {
func (p *snapshotPool) GetPeers(snapshot *snapshot) []types.NodeID {
key := snapshot.Key()
p.Lock()
defer p.Unlock()
peers := make([]p2p.NodeID, 0, len(p.snapshotPeers[key]))
peers := make([]types.NodeID, 0, len(p.snapshotPeers[key]))
for _, peer := range p.snapshotPeers[key] {
peers = append(peers, peer)
}
@ -254,7 +254,7 @@ func (p *snapshotPool) RejectFormat(format uint32) {
}
// RejectPeer rejects a peer. It will never be used again.
func (p *snapshotPool) RejectPeer(peerID p2p.NodeID) {
func (p *snapshotPool) RejectPeer(peerID types.NodeID) {
if len(peerID) == 0 {
return
}
@ -267,14 +267,14 @@ func (p *snapshotPool) RejectPeer(peerID p2p.NodeID) {
}
// RemovePeer removes a peer from the pool, and any snapshots that no longer have peers.
func (p *snapshotPool) RemovePeer(peerID p2p.NodeID) {
func (p *snapshotPool) RemovePeer(peerID types.NodeID) {
p.Lock()
defer p.Unlock()
p.removePeer(peerID)
}
// removePeer removes a peer. The caller must hold the mutex lock.
func (p *snapshotPool) removePeer(peerID p2p.NodeID) {
func (p *snapshotPool) removePeer(peerID types.NodeID) {
for key := range p.peerIndex[peerID] {
delete(p.snapshotPeers[key], peerID)
if len(p.snapshotPeers[key]) == 0 {


+ 19
- 19
internal/statesync/snapshots_test.go View File

@ -6,8 +6,8 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/statesync/mocks"
"github.com/tendermint/tendermint/types"
)
func TestSnapshot_Key(t *testing.T) {
@ -42,7 +42,7 @@ func TestSnapshotPool_Add(t *testing.T) {
stateProvider := &mocks.StateProvider{}
stateProvider.On("AppHash", mock.Anything, uint64(1)).Return([]byte("app_hash"), nil)
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
// Adding to the pool should work
pool := newSnapshotPool(stateProvider)
@ -56,7 +56,7 @@ func TestSnapshotPool_Add(t *testing.T) {
require.True(t, added)
// Adding again from a different peer should return false
otherNodeID := p2p.NodeID("bb")
otherNodeID := types.NodeID("bb")
added, err = pool.Add(otherNodeID, &snapshot{
Height: 1,
Format: 1,
@ -81,8 +81,8 @@ func TestSnapshotPool_GetPeer(t *testing.T) {
s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}}
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
_, err := pool.Add(peerAID, s)
require.NoError(t, err)
@ -118,8 +118,8 @@ func TestSnapshotPool_GetPeers(t *testing.T) {
s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}}
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
_, err := pool.Add(peerAID, s)
require.NoError(t, err)
@ -146,13 +146,13 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) {
// tie-breaker.
expectSnapshots := []struct {
snapshot *snapshot
peers []p2p.NodeID
peers []types.NodeID
}{
{&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []p2p.NodeID{"AA", "BB", "CC", "DD"}},
{&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC", "DD"}},
{&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC"}},
{&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC"}},
{&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC"}},
{&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []types.NodeID{"AA", "BB", "CC", "DD"}},
{&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC", "DD"}},
{&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}},
{&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}},
{&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}},
}
// Add snapshots in reverse order, to make sure the pool enforces some order.
@ -186,7 +186,7 @@ func TestSnapshotPool_Reject(t *testing.T) {
stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
pool := newSnapshotPool(stateProvider)
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
snapshots := []*snapshot{
{Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}},
@ -216,7 +216,7 @@ func TestSnapshotPool_RejectFormat(t *testing.T) {
stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
pool := newSnapshotPool(stateProvider)
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
snapshots := []*snapshot{
{Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}},
@ -247,8 +247,8 @@ func TestSnapshotPool_RejectPeer(t *testing.T) {
stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
pool := newSnapshotPool(stateProvider)
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}}
s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}
@ -289,8 +289,8 @@ func TestSnapshotPool_RemovePeer(t *testing.T) {
stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)
pool := newSnapshotPool(stateProvider)
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}}
s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}


+ 4
- 4
internal/statesync/syncer.go View File

@ -113,7 +113,7 @@ func (s *syncer) AddChunk(chunk *chunk) (bool, error) {
// AddSnapshot adds a snapshot to the snapshot pool. It returns true if a new, previously unseen
// snapshot was accepted and added.
func (s *syncer) AddSnapshot(peerID p2p.NodeID, snapshot *snapshot) (bool, error) {
func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, error) {
added, err := s.snapshots.Add(peerID, snapshot)
if err != nil {
return false, err
@ -127,7 +127,7 @@ func (s *syncer) AddSnapshot(peerID p2p.NodeID, snapshot *snapshot) (bool, error
// AddPeer adds a peer to the pool. For now we just keep it simple and send a
// single request to discover snapshots, later we may want to do retries and stuff.
func (s *syncer) AddPeer(peerID p2p.NodeID) {
func (s *syncer) AddPeer(peerID types.NodeID) {
s.logger.Debug("Requesting snapshots from peer", "peer", peerID)
s.snapshotCh <- p2p.Envelope{
To: peerID,
@ -136,7 +136,7 @@ func (s *syncer) AddPeer(peerID p2p.NodeID) {
}
// RemovePeer removes a peer from the pool.
func (s *syncer) RemovePeer(peerID p2p.NodeID) {
func (s *syncer) RemovePeer(peerID types.NodeID) {
s.logger.Debug("Removing peer from sync", "peer", peerID)
s.snapshots.RemovePeer(peerID)
}
@ -371,7 +371,7 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error {
// Reject any senders as requested by the app
for _, sender := range resp.RejectSenders {
if sender != "" {
peerID := p2p.NodeID(sender)
peerID := types.NodeID(sender)
s.snapshots.RejectPeer(peerID)
if err := chunks.DiscardSender(peerID); err != nil {


+ 13
- 14
internal/statesync/syncer_test.go View File

@ -13,7 +13,6 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/statesync/mocks"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
"github.com/tendermint/tendermint/proxy"
@ -66,9 +65,9 @@ func TestSyncer_SyncAny(t *testing.T) {
connSnapshot := &proxymocks.AppConnSnapshot{}
connQuery := &proxymocks.AppConnQuery{}
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerCID := p2p.NodeID("cc")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
peerCID := types.NodeID("cc")
rts := setup(t, connSnapshot, connQuery, stateProvider, 3)
// Adding a chunk should error when no sync is in progress
@ -217,7 +216,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) {
rts := setup(t, nil, nil, stateProvider, 2)
s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
_, err := rts.syncer.AddSnapshot(peerID, s)
require.NoError(t, err)
@ -242,7 +241,7 @@ func TestSyncer_SyncAny_reject(t *testing.T) {
s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
_, err := rts.syncer.AddSnapshot(peerID, s22)
require.NoError(t, err)
@ -281,7 +280,7 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) {
s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
_, err := rts.syncer.AddSnapshot(peerID, s22)
require.NoError(t, err)
@ -311,9 +310,9 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) {
rts := setup(t, nil, nil, stateProvider, 2)
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerCID := p2p.NodeID("cc")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
peerCID := types.NodeID("cc")
// sbc will be offered first, which will be rejected with reject_sender, causing all snapshots
// submitted by both b and c (i.e. sb, sc, sbc) to be rejected. Finally, sa will reject and
@ -360,7 +359,7 @@ func TestSyncer_SyncAny_abciError(t *testing.T) {
errBoom := errors.New("boom")
s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
peerID := p2p.NodeID("aa")
peerID := types.NodeID("aa")
_, err := rts.syncer.AddSnapshot(peerID, s)
require.NoError(t, err)
@ -561,9 +560,9 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) {
// Set up three peers across two snapshots, and ask for one of them to be banned.
// It should be banned from all snapshots.
peerAID := p2p.NodeID("aa")
peerBID := p2p.NodeID("bb")
peerCID := p2p.NodeID("cc")
peerAID := types.NodeID("aa")
peerBID := types.NodeID("bb")
peerCID := types.NodeID("cc")
s1 := &snapshot{Height: 1, Format: 1, Chunks: 3}
s2 := &snapshot{Height: 2, Format: 1, Chunks: 3}


+ 3
- 3
node/node.go View File

@ -591,11 +591,11 @@ func (n *nodeImpl) OnStart() error {
}
// Start the transport.
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(n.nodeKey.ID, n.config.P2P.ListenAddress))
addr, err := types.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress))
if err != nil {
return err
}
if err := n.transport.Listen(addr.Endpoint()); err != nil {
if err := n.transport.Listen(p2p.NewEndpoint(addr)); err != nil {
return err
}
@ -1205,7 +1205,7 @@ func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOption
}
if conf.FilterPeers && proxyApp != nil {
opts.FilterPeerByID = func(ctx context.Context, id p2p.NodeID) error {
opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error {
res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
Path: fmt.Sprintf("/p2p/filter/id/%s", id),
})


+ 5
- 5
node/setup.go View File

@ -454,7 +454,7 @@ func createPeerManager(
config *cfg.Config,
dbProvider cfg.DBProvider,
p2pLogger log.Logger,
nodeID p2p.NodeID,
nodeID types.NodeID,
) (*p2p.PeerManager, error) {
var maxConns uint16
@ -480,9 +480,9 @@ func createPeerManager(
maxConns = 64
}
privatePeerIDs := make(map[p2p.NodeID]struct{})
privatePeerIDs := make(map[types.NodeID]struct{})
for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") {
privatePeerIDs[p2p.NodeID(id)] = struct{}{}
privatePeerIDs[types.NodeID(id)] = struct{}{}
}
options := p2p.PeerManagerOptions{
@ -651,14 +651,14 @@ func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
// Add ourselves to addrbook to prevent dialing ourselves
if config.P2P.ExternalAddress != "" {
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ExternalAddress))
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress))
if err != nil {
return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
}
addrBook.AddOurAddress(addr)
}
if config.P2P.ListenAddress != "" {
addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ListenAddress))
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress))
if err != nil {
return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
}
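For callers outside this diff the migration is mechanical: p2p.IDAddressString(id, addr) becomes id.AddressString(addr), and NewNetAddressString moves from the p2p package to types. A minimal sketch of the new call shape (the generated key and the listen address are illustrative, not taken from this commit):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Derive a node ID from a fresh key; any valid 40-hex ID works.
	nodeID := types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())

	// Old: p2p.NewNetAddressString(p2p.IDAddressString(nodeID, laddr))
	// New: the ID formats its own peer string, and address parsing
	// lives in the types package.
	addr, err := types.NewNetAddressString(nodeID.AddressString("tcp://0.0.0.0:26656"))
	if err != nil {
		panic(err)
	}
	fmt.Println(addr) // <40-hex-id>@0.0.0.0:26656
}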


+ 6
- 5
test/fuzz/p2p/addrbook/init-corpus/main.go View File

@ -13,6 +13,7 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
func main() {
@ -34,11 +35,11 @@ func initCorpus(baseDir string) {
// create corpus
privKey := ed25519.GenPrivKey()
addrs := []*p2p.NetAddress{
{ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0},
{ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80},
{ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808},
{ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656},
{ID: p2p.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656},
}
for i, addr := range addrs {


+ 7
- 5
test/fuzz/p2p/pex/init-corpus/main.go View File

@ -12,7 +12,9 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/pex"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
func main() {
@ -42,20 +44,20 @@ func initCorpus(rootDir string) {
privKey := ed25519.GenPrivKey()
addr := fmt.Sprintf(
"%s@%v.%v.%v.%v:26656",
p2p.NodeIDFromPubKey(privKey.PubKey()),
types.NodeIDFromPubKey(privKey.PubKey()),
rand.Int()%256,
rand.Int()%256,
rand.Int()%256,
rand.Int()%256,
)
netAddr, _ := p2p.NewNetAddressString(addr)
netAddr, _ := types.NewNetAddressString(addr)
addrs = append(addrs, netAddr)
}
// IPv6 addresses
privKey := ed25519.GenPrivKey()
ipv6a, err := p2p.NewNetAddressString(
fmt.Sprintf("%s@[ff02::1:114]:26656", p2p.NodeIDFromPubKey(privKey.PubKey())))
ipv6a, err := types.NewNetAddressString(
fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey())))
if err != nil {
log.Fatalf("can't create a new netaddress: %v", err)
}
@ -63,7 +65,7 @@ func initCorpus(rootDir string) {
msg := tmp2p.PexMessage{
Sum: &tmp2p.PexMessage_PexResponse{
PexResponse: &tmp2p.PexResponse{Addresses: p2p.NetAddressesToProto(addrs)},
PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)},
},
}
bz, err := msg.Marshal()


+ 11
- 8
test/fuzz/p2p/pex/reactor_receive.go View File

@ -9,6 +9,7 @@ import (
"github.com/tendermint/tendermint/internal/p2p/pex"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
@ -62,7 +63,7 @@ func newFuzzPeer() *fuzzPeer {
}
var privKey = ed25519.GenPrivKey()
var nodeID = p2p.NodeIDFromPubKey(privKey.PubKey())
var nodeID = types.NodeIDFromPubKey(privKey.PubKey())
var defaultNodeInfo = p2p.NodeInfo{
ProtocolVersion: p2p.NewProtocolVersion(
version.P2PProtocol,
@ -75,17 +76,19 @@ var defaultNodeInfo = p2p.NodeInfo{
}
func (fp *fuzzPeer) FlushStop() {}
func (fp *fuzzPeer) ID() p2p.NodeID { return nodeID }
func (fp *fuzzPeer) ID() types.NodeID { return nodeID }
func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) }
func (fp *fuzzPeer) RemoteAddr() net.Addr {
return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""}
}
func (fp *fuzzPeer) IsOutbound() bool { return false }
func (fp *fuzzPeer) IsPersistent() bool { return false }
func (fp *fuzzPeer) CloseConn() error { return nil }
func (fp *fuzzPeer) NodeInfo() p2p.NodeInfo { return defaultNodeInfo }
func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs }
func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { return p2p.NewNetAddress(fp.ID(), fp.RemoteAddr()) }
func (fp *fuzzPeer) IsOutbound() bool { return false }
func (fp *fuzzPeer) IsPersistent() bool { return false }
func (fp *fuzzPeer) CloseConn() error { return nil }
func (fp *fuzzPeer) NodeInfo() p2p.NodeInfo { return defaultNodeInfo }
func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs }
func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress {
return types.NewNetAddress(fp.ID(), fp.RemoteAddr())
}
func (fp *fuzzPeer) Send(byte, []byte) bool { return true }
func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true }
func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value }


+ 33
- 0
types/errors_p2p.go View File

@ -0,0 +1,33 @@
package types
import (
"fmt"
)
//-------------------------------------------------------------------
type ErrNetAddressNoID struct {
Addr string
}
func (e ErrNetAddressNoID) Error() string {
return fmt.Sprintf("address (%s) does not contain ID", e.Addr)
}
type ErrNetAddressInvalid struct {
Addr string
Err error
}
func (e ErrNetAddressInvalid) Error() string {
return fmt.Sprintf("invalid address (%s): %v", e.Addr, e.Err)
}
type ErrNetAddressLookup struct {
Addr string
Err error
}
func (e ErrNetAddressLookup) Error() string {
return fmt.Sprintf("error looking up host (%s): %v", e.Addr, e.Err)
}
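These errors are returned by value from types.NewNetAddressString (see types/netaddress.go below), so callers can branch on them with a type switch. A minimal sketch, using a deliberately malformed address:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// No "ID@" prefix, so parsing fails with ErrNetAddressNoID.
	_, err := types.NewNetAddressString("127.0.0.1:26656")

	switch e := err.(type) {
	case types.ErrNetAddressNoID:
		fmt.Println("missing node ID in:", e.Addr)
	case types.ErrNetAddressInvalid:
		fmt.Println("invalid address:", e.Err)
	case types.ErrNetAddressLookup:
		fmt.Println("host lookup failed:", e.Err)
	default:
		fmt.Println("parsed OK or unexpected error:", err)
	}
}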

+ 329
- 0
types/netaddress.go View File

@ -0,0 +1,329 @@
// Modified for Tendermint
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE
package types
import (
"errors"
"flag"
"fmt"
"net"
"strconv"
"strings"
"time"
)
// EmptyNetAddress defines the string representation of an empty NetAddress
const EmptyNetAddress = "<nil-NetAddress>"
// NetAddress defines information about a peer on the network
// including its ID, IP address, and port.
type NetAddress struct {
ID NodeID `json:"id"`
IP net.IP `json:"ip"`
Port uint16 `json:"port"`
}
// NewNetAddress returns a new NetAddress using the provided TCP
// address. In tests, a non-TCP net.Addr falls back to 127.0.0.1:0; in a
// normal run, a non-TCP net.Addr causes a panic. Also panics if the ID
// is invalid.
// TODO: socks proxies?
func NewNetAddress(id NodeID, addr net.Addr) *NetAddress {
tcpAddr, ok := addr.(*net.TCPAddr)
if !ok {
if flag.Lookup("test.v") == nil { // normal run
panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
} else { // in testing
netAddr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 0)
netAddr.ID = id
return netAddr
}
}
if err := id.Validate(); err != nil {
panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
}
ip := tcpAddr.IP
port := uint16(tcpAddr.Port)
na := NewNetAddressIPPort(ip, port)
na.ID = id
return na
}
// NewNetAddressIPPort returns a new NetAddress using the provided IP
// and port number.
func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
return &NetAddress{
IP: ip,
Port: port,
}
}
// NewNetAddressString returns a new NetAddress using the provided address in
// the form of "ID@IP:Port".
// Also resolves the host if host is not an IP.
// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
func NewNetAddressString(addr string) (*NetAddress, error) {
addrWithoutProtocol := removeProtocolIfDefined(addr)
spl := strings.Split(addrWithoutProtocol, "@")
if len(spl) != 2 {
return nil, ErrNetAddressNoID{addr}
}
id, err := NewNodeID(spl[0])
if err != nil {
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
}
if err := id.Validate(); err != nil {
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
}
addrWithoutProtocol = spl[1]
// get host and port
host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
if err != nil {
return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
}
if len(host) == 0 {
return nil, ErrNetAddressInvalid{
addrWithoutProtocol,
errors.New("host is empty")}
}
ip := net.ParseIP(host)
if ip == nil {
ips, err := net.LookupIP(host)
if err != nil {
return nil, ErrNetAddressLookup{host, err}
}
ip = ips[0]
}
port, err := strconv.ParseUint(portStr, 10, 16)
if err != nil {
return nil, ErrNetAddressInvalid{portStr, err}
}
na := NewNetAddressIPPort(ip, uint16(port))
na.ID = id
return na, nil
}
// Equals reports whether na and other are the same addresses,
// including their ID, IP, and Port.
func (na *NetAddress) Equals(other interface{}) bool {
if o, ok := other.(*NetAddress); ok {
return na.String() == o.String()
}
return false
}
// Same returns true if na has the same non-empty ID or the same DialString as other.
func (na *NetAddress) Same(other interface{}) bool {
if o, ok := other.(*NetAddress); ok {
if na.DialString() == o.DialString() {
return true
}
if na.ID != "" && na.ID == o.ID {
return true
}
}
return false
}
// String representation: <ID>@<IP>:<PORT>
func (na *NetAddress) String() string {
if na == nil {
return EmptyNetAddress
}
addrStr := na.DialString()
if na.ID != "" {
addrStr = na.ID.AddressString(addrStr)
}
return addrStr
}
// DialString returns the address in host:port form, suitable for dialing.
func (na *NetAddress) DialString() string {
if na == nil {
return EmptyNetAddress
}
return net.JoinHostPort(
na.IP.String(),
strconv.FormatUint(uint64(na.Port), 10),
)
}
// Dial calls net.Dial on the address.
func (na *NetAddress) Dial() (net.Conn, error) {
conn, err := net.Dial("tcp", na.DialString())
if err != nil {
return nil, err
}
return conn, nil
}
// DialTimeout calls net.DialTimeout on the address.
func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
if err != nil {
return nil, err
}
return conn, nil
}
// Routable returns true if the address is routable.
func (na *NetAddress) Routable() bool {
if err := na.Valid(); err != nil {
return false
}
// TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
na.RFC4193() || na.RFC4843() || na.Local())
}
// Valid returns an error if the address is invalid, nil otherwise. For
// IPv4 that means a zero or all-bits-set address; for IPv6, the zero
// address or one matching the RFC3849 documentation address format.
func (na *NetAddress) Valid() error {
if err := na.ID.Validate(); err != nil {
return fmt.Errorf("invalid ID: %w", err)
}
if na.IP == nil {
return errors.New("no IP")
}
if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
return errors.New("invalid IP")
}
return nil
}
// Local returns true if it is a local address.
func (na *NetAddress) Local() bool {
return na.IP.IsLoopback() || zero4.Contains(na.IP)
}
// ReachabilityTo checks whether o can be reached from na.
func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
const (
Unreachable = 0
Default = iota
Teredo
Ipv6Weak
Ipv4
Ipv6Strong
)
switch {
case !na.Routable():
return Unreachable
case na.RFC4380():
switch {
case !o.Routable():
return Default
case o.RFC4380():
return Teredo
case o.IP.To4() != nil:
return Ipv4
default: // ipv6
return Ipv6Weak
}
case na.IP.To4() != nil:
if o.Routable() && o.IP.To4() != nil {
return Ipv4
}
return Default
default: /* ipv6 */
var tunneled bool
// Is o's IPv6 tunneled?
if o.RFC3964() || o.RFC6052() || o.RFC6145() {
tunneled = true
}
switch {
case !o.Routable():
return Default
case o.RFC4380():
return Teredo
case o.IP.To4() != nil:
return Ipv4
case tunneled:
// only prioritize ipv6 if we aren't tunneling it.
return Ipv6Weak
}
return Ipv6Strong
}
}
// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
// RFC3964: IPv6 6to4 (2002::/16)
// RFC4193: IPv6 unique local (FC00::/7)
// RFC4380: IPv6 Teredo tunneling (2001::/32)
// RFC4843: IPv6 ORCHID: (2001:10::/28)
// RFC4862: IPv6 Autoconfig (FE80::/64)
// RFC6052: IPv6 well known prefix (64:FF9B::/96)
// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
var (
// onionCatNet defines the IPv6 address block used to support Tor.
// bitcoind encodes a .onion address as a 16 byte number by base32-decoding
// the address prior to the .onion (i.e. the key hash) into a ten byte
// number. It then stores the first 6 bytes of the address as
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
//
// This is the same range used by OnionCat, which is part of the
// RFC4193 unique local IPv6 range.
//
// In summary the format is:
// { magic 6 bytes, 10 bytes base32 decode of key hash }
onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
)
func (na *NetAddress) RFC1918() bool {
return rfc1918_10.Contains(na.IP) ||
rfc1918_192.Contains(na.IP) ||
rfc1918_172.Contains(na.IP)
}
func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) }
func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) }
func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) }
func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) }
func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) }
func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) }
func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) }
func removeProtocolIfDefined(addr string) string {
if strings.Contains(addr, "://") {
return strings.Split(addr, "://")[1]
}
return addr
}
// ipNet returns a net.IPNet struct given the passed IP address string, number
// of one bits to include at the start of the mask, and the total number of bits
// for the mask.
func ipNet(ip string, ones, bits int) net.IPNet {
return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
}
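As a quick orientation to the relocated API, the sketch below parses two addresses and exercises the classifiers; the 40-hex node ID is made up but well-formed:

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// A made-up but well-formed 40-hex node ID.
	const id = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"

	public, err := types.NewNetAddressString(id + "@203.0.113.1:26656")
	if err != nil {
		panic(err)
	}
	private, err := types.NewNetAddressString(id + "@10.0.0.1:26656")
	if err != nil {
		panic(err)
	}

	fmt.Println(public.Routable())    // true: not in any special-use range
	fmt.Println(private.Routable())   // false: 10.0.0.0/8 is RFC1918
	fmt.Println(public.Same(private)) // true: same non-empty ID
}

ReachabilityTo then ranks pairs of addresses on top of these same classifiers, preferring plain IPv4 and untunneled IPv6 over Teredo.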

internal/p2p/netaddress_test.go → types/netaddress_test.go View File


+ 67
- 0
types/node_id.go View File

@ -0,0 +1,67 @@
package types
import (
"encoding/hex"
"errors"
"fmt"
"regexp"
"strings"
"github.com/tendermint/tendermint/crypto"
)
// NodeIDByteLength is the length of a crypto.Address in bytes (currently 20).
// FIXME: support other length addresses?
const NodeIDByteLength = crypto.AddressSize
// reNodeID is a regexp for valid node IDs.
var reNodeID = regexp.MustCompile(`^[0-9a-f]{40}$`)
// NodeID is a hex-encoded crypto.Address. It must be lowercased
// (for uniqueness) and of length 2*NodeIDByteLength.
type NodeID string
// NewNodeID returns a lowercased (normalized) NodeID, or errors if the
// node ID is invalid.
func NewNodeID(nodeID string) (NodeID, error) {
n := NodeID(strings.ToLower(nodeID))
return n, n.Validate()
}
// AddressString returns id@hostPort. It strips the leading
// protocol (e.g. "tcp://") from protocolHostPort if one is present.
func (id NodeID) AddressString(protocolHostPort string) string {
hostPort := removeProtocolIfDefined(protocolHostPort)
return fmt.Sprintf("%s@%s", id, hostPort)
}
// NodeIDFromPubKey creates a node ID from a given PubKey address.
func NodeIDFromPubKey(pubKey crypto.PubKey) NodeID {
return NodeID(hex.EncodeToString(pubKey.Address()))
}
// Bytes converts the node ID to its binary byte representation.
func (id NodeID) Bytes() ([]byte, error) {
bz, err := hex.DecodeString(string(id))
if err != nil {
return nil, fmt.Errorf("invalid node ID encoding: %w", err)
}
return bz, nil
}
// Validate validates the NodeID.
func (id NodeID) Validate() error {
switch {
case len(id) == 0:
return errors.New("empty node ID")
case len(id) != 2*NodeIDByteLength:
return fmt.Errorf("invalid node ID length %d, expected %d", len(id), 2*NodeIDByteLength)
case !reNodeID.MatchString(string(id)):
return fmt.Errorf("node ID can only contain lowercased hex digits")
default:
return nil
}
}
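NewNodeID normalizes and validates in one step, which is what lets mixed-case IDs from config files round-trip to the canonical lowercase form. A minimal sketch (the hex strings are illustrative):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/types"
)

func main() {
	// Mixed-case input is lowercased before validation.
	id, err := types.NewNodeID("DEADBEEFdeadbeefDEADBEEFdeadbeefDEADBEEF")
	fmt.Println(id, err) // deadbeef...deadbeef <nil>

	// Too short: Validate reports the length mismatch.
	_, err = types.NewNodeID("abcd")
	fmt.Println(err) // invalid node ID length 4, expected 40

	// Build a peer string for configs such as persistent_peers.
	fmt.Println(id.AddressString("tcp://127.0.0.1:26656")) // <id>@127.0.0.1:26656
}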
