
p2p: delete legacy stack initial pass (#7035)

A few notes:

- this is not all the deletion that we can do, but this is the most
  "simple" case: it leaves in shims, and there's some trivial
  additional cleanup to the transport that can happen but that
  requires writing more code, and I wanted this to be easy to review
  above all else.
  
- This should land *after* we cut the branch for 0.35, but I'm
  anticipating that to happen soon, and I wanted to run this through
  CI.
Sam Kleinman committed 3 years ago (via GitHub)
commit 03ad7d6f20
54 changed files with 127 additions and 10207 deletions

  1. CHANGELOG_PENDING.md (+7, -0)
  2. config/config.go (+0, -6)
  3. config/toml.go (+0, -3)
  4. docs/nodes/configuration.md (+0, -3)
  5. internal/p2p/base_reactor.go (+0, -74)
  6. internal/p2p/conn/secret_connection_test.go (+1, -1)
  7. internal/p2p/mock/reactor.go (+0, -23)
  8. internal/p2p/peer.go (+0, -371)
  9. internal/p2p/peer_set.go (+0, -149)
  10. internal/p2p/peer_set_test.go (+0, -190)
  11. internal/p2p/peer_test.go (+0, -239)
  12. internal/p2p/pex/addrbook.go (+0, -948)
  13. internal/p2p/pex/addrbook_test.go (+0, -777)
  14. internal/p2p/pex/bench_test.go (+0, -24)
  15. internal/p2p/pex/errors.go (+0, -11)
  16. internal/p2p/pex/file.go (+0, -83)
  17. internal/p2p/pex/known_address.go (+0, -141)
  18. internal/p2p/pex/params.go (+0, -55)
  19. internal/p2p/pex/pex_reactor.go (+0, -862)
  20. internal/p2p/pex/pex_reactor_test.go (+0, -680)
  21. internal/p2p/pex/reactor.go (+16, -0)
  22. internal/p2p/router.go (+3, -1)
  23. internal/p2p/shim.go (+42, -261)
  24. internal/p2p/shim_test.go (+0, -207)
  25. internal/p2p/switch.go (+0, -1064)
  26. internal/p2p/switch_test.go (+0, -932)
  27. internal/p2p/test_util.go (+0, -256)
  28. internal/p2p/transport.go (+0, -9)
  29. internal/rpc/core/consensus.go (+16, -44)
  30. internal/rpc/core/env.go (+0, -9)
  31. internal/rpc/core/net.go (+10, -105)
  32. internal/rpc/core/net_test.go (+0, -89)
  33. internal/rpc/core/routes.go (+0, -2)
  34. internal/rpc/core/status.go (+1, -0)
  35. node/node.go (+20, -146)
  36. node/setup.go (+8, -186)
  37. rpc/client/local/local.go (+0, -14)
  38. rpc/client/mock/client.go (+0, -14)
  39. test/e2e/generator/generate.go (+1, -90)
  40. test/e2e/generator/generate_test.go (+2, -70)
  41. test/e2e/generator/main.go (+0, -16)
  42. test/e2e/networks/ci.toml (+0, -1)
  43. test/e2e/pkg/manifest.go (+0, -3)
  44. test/e2e/pkg/testnet.go (+0, -2)
  45. test/e2e/runner/setup.go (+0, -2)
  46. test/fuzz/p2p/addrbook/fuzz.go (+0, -35)
  47. test/fuzz/p2p/addrbook/fuzz_test.go (+0, -33)
  48. test/fuzz/p2p/addrbook/init-corpus/main.go (+0, -59)
  49. test/fuzz/p2p/addrbook/testdata/cases/empty (+0, -0)
  50. test/fuzz/p2p/pex/fuzz_test.go (+0, -33)
  51. test/fuzz/p2p/pex/init-corpus/main.go (+0, -84)
  52. test/fuzz/p2p/pex/reactor_receive.go (+0, -95)
  53. test/fuzz/p2p/pex/testdata/addrbook1 (+0, -1705)
  54. test/fuzz/p2p/pex/testdata/cases/empty (+0, -0)

CHANGELOG_PENDING.md (+7, -0)

@@ -16,8 +16,15 @@ Special thanks to external contributors on this release:
- P2P Protocol
- [p2p] \#7035 Remove legacy P2P routing implementation and
associated configuration (@tychoish)
- Go API
- [blocksync] \#7046 Remove v2 implementation of the blocksync
service and reactor, which was disabled in the previous release
(@tychoish)
- Blockchain Protocol
### FEATURES


config/config.go (+0, -6)

@@ -709,11 +709,6 @@ type P2PConfig struct { //nolint: maligned
// Force dial to fail
TestDialFail bool `mapstructure:"test-dial-fail"`
// UseLegacy enables the "legacy" P2P implementation and
// disables the newer default implementation. This flag will
// be removed in a future release.
UseLegacy bool `mapstructure:"use-legacy"`
// Makes it possible to configure which queue backend the p2p
// layer uses. Options are: "fifo", "priority" and "wdrr",
// with the default being "priority".
@@ -748,7 +743,6 @@ func DefaultP2PConfig() *P2PConfig {
DialTimeout: 3 * time.Second,
TestDialFail: false,
QueueType: "priority",
UseLegacy: false,
}
}


config/toml.go (+0, -3)

@@ -265,9 +265,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
#######################################################
[p2p]
# Enable the legacy p2p layer.
use-legacy = {{ .P2P.UseLegacy }}
# Select the p2p internal queue
queue-type = "{{ .P2P.QueueType }}"


docs/nodes/configuration.md (+0, -3)

@@ -221,9 +221,6 @@ pprof-laddr = ""
#######################################################
[p2p]
# Enable the legacy p2p layer.
use-legacy = false
# Select the p2p internal queue
queue-type = "priority"


internal/p2p/base_reactor.go (+0, -74)

@@ -1,74 +0,0 @@
package p2p
import (
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/service"
)
// Reactor is responsible for handling incoming messages on one or more
// Channel. Switch calls GetChannels when reactor is added to it. When a new
// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
// when the peer is stopped. Receive is called when a message is received on a
// channel associated with this reactor.
//
// Peer#Send or Peer#TrySend should be used to send the message to a peer.
type Reactor interface {
service.Service // Start, Stop
// SetSwitch allows setting a switch.
SetSwitch(*Switch)
// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
// that each ID is unique across all the reactors added to the switch.
GetChannels() []*conn.ChannelDescriptor
// InitPeer is called by the switch before the peer is started. Use it to
// initialize data for the peer (e.g. peer state).
//
// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
// the peer. Do not store any data associated with the peer in the reactor
// itself unless you don't want to have a state, which is never cleaned up.
InitPeer(peer Peer) Peer
// AddPeer is called by the switch after the peer is added and successfully
// started. Use it to start goroutines communicating with the peer.
AddPeer(peer Peer)
// RemovePeer is called by the switch when the peer is stopped (due to error
// or other reason).
RemovePeer(peer Peer, reason interface{})
// Receive is called by the switch when msgBytes is received from the peer.
//
// NOTE reactor can not keep msgBytes around after Receive completes without
// copying.
//
// CONTRACT: msgBytes are not nil.
//
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
Receive(chID byte, peer Peer, msgBytes []byte)
}
//--------------------------------------
type BaseReactor struct {
service.BaseService // Provides Start, Stop, .Quit
Switch *Switch
}
func NewBaseReactor(name string, impl Reactor) *BaseReactor {
return &BaseReactor{
BaseService: *service.NewBaseService(nil, name, impl),
Switch: nil,
}
}
func (br *BaseReactor) SetSwitch(sw *Switch) {
br.Switch = sw
}
func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil }
func (*BaseReactor) AddPeer(peer Peer) {}
func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {}
func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {}
func (*BaseReactor) InitPeer(peer Peer) Peer { return peer }
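
For reference, reactors built on this (now deleted) interface typically embedded BaseReactor to inherit the no-op defaults and overrode only the hooks they needed, as the mock reactor further down also does. A hypothetical sketch against the interface as quoted above (the channel ID and names are made up for illustration):

```go
package echo

import (
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/conn"
)

// Reactor echoes every message back on the channel it arrived on.
type Reactor struct {
	p2p.BaseReactor // provides SetSwitch, InitPeer, AddPeer, RemovePeer defaults
}

func NewReactor() *Reactor {
	r := &Reactor{}
	r.BaseReactor = *p2p.NewBaseReactor("Echo", r)
	return r
}

// GetChannels declares the single channel this reactor speaks on; the ID
// must be unique across all reactors added to the switch.
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor {
	return []*conn.ChannelDescriptor{{ID: 0x99, Priority: 1}}
}

// Receive sends the payload straight back, using Peer#Send per the
// contract in the interface comment above.
func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
	peer.Send(chID, msgBytes)
}
```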

internal/p2p/conn/secret_connection_test.go (+1, -1)

@@ -195,7 +195,7 @@ func TestSecretConnectionReadWrite(t *testing.T) {
compareWritesReads := func(writes []string, reads []string) {
for {
// Pop next write & corresponding reads
-var read, write string = "", writes[0]
+var read, write = "", writes[0]
var readCount = 0
for _, readChunk := range reads {
read += readChunk


internal/p2p/mock/reactor.go (+0, -23)

@@ -1,23 +0,0 @@
package mock
import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
)
type Reactor struct {
p2p.BaseReactor
}
func NewReactor() *Reactor {
r := &Reactor{}
r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r)
r.SetLogger(log.TestingLogger())
return r
}
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} }
func (r *Reactor) AddPeer(peer p2p.Peer) {}
func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {}
func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {}

internal/p2p/peer.go (+0, -371)

@@ -1,371 +0,0 @@
package p2p
import (
"fmt"
"io"
"net"
"runtime/debug"
"time"
tmconn "github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/cmap"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
//go:generate ../../scripts/mockery_generate.sh Peer
const metricsTickerDuration = 10 * time.Second
// Peer is an interface representing a peer connected on a reactor.
type Peer interface {
service.Service
FlushStop()
ID() types.NodeID // peer's cryptographic ID
RemoteIP() net.IP // remote IP of the connection
RemoteAddr() net.Addr // remote address of the connection
IsOutbound() bool // did we dial the peer
IsPersistent() bool // do we redial this peer when we disconnect
CloseConn() error // close original connection
NodeInfo() types.NodeInfo // peer's info
Status() tmconn.ConnectionStatus
SocketAddr() *NetAddress // actual address of the socket
Send(byte, []byte) bool
TrySend(byte, []byte) bool
Set(string, interface{})
Get(string) interface{}
}
//----------------------------------------------------------
// peerConn contains the raw connection and its config.
type peerConn struct {
outbound bool
persistent bool
conn Connection
ip net.IP // cached RemoteIP()
}
func newPeerConn(outbound, persistent bool, conn Connection) peerConn {
return peerConn{
outbound: outbound,
persistent: persistent,
conn: conn,
}
}
// Return the IP from the connection RemoteAddr
func (pc peerConn) RemoteIP() net.IP {
if pc.ip == nil {
pc.ip = pc.conn.RemoteEndpoint().IP
}
return pc.ip
}
// peer implements Peer.
//
// Before using a peer, you will need to perform a handshake on connection.
type peer struct {
service.BaseService
// raw peerConn and the multiplex connection
peerConn
// peer's node info and the channel it knows about
// channels = nodeInfo.Channels
// cached to avoid copying nodeInfo in hasChannel
nodeInfo types.NodeInfo
channels []byte
reactors map[byte]Reactor
onPeerError func(Peer, interface{})
// User data
Data *cmap.CMap
metrics *Metrics
metricsTicker *time.Ticker
}
type PeerOption func(*peer)
func newPeer(
nodeInfo types.NodeInfo,
pc peerConn,
reactorsByCh map[byte]Reactor,
onPeerError func(Peer, interface{}),
options ...PeerOption,
) *peer {
p := &peer{
peerConn: pc,
nodeInfo: nodeInfo,
channels: nodeInfo.Channels,
reactors: reactorsByCh,
onPeerError: onPeerError,
Data: cmap.NewCMap(),
metricsTicker: time.NewTicker(metricsTickerDuration),
metrics: NopMetrics(),
}
p.BaseService = *service.NewBaseService(nil, "Peer", p)
for _, option := range options {
option(p)
}
return p
}
// onError calls the peer error callback.
func (p *peer) onError(err interface{}) {
p.onPeerError(p, err)
}
// String representation.
func (p *peer) String() string {
if p.outbound {
return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID())
}
return fmt.Sprintf("Peer{%v %v in}", p.conn, p.ID())
}
//---------------------------------------------------
// Implements service.Service
// SetLogger implements BaseService.
func (p *peer) SetLogger(l log.Logger) {
p.Logger = l
}
// OnStart implements BaseService.
func (p *peer) OnStart() error {
if err := p.BaseService.OnStart(); err != nil {
return err
}
go p.processMessages()
go p.metricsReporter()
return nil
}
// processMessages processes messages received from the connection.
func (p *peer) processMessages() {
defer func() {
if r := recover(); r != nil {
p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack()))
p.onError(fmt.Errorf("panic during peer message processing: %v", r))
}
}()
for {
chID, msg, err := p.conn.ReceiveMessage()
if err != nil {
p.onError(err)
return
}
reactor, ok := p.reactors[byte(chID)]
if !ok {
p.onError(fmt.Errorf("unknown channel %v", chID))
return
}
reactor.Receive(byte(chID), p, msg)
}
}
// FlushStop mimics OnStop but additionally ensures that all successful
// .Send() calls will get flushed before closing the connection.
// NOTE: it is not safe to call this method more than once.
func (p *peer) FlushStop() {
p.metricsTicker.Stop()
p.BaseService.OnStop()
if err := p.conn.FlushClose(); err != nil {
p.Logger.Debug("error while stopping peer", "err", err)
}
}
// OnStop implements BaseService.
func (p *peer) OnStop() {
p.metricsTicker.Stop()
p.BaseService.OnStop()
if err := p.conn.Close(); err != nil {
p.Logger.Debug("error while stopping peer", "err", err)
}
}
//---------------------------------------------------
// Implements Peer
// ID returns the peer's ID - the hex encoded hash of its pubkey.
func (p *peer) ID() types.NodeID {
return p.nodeInfo.ID()
}
// IsOutbound returns true if the connection is outbound, false otherwise.
func (p *peer) IsOutbound() bool {
return p.peerConn.outbound
}
// IsPersistent returns true if the peer is persistent, false otherwise.
func (p *peer) IsPersistent() bool {
return p.peerConn.persistent
}
// NodeInfo returns a copy of the peer's NodeInfo.
func (p *peer) NodeInfo() types.NodeInfo {
return p.nodeInfo
}
// SocketAddr returns the address of the socket.
// For outbound peers, it's the address dialed (after DNS resolution).
// For inbound peers, it's the address returned by the underlying connection
// (not what's reported in the peer's NodeInfo).
func (p *peer) SocketAddr() *NetAddress {
endpoint := p.peerConn.conn.RemoteEndpoint()
return &NetAddress{
ID: p.ID(),
IP: endpoint.IP,
Port: endpoint.Port,
}
}
// Status returns the peer's ConnectionStatus.
func (p *peer) Status() tmconn.ConnectionStatus {
return p.conn.Status()
}
// Send msg bytes to the channel identified by chID byte. Returns false if the
// send queue is full after timeout, specified by MConnection.
func (p *peer) Send(chID byte, msgBytes []byte) bool {
if !p.IsRunning() {
// see Switch#Broadcast, where we fetch the list of peers and loop over
// them - while we're looping, one peer may be removed and stopped.
return false
} else if !p.hasChannel(chID) {
return false
}
res, err := p.conn.SendMessage(ChannelID(chID), msgBytes)
if err == io.EOF {
return false
} else if err != nil {
p.onError(err)
return false
}
if res {
labels := []string{
"peer_id", string(p.ID()),
"chID", fmt.Sprintf("%#x", chID),
}
p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
}
return res
}
// TrySend msg bytes to the channel identified by chID byte. Immediately returns
// false if the send queue is full.
func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
if !p.IsRunning() {
return false
} else if !p.hasChannel(chID) {
return false
}
res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes)
if err == io.EOF {
return false
} else if err != nil {
p.onError(err)
return false
}
if res {
labels := []string{
"peer_id", string(p.ID()),
"chID", fmt.Sprintf("%#x", chID),
}
p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
}
return res
}
// Get the data for a given key.
func (p *peer) Get(key string) interface{} {
return p.Data.Get(key)
}
// Set sets the data for the given key.
func (p *peer) Set(key string, data interface{}) {
p.Data.Set(key, data)
}
// hasChannel returns true if the peer reported
// knowing about the given chID.
func (p *peer) hasChannel(chID byte) bool {
for _, ch := range p.channels {
if ch == chID {
return true
}
}
// NOTE: probably will want to remove this
// but could be helpful while the feature is new
p.Logger.Debug(
"Unknown channel for peer",
"channel",
chID,
"channels",
p.channels,
)
return false
}
// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all.
func (p *peer) CloseConn() error {
return p.peerConn.conn.Close()
}
//---------------------------------------------------
// methods only used for testing
// TODO: can we remove these?
// CloseConn closes the underlying connection
func (pc *peerConn) CloseConn() {
pc.conn.Close()
}
// RemoteAddr returns peer's remote network address.
func (p *peer) RemoteAddr() net.Addr {
endpoint := p.conn.RemoteEndpoint()
return &net.TCPAddr{
IP: endpoint.IP,
Port: int(endpoint.Port),
}
}
//---------------------------------------------------
func PeerMetrics(metrics *Metrics) PeerOption {
return func(p *peer) {
p.metrics = metrics
}
}
func (p *peer) metricsReporter() {
for {
select {
case <-p.metricsTicker.C:
status := p.conn.Status()
var sendQueueSize float64
for _, chStatus := range status.Channels {
sendQueueSize += float64(chStatus.SendQueueSize)
}
p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize)
case <-p.Quit():
return
}
}
}
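
PeerMetrics at the bottom of this file is the standard Go functional-options pattern: newPeer builds the struct with defaults (NopMetrics), then applies each PeerOption in order. A self-contained sketch of the same pattern with illustrative names:

```go
package main

import "fmt"

type peer struct {
	name    string
	metrics string
}

// PeerOption mutates the peer during construction, exactly as the
// PeerOption/PeerMetrics pair above does.
type PeerOption func(*peer)

// WithMetrics is the analogue of PeerMetrics: it swaps a default
// dependency for a caller-supplied one.
func WithMetrics(m string) PeerOption {
	return func(p *peer) { p.metrics = m }
}

func newPeer(name string, options ...PeerOption) *peer {
	p := &peer{name: name, metrics: "nop"} // default, like NopMetrics()
	for _, option := range options {
		option(p)
	}
	return p
}

func main() {
	p := newPeer("alice", WithMetrics("prometheus"))
	fmt.Println(p.name, p.metrics) // alice prometheus
}
```

The pattern keeps newPeer's signature stable while letting callers opt into extras, which is why only the metrics hook needed an option here.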

internal/p2p/peer_set.go (+0, -149)

@@ -1,149 +0,0 @@
package p2p
import (
"net"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/types"
)
// IPeerSet has a (immutable) subset of the methods of PeerSet.
type IPeerSet interface {
Has(key types.NodeID) bool
HasIP(ip net.IP) bool
Get(key types.NodeID) Peer
List() []Peer
Size() int
}
//-----------------------------------------------------------------------------
// PeerSet is a special structure for keeping a table of peers.
// Iteration over the peers is super fast and thread-safe.
type PeerSet struct {
mtx tmsync.Mutex
lookup map[types.NodeID]*peerSetItem
list []Peer
}
type peerSetItem struct {
peer Peer
index int
}
// NewPeerSet creates a new PeerSet with an initial list capacity of 256 items.
func NewPeerSet() *PeerSet {
return &PeerSet{
lookup: make(map[types.NodeID]*peerSetItem),
list: make([]Peer, 0, 256),
}
}
// Add adds the peer to the PeerSet.
// It returns an error carrying the reason, if the peer is already present.
func (ps *PeerSet) Add(peer Peer) error {
ps.mtx.Lock()
defer ps.mtx.Unlock()
if ps.lookup[peer.ID()] != nil {
return ErrSwitchDuplicatePeerID{peer.ID()}
}
index := len(ps.list)
// Appending is safe even with other goroutines
// iterating over the ps.list slice.
ps.list = append(ps.list, peer)
ps.lookup[peer.ID()] = &peerSetItem{peer, index}
return nil
}
// Has returns true if the set contains the peer referred to by this
// peerKey, otherwise false.
func (ps *PeerSet) Has(peerKey types.NodeID) bool {
ps.mtx.Lock()
_, ok := ps.lookup[peerKey]
ps.mtx.Unlock()
return ok
}
// HasIP returns true if the set contains the peer referred to by this IP
// address, otherwise false.
func (ps *PeerSet) HasIP(peerIP net.IP) bool {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.hasIP(peerIP)
}
// hasIP does not acquire a lock so it can be used in public methods which
// already lock.
func (ps *PeerSet) hasIP(peerIP net.IP) bool {
for _, item := range ps.lookup {
if item.peer.RemoteIP().Equal(peerIP) {
return true
}
}
return false
}
// Get looks up a peer by the provided peerKey. Returns nil if peer is not
// found.
func (ps *PeerSet) Get(peerKey types.NodeID) Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item, ok := ps.lookup[peerKey]
if ok {
return item.peer
}
return nil
}
// Remove discards peer by its Key, if the peer was previously memoized.
// Returns true if the peer was removed, and false if it was not found
// in the set.
func (ps *PeerSet) Remove(peer Peer) bool {
ps.mtx.Lock()
defer ps.mtx.Unlock()
item := ps.lookup[peer.ID()]
if item == nil {
return false
}
index := item.index
// Create a new copy of the list but with one less item.
// (we must copy because we'll be mutating the list).
newList := make([]Peer, len(ps.list)-1)
copy(newList, ps.list)
// If it's the last peer, that's an easy special case.
if index == len(ps.list)-1 {
ps.list = newList
delete(ps.lookup, peer.ID())
return true
}
// Replace the popped item with the last item in the old list.
lastPeer := ps.list[len(ps.list)-1]
lastPeerKey := lastPeer.ID()
lastPeerItem := ps.lookup[lastPeerKey]
newList[index] = lastPeer
lastPeerItem.index = index
ps.list = newList
delete(ps.lookup, peer.ID())
return true
}
// Size returns the number of unique items in the peerSet.
func (ps *PeerSet) Size() int {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return len(ps.list)
}
// List returns the threadsafe list of peers.
func (ps *PeerSet) List() []Peer {
ps.mtx.Lock()
defer ps.mtx.Unlock()
return ps.list
}
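
Remove above is the classic swap-with-last deletion: O(1) removal that keeps the slice dense, at the cost of re-indexing the one displaced element. A minimal standalone sketch of the technique (no locking, illustrative types):

```go
package main

import "fmt"

// removeAt deletes items[i] in O(1): the last element is moved into the
// hole and its lookup index fixed up, mirroring PeerSet.Remove.
func removeAt(items []string, index map[string]int, i int) []string {
	last := len(items) - 1
	delete(index, items[i])
	if i != last {
		items[i] = items[last] // move last element into the hole
		index[items[i]] = i    // re-index the displaced element
	}
	return items[:last]
}

func main() {
	items := []string{"a", "b", "c", "d"}
	index := map[string]int{"a": 0, "b": 1, "c": 2, "d": 3}
	items = removeAt(items, index, 1) // remove "b"
	fmt.Println(items, index)         // [a d c] map[a:0 c:2 d:1]
}
```

The real Remove additionally copies the slice before mutating it, so callers holding the slice returned by List() keep a consistent snapshot.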

internal/p2p/peer_set_test.go (+0, -190)

@@ -1,190 +0,0 @@
package p2p
import (
"net"
"sync"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
// mockPeer for testing the PeerSet
type mockPeer struct {
service.BaseService
ip net.IP
id types.NodeID
}
func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error
func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true }
func (mp *mockPeer) NodeInfo() types.NodeInfo { return types.NodeInfo{} }
func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} }
func (mp *mockPeer) ID() types.NodeID { return mp.id }
func (mp *mockPeer) IsOutbound() bool { return false }
func (mp *mockPeer) IsPersistent() bool { return true }
func (mp *mockPeer) Get(s string) interface{} { return s }
func (mp *mockPeer) Set(string, interface{}) {}
func (mp *mockPeer) RemoteIP() net.IP { return mp.ip }
func (mp *mockPeer) SocketAddr() *NetAddress { return nil }
func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
func (mp *mockPeer) CloseConn() error { return nil }
// Returns a mock peer
func newMockPeer(ip net.IP) *mockPeer {
if ip == nil {
ip = net.IP{127, 0, 0, 1}
}
nodeKey := types.GenNodeKey()
return &mockPeer{
ip: ip,
id: nodeKey.ID,
}
}
func TestPeerSetAddRemoveOne(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
var peerList []Peer
for i := 0; i < 5; i++ {
p := newMockPeer(net.IP{127, 0, 0, byte(i)})
if err := peerSet.Add(p); err != nil {
t.Error(err)
}
peerList = append(peerList, p)
}
n := len(peerList)
// 1. Test removing from the front
for i, peerAtFront := range peerList {
removed := peerSet.Remove(peerAtFront)
assert.True(t, removed)
wantSize := n - i - 1
for j := 0; j < 2; j++ {
assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j)
assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
// Test the route of removing the now non-existent element
removed := peerSet.Remove(peerAtFront)
assert.False(t, removed)
}
}
// 2. Next we are testing removing the peer at the end
// a) Replenish the peerSet
for _, peer := range peerList {
if err := peerSet.Add(peer); err != nil {
t.Error(err)
}
}
// b) In reverse, remove each element
for i := n - 1; i >= 0; i-- {
peerAtEnd := peerList[i]
removed := peerSet.Remove(peerAtEnd)
assert.True(t, removed)
assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i)
assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
}
}
func TestPeerSetAddRemoveMany(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peers := []Peer{}
N := 100
for i := 0; i < N; i++ {
peer := newMockPeer(net.IP{127, 0, 0, byte(i)})
if err := peerSet.Add(peer); err != nil {
t.Errorf("failed to add new peer")
}
if peerSet.Size() != i+1 {
t.Errorf("failed to add new peer and increment size")
}
peers = append(peers, peer)
}
for i, peer := range peers {
removed := peerSet.Remove(peer)
assert.True(t, removed)
if peerSet.Has(peer.ID()) {
t.Errorf("failed to remove peer")
}
if peerSet.Size() != len(peers)-i-1 {
t.Errorf("failed to remove peer and decrement size")
}
}
}
func TestPeerSetAddDuplicate(t *testing.T) {
t.Parallel()
peerSet := NewPeerSet()
peer := newMockPeer(nil)
n := 20
errsChan := make(chan error)
// Add the same asynchronously to test the
// concurrent guarantees of our APIs, and
// our expectation in the end is that only
// one addition succeeded, but the rest are
// instances of ErrSwitchDuplicatePeer.
for i := 0; i < n; i++ {
go func() {
errsChan <- peerSet.Add(peer)
}()
}
// Now collect and tally the results
errsTally := make(map[string]int)
for i := 0; i < n; i++ {
err := <-errsChan
switch err.(type) {
case ErrSwitchDuplicatePeerID:
errsTally["duplicateID"]++
default:
errsTally["other"]++
}
}
// Our next procedure is to ensure that only one addition
// succeeded and that the rest are each ErrSwitchDuplicatePeer.
wantErrCount, gotErrCount := n-1, errsTally["duplicateID"]
assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")
wantNilErrCount, gotNilErrCount := 1, errsTally["other"]
assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
}
func TestPeerSetGet(t *testing.T) {
t.Parallel()
var (
peerSet = NewPeerSet()
peer = newMockPeer(nil)
)
assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add")
if err := peerSet.Add(peer); err != nil {
t.Fatalf("Failed to add new peer: %v", err)
}
var wg sync.WaitGroup
for i := 0; i < 10; i++ {
// Add them asynchronously to test the
// concurrent guarantees of our APIs.
wg.Add(1)
go func(i int) {
defer wg.Done()
have, want := peerSet.Get(peer.ID()), peer
assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want)
}(i)
}
wg.Wait()
}

internal/p2p/peer_test.go (+0, -239)

@@ -1,239 +0,0 @@
package p2p
import (
"context"
"fmt"
golog "log"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/config"
tmconn "github.com/tendermint/tendermint/internal/p2p/conn"
)
func TestPeerBasic(t *testing.T) {
assert, require := assert.New(t), require.New(t)
// simulate remote peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
t.Cleanup(rp.Stop)
p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig())
require.Nil(err)
err = p.Start()
require.Nil(err)
t.Cleanup(func() {
if err := p.Stop(); err != nil {
t.Error(err)
}
})
assert.True(p.IsRunning())
assert.True(p.IsOutbound())
assert.False(p.IsPersistent())
p.persistent = true
assert.True(p.IsPersistent())
assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String())
assert.Equal(rp.ID(), p.ID())
}
func TestPeerSend(t *testing.T) {
assert, require := assert.New(t), require.New(t)
config := cfg
// simulate remote peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config}
rp.Start()
t.Cleanup(rp.Stop)
p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig())
require.Nil(err)
err = p.Start()
require.Nil(err)
t.Cleanup(func() {
if err := p.Stop(); err != nil {
t.Error(err)
}
})
assert.True(p.Send(testCh, []byte("Asylum")))
}
func createOutboundPeerAndPerformHandshake(
addr *NetAddress,
config *config.P2PConfig,
mConfig tmconn.MConnConfig,
) (*peer, error) {
chDescs := []*tmconn.ChannelDescriptor{
{ID: testCh, Priority: 1},
}
pk := ed25519.GenPrivKey()
ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer")
transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{})
reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)}
pc, err := testOutboundPeerConn(transport, addr, config, false, pk)
if err != nil {
return nil, err
}
peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk)
if err != nil {
return nil, err
}
p := newPeer(peerInfo, pc, reactorsByCh, func(p Peer, r interface{}) {})
p.SetLogger(log.TestingLogger().With("peer", addr))
return p, nil
}
func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) {
if cfg.TestDialFail {
return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)")
}
conn, err := addr.DialTimeout(cfg.DialTimeout)
if err != nil {
return nil, err
}
return conn, nil
}
func testOutboundPeerConn(
transport *MConnTransport,
addr *NetAddress,
config *config.P2PConfig,
persistent bool,
ourNodePrivKey crypto.PrivKey,
) (peerConn, error) {
var pc peerConn
conn, err := testDial(addr, config)
if err != nil {
return pc, fmt.Errorf("error creating peer: %w", err)
}
pc, err = testPeerConn(transport, conn, true, persistent)
if err != nil {
if cerr := conn.Close(); cerr != nil {
return pc, fmt.Errorf("%v: %w", cerr.Error(), err)
}
return pc, err
}
return pc, nil
}
type remotePeer struct {
PrivKey crypto.PrivKey
Config *config.P2PConfig
Network string
addr *NetAddress
channels bytes.HexBytes
listenAddr string
listener net.Listener
}
func (rp *remotePeer) Addr() *NetAddress {
return rp.addr
}
func (rp *remotePeer) ID() types.NodeID {
return types.NodeIDFromPubKey(rp.PrivKey.PubKey())
}
func (rp *remotePeer) Start() {
if rp.listenAddr == "" {
rp.listenAddr = "127.0.0.1:0"
}
l, e := net.Listen("tcp", rp.listenAddr) // any available address
if e != nil {
golog.Fatalf("net.Listen tcp :0: %+v", e)
}
rp.listener = l
rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr())
if rp.channels == nil {
rp.channels = []byte{testCh}
}
go rp.accept()
}
func (rp *remotePeer) Stop() {
rp.listener.Close()
}
func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) {
transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config),
[]*ChannelDescriptor{}, MConnTransportOptions{})
conn, err := addr.DialTimeout(1 * time.Second)
if err != nil {
return nil, err
}
pc, err := testInboundPeerConn(transport, conn)
if err != nil {
return nil, err
}
_, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
if err != nil {
return nil, err
}
return conn, err
}
func (rp *remotePeer) accept() {
transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config),
[]*ChannelDescriptor{}, MConnTransportOptions{})
conns := []net.Conn{}
for {
conn, err := rp.listener.Accept()
if err != nil {
golog.Printf("Failed to accept conn: %+v", err)
for _, conn := range conns {
_ = conn.Close()
}
return
}
pc, err := testInboundPeerConn(transport, conn)
if err != nil {
golog.Printf("Failed to create a peer: %+v", err)
}
_, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey)
if err != nil {
golog.Printf("Failed to handshake a peer: %+v", err)
}
conns = append(conns, conn)
}
}
func (rp *remotePeer) nodeInfo() types.NodeInfo {
ni := types.NodeInfo{
ProtocolVersion: defaultProtocolVersion,
NodeID: rp.Addr().ID,
ListenAddr: rp.listener.Addr().String(),
Network: "testing",
Version: "1.2.3-rc0-deadbeef",
Channels: rp.channels,
Moniker: "remote_peer",
}
if rp.Network != "" {
ni.Network = rp.Network
}
return ni
}

internal/p2p/pex/addrbook.go (+0, -948)

@@ -1,948 +0,0 @@
// Modified for Tendermint
// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
// https://github.com/conformal/btcd/blob/master/LICENSE
package pex
import (
"encoding/binary"
"fmt"
"hash"
"math"
mrand "math/rand"
"net"
"sync"
"time"
"github.com/minio/highwayhash"
"github.com/tendermint/tendermint/crypto"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
const (
bucketTypeNew = 0x01
bucketTypeOld = 0x02
)
// AddrBook is an address book used for tracking peers
// so we can gossip about them to others and select
// peers to dial.
// TODO: break this up?
type AddrBook interface {
service.Service
// Add our own addresses so we don't later add ourselves
AddOurAddress(*p2p.NetAddress)
// Check if it is our address
OurAddress(*p2p.NetAddress) bool
AddPrivateIDs([]string)
// Add and remove an address
AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error
RemoveAddress(*p2p.NetAddress)
// Check if the address is in the book
HasAddress(*p2p.NetAddress) bool
// Do we need more peers?
NeedMoreAddrs() bool
// Is Address Book Empty? Answer should not depend on being in your own
// address book, or private peers
Empty() bool
// Pick an address to dial
PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress
// Mark address
MarkGood(types.NodeID)
MarkAttempt(*p2p.NetAddress)
MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list
// Add bad peers back to addrBook
ReinstateBadPeers()
IsGood(*p2p.NetAddress) bool
IsBanned(*p2p.NetAddress) bool
// Send a selection of addresses to peers
GetSelection() []*p2p.NetAddress
// Send a selection of addresses with bias
GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress
Size() int
// Persist to disk
Save()
}
var _ AddrBook = (*addrBook)(nil)
// addrBook - concurrency safe peer address manager.
// Implements AddrBook.
type addrBook struct {
service.BaseService
// accessed concurrently
mtx tmsync.Mutex
ourAddrs map[string]struct{}
privateIDs map[types.NodeID]struct{}
addrLookup map[types.NodeID]*knownAddress // new & old
badPeers map[types.NodeID]*knownAddress // blacklisted peers
bucketsOld []map[string]*knownAddress
bucketsNew []map[string]*knownAddress
nOld int
nNew int
// immutable after creation
filePath string
key string // random prefix for bucket placement
routabilityStrict bool
hasher hash.Hash64
wg sync.WaitGroup
}
func mustNewHasher() hash.Hash64 {
key := crypto.CRandBytes(highwayhash.Size)
hasher, err := highwayhash.New64(key)
if err != nil {
panic(err)
}
return hasher
}
// NewAddrBook creates a new address book.
// Use Start to begin processing asynchronous address updates.
func NewAddrBook(filePath string, routabilityStrict bool) AddrBook {
am := &addrBook{
ourAddrs: make(map[string]struct{}),
privateIDs: make(map[types.NodeID]struct{}),
addrLookup: make(map[types.NodeID]*knownAddress),
badPeers: make(map[types.NodeID]*knownAddress),
filePath: filePath,
routabilityStrict: routabilityStrict,
}
am.init()
am.BaseService = *service.NewBaseService(nil, "AddrBook", am)
return am
}
// Initialize the buckets.
// When modifying this, don't forget to update loadFromFile()
func (a *addrBook) init() {
a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
// New addr buckets
a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
for i := range a.bucketsNew {
a.bucketsNew[i] = make(map[string]*knownAddress)
}
// Old addr buckets
a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount)
for i := range a.bucketsOld {
a.bucketsOld[i] = make(map[string]*knownAddress)
}
a.hasher = mustNewHasher()
}
// OnStart implements Service.
func (a *addrBook) OnStart() error {
if err := a.BaseService.OnStart(); err != nil {
return err
}
a.loadFromFile(a.filePath)
// wg.Add to ensure that any invocation of .Wait()
// later on will wait for saveRoutine to terminate.
a.wg.Add(1)
go a.saveRoutine()
return nil
}
// OnStop implements Service.
func (a *addrBook) OnStop() {
a.BaseService.OnStop()
}
func (a *addrBook) Wait() {
a.wg.Wait()
}
func (a *addrBook) FilePath() string {
return a.filePath
}
//-------------------------------------------------------
// AddOurAddress one of our addresses.
func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
a.Logger.Info("Add our address to book", "addr", addr)
a.ourAddrs[addr.String()] = struct{}{}
}
// OurAddress returns true if it is our address.
func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool {
a.mtx.Lock()
defer a.mtx.Unlock()
_, ok := a.ourAddrs[addr.String()]
return ok
}
func (a *addrBook) AddPrivateIDs(ids []string) {
a.mtx.Lock()
defer a.mtx.Unlock()
for _, id := range ids {
a.privateIDs[types.NodeID(id)] = struct{}{}
}
}
// AddAddress implements AddrBook
// Add address to a "new" bucket. If it's already in one, only add it probabilistically.
// Returns error if the addr is non-routable. Does not add self.
// NOTE: addr must not be nil
func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
a.mtx.Lock()
defer a.mtx.Unlock()
return a.addAddress(addr, src)
}
// RemoveAddress implements AddrBook - removes the address from the book.
func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
a.removeAddress(addr)
}
// IsGood returns true if the peer was ever marked as good and hasn't
// done anything wrong since then.
func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
a.mtx.Lock()
defer a.mtx.Unlock()
return a.addrLookup[addr.ID].isOld()
}
// IsBanned returns true if the peer is currently banned
func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool {
a.mtx.Lock()
_, ok := a.badPeers[addr.ID]
a.mtx.Unlock()
return ok
}
// HasAddress returns true if the address is in the book.
func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
a.mtx.Lock()
defer a.mtx.Unlock()
ka := a.addrLookup[addr.ID]
return ka != nil
}
// NeedMoreAddrs implements AddrBook - returns true if there are not enough addresses in the book.
func (a *addrBook) NeedMoreAddrs() bool {
return a.Size() < needAddressThreshold
}
// Empty implements AddrBook - returns true if there are no addresses in the address book.
// Does not count the peer appearing in its own address book, or private peers.
func (a *addrBook) Empty() bool {
return a.Size() == 0
}
// PickAddress implements AddrBook. It picks an address to connect to.
// The address is picked randomly from an old or new bucket according
// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range)
// and determines how biased we are to pick an address from a new bucket.
// PickAddress returns nil if the AddrBook is empty or if we try to pick
// from an empty bucket.
// nolint:gosec // G404: Use of weak random number generator
func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
bookSize := a.size()
if bookSize <= 0 {
if bookSize < 0 {
panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
}
return nil
}
if biasTowardsNewAddrs > 100 {
biasTowardsNewAddrs = 100
}
if biasTowardsNewAddrs < 0 {
biasTowardsNewAddrs = 0
}
// Bias between new and old addresses.
oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs))
newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs)
// pick a random peer from a random bucket
var bucket map[string]*knownAddress
pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation
if (pickFromOldBucket && a.nOld == 0) ||
(!pickFromOldBucket && a.nNew == 0) {
return nil
}
// loop until we pick a random non-empty bucket
for len(bucket) == 0 {
if pickFromOldBucket {
bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))]
} else {
bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))]
}
}
// pick a random index and loop over the map to return that index
randIndex := mrand.Intn(len(bucket))
for _, ka := range bucket {
if randIndex == 0 {
return ka.Addr
}
randIndex--
}
return nil
}
// MarkGood implements AddrBook - it marks the peer as good and
// moves it into an "old" bucket.
func (a *addrBook) MarkGood(id types.NodeID) {
a.mtx.Lock()
defer a.mtx.Unlock()
ka := a.addrLookup[id]
if ka == nil {
return
}
ka.markGood()
if ka.isNew() {
if err := a.moveToOld(ka); err != nil {
a.Logger.Error("Error moving address to old", "err", err)
}
}
}
// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address.
func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) {
a.mtx.Lock()
defer a.mtx.Unlock()
ka := a.addrLookup[addr.ID]
if ka == nil {
return
}
ka.markAttempt()
}
// MarkBad implements AddrBook. Kicks address out from book, places
// the address in the badPeers pool.
func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) {
a.mtx.Lock()
defer a.mtx.Unlock()
if a.addBadPeer(addr, banTime) {
a.removeAddress(addr)
}
}
// ReinstateBadPeers removes bad peers from ban list and places them into a new
// bucket.
func (a *addrBook) ReinstateBadPeers() {
a.mtx.Lock()
defer a.mtx.Unlock()
for _, ka := range a.badPeers {
if ka.isBanned() {
continue
}
bucket, err := a.calcNewBucket(ka.Addr, ka.Src)
if err != nil {
a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)",
"addr", ka.Addr, "err", err)
continue
}
if err := a.addToNewBucket(ka, bucket); err != nil {
a.Logger.Error("Error adding peer to new bucket", "err", err)
}
delete(a.badPeers, ka.ID())
a.Logger.Info("Reinstated address", "addr", ka.Addr)
}
}
// GetSelection implements AddrBook.
// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
// Must never return a nil address.
func (a *addrBook) GetSelection() []*p2p.NetAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
bookSize := a.size()
if bookSize <= 0 {
if bookSize < 0 {
panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
}
return nil
}
numAddresses := tmmath.MaxInt(
tmmath.MinInt(minGetSelection, bookSize),
bookSize*getSelectionPercent/100)
numAddresses = tmmath.MinInt(maxGetSelection, numAddresses)
// XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk,
// could we just select a random numAddresses of indexes?
allAddr := make([]*p2p.NetAddress, bookSize)
i := 0
for _, ka := range a.addrLookup {
allAddr[i] = ka.Addr
i++
}
// Fisher-Yates shuffle the array. We only need to do the first
// `numAddresses` since we are throwing away the rest.
for i := 0; i < numAddresses; i++ {
// pick a number between current index and the end
// nolint:gosec // G404: Use of weak random number generator
j := mrand.Intn(len(allAddr)-i) + i
allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
}
// slice off the limit we are willing to share.
return allAddr[:numAddresses]
}
func percentageOfNum(p, n int) int {
return int(math.Round((float64(p) / float64(100)) * float64(n)))
}
// GetSelectionWithBias implements AddrBook.
// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
// Must never return a nil address.
//
// Each address is picked randomly from an old or new bucket according to the
// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to
// that range) and determines how biased we are to pick an address from a new
// bucket.
func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress {
a.mtx.Lock()
defer a.mtx.Unlock()
bookSize := a.size()
if bookSize <= 0 {
if bookSize < 0 {
panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld))
}
return nil
}
if biasTowardsNewAddrs > 100 {
biasTowardsNewAddrs = 100
}
if biasTowardsNewAddrs < 0 {
biasTowardsNewAddrs = 0
}
numAddresses := tmmath.MaxInt(
tmmath.MinInt(minGetSelection, bookSize),
bookSize*getSelectionPercent/100)
numAddresses = tmmath.MinInt(maxGetSelection, numAddresses)
// number of new addresses that, if possible, should be at the beginning of the selection;
// if there are not enough old addrs, new addrs will be chosen instead.
numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld)
selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd)
selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...)
return selection
}
//------------------------------------------------
// Size returns the number of addresses in the book.
func (a *addrBook) Size() int {
a.mtx.Lock()
defer a.mtx.Unlock()
return a.size()
}
func (a *addrBook) size() int {
return a.nNew + a.nOld
}
//----------------------------------------------------------
// Save persists the address book to disk.
func (a *addrBook) Save() {
a.saveToFile(a.filePath) // thread safe
}
func (a *addrBook) saveRoutine() {
defer a.wg.Done()
saveFileTicker := time.NewTicker(dumpAddressInterval)
out:
for {
select {
case <-saveFileTicker.C:
a.saveToFile(a.filePath)
case <-a.Quit():
break out
}
}
saveFileTicker.Stop()
a.saveToFile(a.filePath)
}
//----------------------------------------------------------
func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
switch bucketType {
case bucketTypeNew:
return a.bucketsNew[bucketIdx]
case bucketTypeOld:
return a.bucketsOld[bucketIdx]
default:
panic("Invalid bucket type")
}
}
// Adds ka to a new bucket. Returns false if it couldn't do it because the buckets are full.
// NOTE: currently it always returns true.
func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error {
// Consistency check to ensure we don't add an already known address
if ka.isOld() {
return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx}
}
addrStr := ka.Addr.String()
bucket := a.getBucket(bucketTypeNew, bucketIdx)
// Already exists?
if _, ok := bucket[addrStr]; ok {
return nil
}
// Enforce max addresses.
if len(bucket) > newBucketSize {
a.Logger.Info("new bucket is full, expiring new")
a.expireNew(bucketIdx)
}
// Add to bucket.
bucket[addrStr] = ka
// increment nNew if the peer doesn't already exist in a bucket
if ka.addBucketRef(bucketIdx) == 1 {
a.nNew++
}
// Add it to addrLookup
a.addrLookup[ka.ID()] = ka
return nil
}
// Adds ka to an old bucket. Returns false if it couldn't do it because the buckets are full.
func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
// Sanity check
if ka.isNew() {
a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka))
return false
}
if len(ka.Buckets) != 0 {
a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka))
return false
}
addrStr := ka.Addr.String()
bucket := a.getBucket(bucketTypeOld, bucketIdx)
// Already exists?
if _, ok := bucket[addrStr]; ok {
return true
}
// Enforce max addresses.
if len(bucket) > oldBucketSize {
return false
}
// Add to bucket.
bucket[addrStr] = ka
if ka.addBucketRef(bucketIdx) == 1 {
a.nOld++
}
// Ensure in addrLookup
a.addrLookup[ka.ID()] = ka
return true
}
func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
if ka.BucketType != bucketType {
a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka))
return
}
bucket := a.getBucket(bucketType, bucketIdx)
delete(bucket, ka.Addr.String())
if ka.removeBucketRef(bucketIdx) == 0 {
if bucketType == bucketTypeNew {
a.nNew--
} else {
a.nOld--
}
delete(a.addrLookup, ka.ID())
}
}
func (a *addrBook) removeFromAllBuckets(ka *knownAddress) {
for _, bucketIdx := range ka.Buckets {
bucket := a.getBucket(ka.BucketType, bucketIdx)
delete(bucket, ka.Addr.String())
}
ka.Buckets = nil
if ka.BucketType == bucketTypeNew {
a.nNew--
} else {
a.nOld--
}
delete(a.addrLookup, ka.ID())
}
//----------------------------------------------------------
func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
bucket := a.getBucket(bucketType, bucketIdx)
var oldest *knownAddress
for _, ka := range bucket {
if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
oldest = ka
}
}
return oldest
}
// adds the address to a "new" bucket. if its already in one,
// it only adds it probabilistically
func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error {
if addr == nil || src == nil {
return ErrAddrBookNilAddr{addr, src}
}
if err := addr.Valid(); err != nil {
return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err}
}
if _, ok := a.badPeers[addr.ID]; ok {
return ErrAddressBanned{addr}
}
if _, ok := a.privateIDs[addr.ID]; ok {
return ErrAddrBookPrivate{addr}
}
if _, ok := a.privateIDs[src.ID]; ok {
return ErrAddrBookPrivateSrc{src}
}
// TODO: we should track ourAddrs by ID and by IP:PORT and refuse both.
if _, ok := a.ourAddrs[addr.String()]; ok {
return ErrAddrBookSelf{addr}
}
if a.routabilityStrict && !addr.Routable() {
return ErrAddrBookNonRoutable{addr}
}
ka := a.addrLookup[addr.ID]
if ka != nil {
// If it's already old and the address IDs are the same, ignore it.
// Thereby avoiding issues with a node on the network attempting to change
// the IP of a known node ID. (Which could yield an eclipse attack on the node)
if ka.isOld() && ka.Addr.ID == addr.ID {
return nil
}
// Already in max new buckets.
if len(ka.Buckets) == maxNewBucketsPerAddress {
return nil
}
// The more entries we have, the less likely we are to add more.
factor := int32(2 * len(ka.Buckets))
// nolint:gosec // G404: Use of weak random number generator
if mrand.Int31n(factor) != 0 {
return nil
}
} else {
ka = newKnownAddress(addr, src)
}
bucket, err := a.calcNewBucket(addr, src)
if err != nil {
return err
}
return a.addToNewBucket(ka, bucket)
}
func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress {
var buckets []map[string]*knownAddress
switch bucketType {
case bucketTypeNew:
buckets = a.bucketsNew
case bucketTypeOld:
buckets = a.bucketsOld
default:
panic("unexpected bucketType")
}
total := 0
for _, bucket := range buckets {
total += len(bucket)
}
addresses := make([]*knownAddress, 0, total)
for _, bucket := range buckets {
for _, ka := range bucket {
addresses = append(addresses, ka)
}
}
selection := make([]*p2p.NetAddress, 0, num)
chosenSet := make(map[string]bool, num)
rand := tmrand.NewRand()
rand.Shuffle(total, func(i, j int) {
addresses[i], addresses[j] = addresses[j], addresses[i]
})
for _, addr := range addresses {
if chosenSet[addr.Addr.String()] {
continue
}
chosenSet[addr.Addr.String()] = true
selection = append(selection, addr.Addr)
if len(selection) >= num {
return selection
}
}
return selection
}
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
func (a *addrBook) expireNew(bucketIdx int) {
for addrStr, ka := range a.bucketsNew[bucketIdx] {
// If an entry is bad, throw it away
if ka.isBad() {
a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr))
a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
return
}
}
// If we haven't thrown out a bad entry, throw out the oldest entry
oldest := a.pickOldest(bucketTypeNew, bucketIdx)
a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
}
// Promotes an address from new to old. If the destination bucket is full,
// demote the oldest one to a "new" bucket.
// TODO: Demote more probabilistically?
func (a *addrBook) moveToOld(ka *knownAddress) error {
// Sanity check
if ka.isOld() {
a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka))
return nil
}
if len(ka.Buckets) == 0 {
a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka))
return nil
}
// Remove from all (new) buckets.
a.removeFromAllBuckets(ka)
// It's officially old now.
ka.BucketType = bucketTypeOld
// Try to add it to its oldBucket destination.
oldBucketIdx, err := a.calcOldBucket(ka.Addr)
if err != nil {
return err
}
added := a.addToOldBucket(ka, oldBucketIdx)
if !added {
// No room; move the oldest to a new bucket
oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src)
if err != nil {
return err
}
if err := a.addToNewBucket(oldest, newBucketIdx); err != nil {
a.Logger.Error("Error adding peer to old bucket", "err", err)
}
// Finally, add our ka to old bucket again.
added = a.addToOldBucket(ka, oldBucketIdx)
if !added {
a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
}
}
return nil
}
func (a *addrBook) removeAddress(addr *p2p.NetAddress) {
ka := a.addrLookup[addr.ID]
if ka == nil {
return
}
a.Logger.Info("Remove address from book", "addr", addr)
a.removeFromAllBuckets(ka)
}
func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool {
// check it exists in addrbook
ka := a.addrLookup[addr.ID]
// check address is not already there
if ka == nil {
return false
}
if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer {
// add to bad peer list
ka.ban(banTime)
a.badPeers[addr.ID] = ka
a.Logger.Info("Add address to blacklist", "addr", addr)
}
return true
}
//---------------------------------------------------------------------
// calculate bucket placements
// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets
func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) {
data1 := []byte{}
data1 = append(data1, []byte(a.key)...)
data1 = append(data1, []byte(a.groupKey(addr))...)
data1 = append(data1, []byte(a.groupKey(src))...)
hash1, err := a.hash(data1)
if err != nil {
return 0, err
}
hash64 := binary.BigEndian.Uint64(hash1)
hash64 %= newBucketsPerGroup
var hashbuf [8]byte
binary.BigEndian.PutUint64(hashbuf[:], hash64)
data2 := []byte{}
data2 = append(data2, []byte(a.key)...)
data2 = append(data2, a.groupKey(src)...)
data2 = append(data2, hashbuf[:]...)
hash2, err := a.hash(data2)
if err != nil {
return 0, err
}
result := int(binary.BigEndian.Uint64(hash2) % newBucketCount)
return result, nil
}
// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets
func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) {
data1 := []byte{}
data1 = append(data1, []byte(a.key)...)
data1 = append(data1, []byte(addr.String())...)
hash1, err := a.hash(data1)
if err != nil {
return 0, err
}
hash64 := binary.BigEndian.Uint64(hash1)
hash64 %= oldBucketsPerGroup
var hashbuf [8]byte
binary.BigEndian.PutUint64(hashbuf[:], hash64)
data2 := []byte{}
data2 = append(data2, []byte(a.key)...)
data2 = append(data2, a.groupKey(addr)...)
data2 = append(data2, hashbuf[:]...)
hash2, err := a.hash(data2)
if err != nil {
return 0, err
}
result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
return result, nil
}
// Return a string representing the network group of this address.
// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string
// "local" for a local address and the string "unroutable" for an unroutable
// address.
func (a *addrBook) groupKey(na *p2p.NetAddress) string {
return groupKeyFor(na, a.routabilityStrict)
}
func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string {
if routabilityStrict && na.Local() {
return "local"
}
if routabilityStrict && !na.Routable() {
return "unroutable"
}
if ipv4 := na.IP.To4(); ipv4 != nil {
return na.IP.Mask(net.CIDRMask(16, 32)).String()
}
if na.RFC6145() || na.RFC6052() {
// last four bytes are the ip address
ip := na.IP[12:16]
return ip.Mask(net.CIDRMask(16, 32)).String()
}
if na.RFC3964() {
ip := na.IP[2:6]
return ip.Mask(net.CIDRMask(16, 32)).String()
}
if na.RFC4380() {
// teredo tunnels have the last 4 bytes as the v4 address XOR
// 0xff.
ip := net.IP(make([]byte, 4))
for i, byte := range na.IP[12:16] {
ip[i] = byte ^ 0xff
}
return ip.Mask(net.CIDRMask(16, 32)).String()
}
if na.OnionCatTor() {
// group is keyed off the first 4 bits of the actual onion key.
return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
}
// OK, so now we know ourselves to be an IPv6 address.
// bitcoind uses /32 for everything, except for Hurricane Electric's
// (he.net) IP range, which it uses /36 for.
bits := 32
heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)}
if heNet.Contains(na.IP) {
bits = 36
}
ipv6Mask := net.CIDRMask(bits, 128)
return na.IP.Mask(ipv6Mask).String()
}
func (a *addrBook) hash(b []byte) ([]byte, error) {
a.hasher.Reset()
a.hasher.Write(b)
return a.hasher.Sum(nil), nil
}
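
The bucket placement at the bottom of this file is a two-stage keyed hash, per the formulas in the comments: the first hash collapses (group, source group) into one of a handful of per-group slots, and the second spreads that slot across all new buckets, so a single source can only ever fill a bounded number of buckets. A dependency-free sketch of the same shape, substituting SHA-256 for the keyed highwayhash and using made-up constants (the real values live in the pex package's params.go, deleted in this commit):

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

const (
	newBucketsPerGroup = 32  // illustrative; see internal/p2p/pex/params.go
	newBucketCount     = 256 // illustrative
)

// hash64 concatenates the parts and keeps the first 8 bytes of the
// digest, standing in for the keyed 64-bit highwayhash above.
func hash64(parts ...[]byte) uint64 {
	h := sha256.New()
	for _, p := range parts {
		h.Write(p)
	}
	return binary.BigEndian.Uint64(h.Sum(nil)[:8])
}

// calcNewBucket follows the quoted formula:
// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets
func calcNewBucket(key, addrGroup, srcGroup string) int {
	slot := hash64([]byte(key), []byte(addrGroup), []byte(srcGroup)) % newBucketsPerGroup
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], slot)
	return int(hash64([]byte(key), []byte(srcGroup), buf[:]) % newBucketCount)
}

func main() {
	// Two addresses in the same /16 group reported by the same source
	// always land in the same bucket, bounding eclipse-style flooding.
	fmt.Println(calcNewBucket("secret-key", "1.2.0.0", "9.8.0.0"))
	fmt.Println(calcNewBucket("secret-key", "1.2.0.0", "9.8.0.0"))
}
```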

internal/p2p/pex/addrbook_test.go (+0, -777)

@@ -1,777 +0,0 @@
package pex
import (
"encoding/hex"
"fmt"
"io/ioutil"
"math"
mrand "math/rand"
"net"
"os"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
)
// FIXME These tests should not rely on .(*addrBook) assertions
func TestAddrBookPickAddress(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
// 0 addresses
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
assert.Zero(t, book.Size())
addr := book.PickAddress(50)
assert.Nil(t, addr, "expected no address")
randAddrs := randNetAddressPairs(t, 1)
addrSrc := randAddrs[0]
err := book.AddAddress(addrSrc.addr, addrSrc.src)
require.NoError(t, err)
// pick an address when we only have new address
addr = book.PickAddress(0)
assert.NotNil(t, addr, "expected an address")
addr = book.PickAddress(50)
assert.NotNil(t, addr, "expected an address")
addr = book.PickAddress(100)
assert.NotNil(t, addr, "expected an address")
// pick an address when we only have old address
book.MarkGood(addrSrc.addr.ID)
addr = book.PickAddress(0)
assert.NotNil(t, addr, "expected an address")
addr = book.PickAddress(50)
assert.NotNil(t, addr, "expected an address")
// in this case, nNew==0 but we biased 100% to new, so we return nil
addr = book.PickAddress(100)
assert.Nil(t, addr, "did not expect an address")
}
func TestAddrBookSaveLoad(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
// 0 addresses
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
book.Save()
book = NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
err := book.Start()
require.NoError(t, err)
assert.True(t, book.Empty())
// 100 addresses
randAddrs := randNetAddressPairs(t, 100)
for _, addrSrc := range randAddrs {
err := book.AddAddress(addrSrc.addr, addrSrc.src)
require.NoError(t, err)
}
assert.Equal(t, 100, book.Size())
book.Save()
book = NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
err = book.Start()
require.NoError(t, err)
assert.Equal(t, 100, book.Size())
}
func TestAddrBookLookup(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
randAddrs := randNetAddressPairs(t, 100)
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
for _, addrSrc := range randAddrs {
addr := addrSrc.addr
src := addrSrc.src
err := book.AddAddress(addr, src)
require.NoError(t, err)
ka := book.HasAddress(addr)
assert.True(t, ka, "Expected to find KnownAddress %v but it wasn't there.", addr)
}
}
func TestAddrBookPromoteToOld(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
randAddrs := randNetAddressPairs(t, 100)
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
for _, addrSrc := range randAddrs {
err := book.AddAddress(addrSrc.addr, addrSrc.src)
require.NoError(t, err)
}
// Attempt all addresses.
for _, addrSrc := range randAddrs {
book.MarkAttempt(addrSrc.addr)
}
// Promote half of them
for i, addrSrc := range randAddrs {
if i%2 == 0 {
book.MarkGood(addrSrc.addr.ID)
}
}
// TODO: do more testing :)
selection := book.GetSelection()
t.Logf("selection: %v", selection)
if len(selection) > book.Size() {
t.Errorf("selection must not be bigger than the book")
}
selection = book.GetSelectionWithBias(30)
t.Logf("selection: %v", selection)
if len(selection) > book.Size() {
t.Errorf("selection with bias must not be bigger than the book")
}
assert.Equal(t, 100, book.Size(), "expecting book size to be 100")
}
func TestAddrBookHandlesDuplicates(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
randAddrs := randNetAddressPairs(t, 100)
differentSrc := randIPv4Address(t)
for _, addrSrc := range randAddrs {
err := book.AddAddress(addrSrc.addr, addrSrc.src)
require.NoError(t, err)
err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate
require.NoError(t, err)
err = book.AddAddress(addrSrc.addr, differentSrc) // different src
require.NoError(t, err)
}
assert.Equal(t, 100, book.Size())
}
type netAddressPair struct {
addr *p2p.NetAddress
src *p2p.NetAddress
}
func randNetAddressPairs(t *testing.T, n int) []netAddressPair {
randAddrs := make([]netAddressPair, n)
for i := 0; i < n; i++ {
randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)}
}
return randAddrs
}
func randIPv4Address(t *testing.T) *p2p.NetAddress {
for {
ip := fmt.Sprintf("%v.%v.%v.%v",
mrand.Intn(254)+1,
mrand.Intn(255),
mrand.Intn(255),
mrand.Intn(255),
)
port := mrand.Intn(65535-1) + 1
id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength)))
idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port))
addr, err := types.NewNetAddressString(idAddr)
assert.Nil(t, err, "error generating rand network address")
if addr.Routable() {
return addr
}
}
}
func TestAddrBookRemoveAddress(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
addr := randIPv4Address(t)
err := book.AddAddress(addr, addr)
require.NoError(t, err)
assert.Equal(t, 1, book.Size())
book.RemoveAddress(addr)
assert.Equal(t, 0, book.Size())
nonExistingAddr := randIPv4Address(t)
book.RemoveAddress(nonExistingAddr)
assert.Equal(t, 0, book.Size())
}
func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) {
// create a book with 10 addresses, 1 good/old and 9 new
book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.NotNil(t, addrs)
assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book)
}
func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) {
// create a book with 10 addresses, 9 good/old and 1 new
book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.NotNil(t, addrs)
assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book)
}
func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) {
book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.Nil(t, addrs)
}
func TestAddrBookGetSelection(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
// 1) empty book
assert.Empty(t, book.GetSelection())
// 2) add one address
addr := randIPv4Address(t)
err := book.AddAddress(addr, addr)
require.NoError(t, err)
assert.Equal(t, 1, len(book.GetSelection()))
assert.Equal(t, addr, book.GetSelection()[0])
// 3) add a bunch of addresses
randAddrs := randNetAddressPairs(t, 100)
for _, addrSrc := range randAddrs {
err := book.AddAddress(addrSrc.addr, addrSrc.src)
require.NoError(t, err)
}
// check there are no duplicates
addrs := make(map[string]*p2p.NetAddress)
selection := book.GetSelection()
for _, addr := range selection {
if dup, ok := addrs[addr.String()]; ok {
t.Fatalf("selection %v contains duplicates %v", selection, dup)
}
addrs[addr.String()] = addr
}
if len(selection) > book.Size() {
t.Errorf("selection %v must not be bigger than the book", selection)
}
}
func TestAddrBookGetSelectionWithBias(t *testing.T) {
const biasTowardsNewAddrs = 30
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
// 1) empty book
selection := book.GetSelectionWithBias(biasTowardsNewAddrs)
assert.Empty(t, selection)
// 2) add one address
addr := randIPv4Address(t)
err := book.AddAddress(addr, addr)
require.NoError(t, err)
selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
assert.Equal(t, 1, len(selection))
assert.Equal(t, addr, selection[0])
// 3) add a bunch of addresses
randAddrs := randNetAddressPairs(t, 100)
for _, addrSrc := range randAddrs {
err := book.AddAddress(addrSrc.addr, addrSrc.src)
require.NoError(t, err)
}
// check there are no duplicates
addrs := make(map[string]*p2p.NetAddress)
selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
for _, addr := range selection {
if dup, ok := addrs[addr.String()]; ok {
t.Fatalf("selection %v contains duplicates %v", selection, dup)
}
addrs[addr.String()] = addr
}
if len(selection) > book.Size() {
t.Fatalf("selection %v must not be bigger than the book", selection)
}
// 4) mark 80% of the addresses as good
randAddrsLen := len(randAddrs)
for i, addrSrc := range randAddrs {
if int((float64(i)/float64(randAddrsLen))*100) >= 20 {
book.MarkGood(addrSrc.addr.ID)
}
}
selection = book.GetSelectionWithBias(biasTowardsNewAddrs)
// check that ~70% of addresses returned are good
good := 0
for _, addr := range selection {
if book.IsGood(addr) {
good++
}
}
got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs
// compute some slack to protect against small differences due to rounding:
slack := int(math.Round(float64(100) / float64(len(selection))))
if got > expected+slack {
t.Fatalf(
"got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
got,
expected,
good,
len(selection),
)
}
if got < expected-slack {
t.Fatalf(
"got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)",
got,
expected,
good,
len(selection),
)
}
}
func TestAddrBookHasAddress(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
addr := randIPv4Address(t)
err := book.AddAddress(addr, addr)
require.NoError(t, err)
assert.True(t, book.HasAddress(addr))
book.RemoveAddress(addr)
assert.False(t, book.HasAddress(addr))
}
func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) {
t.Helper()
addrs := make([]*p2p.NetAddress, numAddrs)
for i := 0; i < numAddrs; i++ {
addrs[i] = randIPv4Address(t)
}
private := make([]string, numAddrs)
for i, addr := range addrs {
private[i] = string(addr.ID)
}
return addrs, private
}
func TestBanBadPeers(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
addr := randIPv4Address(t)
_ = book.AddAddress(addr, addr)
book.MarkBad(addr, 1*time.Second)
// addr should not be reachable
assert.False(t, book.HasAddress(addr))
assert.True(t, book.IsBanned(addr))
err := book.AddAddress(addr, addr)
// book should not add an address from the blacklist
assert.Error(t, err)
time.Sleep(1 * time.Second)
book.ReinstateBadPeers()
// address should be reinstated in the new bucket
assert.EqualValues(t, 1, book.Size())
assert.True(t, book.HasAddress(addr))
assert.False(t, book.IsGood(addr))
}
func TestAddrBookEmpty(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
// Check that a new book is empty
require.True(t, book.Empty())
// Check that book with our address is empty
book.AddOurAddress(randIPv4Address(t))
require.True(t, book.Empty())
// Check that book with private addrs is empty
_, privateIds := testCreatePrivateAddrs(t, 5)
book.AddPrivateIDs(privateIds)
require.True(t, book.Empty())
// Check that book with address is not empty
err := book.AddAddress(randIPv4Address(t), randIPv4Address(t))
require.NoError(t, err)
require.False(t, book.Empty())
}
func TestPrivatePeers(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
addrs, private := testCreatePrivateAddrs(t, 10)
book.AddPrivateIDs(private)
// private addrs must not be added
for _, addr := range addrs {
err := book.AddAddress(addr, addr)
if assert.Error(t, err) {
_, ok := err.(ErrAddrBookPrivate)
assert.True(t, ok)
}
}
// addrs coming from private peers must not be added
err := book.AddAddress(randIPv4Address(t), addrs[0])
if assert.Error(t, err) {
_, ok := err.(ErrAddrBookPrivateSrc)
assert.True(t, ok)
}
}
func testAddrBookAddressSelection(t *testing.T, bookSize int) {
// generate all combinations of old (m) and new addresses
for nBookOld := 0; nBookOld <= bookSize; nBookOld++ {
nBookNew := bookSize - nBookOld
dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld)
// create book and get selection
book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew)
addrs := book.GetSelectionWithBias(biasToSelectNewPeers)
assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr)
nAddrs := len(addrs)
assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr)
// check there are no nil addresses
for _, addr := range addrs {
if addr == nil {
t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs)
}
}
// XXX: shadowing
nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)
// Given:
// n - num new addrs, m - num old addrs
// r - num addrs returned in the selection (nAddrs)
// k - num new addrs expected in the beginning (based on bias %)
// i=min(n, max(k,r-m)), aka expNew
// j=min(m, r-i), aka expOld
//
// We expect this layout:
// indices: 0...i-1 i...i+j-1
// addresses: N0..Ni-1 O0..Oj-1
//
// There is at least one partition and at most three.
var (
k = percentageOfNum(biasToSelectNewPeers, nAddrs)
expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld))
expOld = tmmath.MinInt(nOld, nAddrs-expNew)
)
// Verify that the number of old and new addresses are as expected
if nNew != expNew {
t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew)
}
if nOld != expOld {
t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld)
}
// Verify that the order of addresses is as expected
// Get the sequence types and lengths of the selection
seqLens, seqTypes, err := analyseSelectionLayout(book, addrs)
assert.NoError(t, err, "%s", dbgStr)
// Build a list with the expected lengths of partitions and another with the expected types, e.g.:
// expSeqLens = [10, 22], expSeqTypes = [1, 2]
// means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses.
var expSeqLens []int
var expSeqTypes []int
switch {
case expOld == 0: // all new addresses
expSeqLens = []int{nAddrs}
expSeqTypes = []int{1}
case expNew == 0: // all old addresses
expSeqLens = []int{nAddrs}
expSeqTypes = []int{2}
case nAddrs-expNew-expOld == 0: // new addresses, old addresses
expSeqLens = []int{expNew, expOld}
expSeqTypes = []int{1, 2}
}
assert.Equal(t, expSeqLens, seqLens,
"%s - expected sequence lengths of old/new %v, got %v",
dbgStr, expSeqLens, seqLens)
assert.Equal(t, expSeqTypes, seqTypes,
"%s - expected sequence types (1-new, 2-old) was %v, got %v",
dbgStr, expSeqTypes, seqTypes)
}
}
func TestMultipleAddrBookAddressSelection(t *testing.T) {
// test books with smaller size, < N
const N = 32
for bookSize := 1; bookSize < N; bookSize++ {
testAddrBookAddressSelection(t, bookSize)
}
// Test for two books with sizes from following ranges
ranges := [...][]int{{33, 100}, {100, 175}}
bookSizes := make([]int, 0, len(ranges))
for _, r := range ranges {
bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0])
}
t.Logf("Testing address selection for the following book sizes %v\n", bookSizes)
for _, bookSize := range bookSizes {
testAddrBookAddressSelection(t, bookSize)
}
}
func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) {
fname := createTempFileName(t, "addrbook_test")
// This test adds a peer to the address book and marks it good.
// It then attempts to override the peer's IP by adding a peer with the same ID
// but a different IP. We distinguish the IPs as "RealIP" and "OverrideAttemptIP".
peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5"
peerRealIP := "1.1.1.1:26656"
peerOverrideAttemptIP := "2.2.2.2:26656"
SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656"
// There is a chance that AddAddress will ignore the new peer it is given.
// So we repeat the override attempt several times,
// to ensure we aren't in a case that got probabilistically ignored.
numOverrideAttempts := 10
peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP)
require.Nil(t, err)
peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP)
require.Nil(t, err)
src, err := types.NewNetAddressString(SrcAddr)
require.Nil(t, err)
book := NewAddrBook(fname, true)
book.SetLogger(log.TestingLogger())
err = book.AddAddress(peerRealAddr, src)
require.Nil(t, err)
book.MarkAttempt(peerRealAddr)
book.MarkGood(peerRealAddr.ID)
// Double check that adding a peer again doesn't error
err = book.AddAddress(peerRealAddr, src)
require.Nil(t, err)
// Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2)
// This should just be ignored, and not error.
for i := 0; i < numOverrideAttempts; i++ {
err = book.AddAddress(peerOverrideAttemptAddr, src)
require.Nil(t, err)
}
// Now check that the IP was not overridden.
// This is done by sampling several selections from the address book
// and ensuring every returned address has the correct IP.
// Since the book should contain only this one peer, every sample must match.
for i := 0; i < numOverrideAttempts; i++ {
selection := book.GetSelection()
for _, addr := range selection {
require.Equal(t, addr.IP, peerRealAddr.IP)
}
}
}
func TestAddrBookGroupKey(t *testing.T) {
// non-strict routability
testCases := []struct {
name string
ip string
expKey string
}{
// IPv4 normal.
{"ipv4 normal class a", "12.1.2.3", "12.1.0.0"},
{"ipv4 normal class b", "173.1.2.3", "173.1.0.0"},
{"ipv4 normal class c", "196.1.2.3", "196.1.0.0"},
// IPv6/IPv4 translations.
{"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"},
{"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"},
{"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"},
{"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"},
// Tor.
{"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"},
{"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"},
{"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"},
// IPv6 normal.
{"ipv6 normal", "2602:100::1", "2602:100::"},
{"ipv6 normal 2", "2602:0100::1234", "2602:100::"},
{"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"},
{"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"},
}
for i, tc := range testCases {
nip := net.ParseIP(tc.ip)
key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false)
assert.Equal(t, tc.expKey, key, "#%d", i)
}
// strict routability
testCases = []struct {
name string
ip string
expKey string
}{
// Local addresses.
{"ipv4 localhost", "127.0.0.1", "local"},
{"ipv6 localhost", "::1", "local"},
{"ipv4 zero", "0.0.0.0", "local"},
{"ipv4 first octet zero", "0.1.2.3", "local"},
// Unroutable addresses.
{"ipv4 invalid bcast", "255.255.255.255", "unroutable"},
{"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"},
{"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"},
{"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"},
{"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"},
{"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"},
{"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"},
{"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"},
{"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"},
}
for i, tc := range testCases {
nip := net.ParseIP(tc.ip)
key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true)
assert.Equal(t, tc.expKey, key, "#%d", i)
}
}
func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) {
nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)
assert.Equal(t, m, nOld, "old addresses")
assert.Equal(t, n, nNew, "new addresses")
}
func createTempFileName(t *testing.T, prefix string) string {
t.Helper()
f, err := ioutil.TempFile("", prefix)
if err != nil {
panic(err)
}
fname := f.Name()
if err := f.Close(); err != nil {
t.Fatal(err)
}
t.Cleanup(func() { _ = os.Remove(fname) })
return fname
}
func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) {
t.Helper()
fname = createTempFileName(t, "addrbook_test")
book = NewAddrBook(fname, true).(*addrBook)
book.SetLogger(log.TestingLogger())
assert.Zero(t, book.Size())
randAddrs := randNetAddressPairs(t, nOld)
for _, addr := range randAddrs {
err := book.AddAddress(addr.addr, addr.src)
require.NoError(t, err)
book.MarkGood(addr.addr.ID)
}
randAddrs = randNetAddressPairs(t, nNew)
for _, addr := range randAddrs {
err := book.AddAddress(addr.addr, addr.src)
require.NoError(t, err)
}
return
}
func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) {
for _, addr := range addrs {
if book.IsGood(addr) {
nOld++
} else {
nNew++
}
}
return
}
// Analyze the layout of the selection specified by 'addrs'
// Returns:
// - seqLens - the lengths of the sequences of addresses of same type
// - seqTypes - the types of sequences in selection
func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) {
// address types are: 0 - nil, 1 - new, 2 - old
var (
prevType = 0
currentSeqLen = 0
)
for _, addr := range addrs {
addrType := 0
if book.IsGood(addr) {
addrType = 2
} else {
addrType = 1
}
if addrType != prevType && prevType != 0 {
seqLens = append(seqLens, currentSeqLen)
seqTypes = append(seqTypes, prevType)
currentSeqLen = 0
}
currentSeqLen++
prevType = addrType
}
seqLens = append(seqLens, currentSeqLen)
seqTypes = append(seqTypes, prevType)
return
}
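// For example, a selection laid out as [new, new, old, old, old] yields
// seqLens = [2, 3] and seqTypes = [1, 2]: a run of two new addresses
// followed by a run of three old ones.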

+ 0
- 24
internal/p2p/pex/bench_test.go

@ -1,24 +0,0 @@
package pex
import (
"testing"
"github.com/tendermint/tendermint/types"
)
func BenchmarkAddrBook_hash(b *testing.B) {
book := &addrBook{
ourAddrs: make(map[string]struct{}),
privateIDs: make(map[types.NodeID]struct{}),
addrLookup: make(map[types.NodeID]*knownAddress),
badPeers: make(map[types.NodeID]*knownAddress),
filePath: "",
routabilityStrict: true,
}
book.init()
msg := []byte(`foobar`)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, _ = book.hash(msg)
}
}

+ 0
- 11
internal/p2p/pex/errors.go

@ -15,17 +15,6 @@ func (err ErrAddrBookNonRoutable) Error() string {
return fmt.Sprintf("Cannot add non-routable address %v", err.Addr)
}
type errAddrBookOldAddressNewBucket struct {
Addr *p2p.NetAddress
BucketID int
}
func (err errAddrBookOldAddressNewBucket) Error() string {
return fmt.Sprintf("failed consistency check!"+
" Cannot add pre-existing address %v into new bucket %v",
err.Addr, err.BucketID)
}
type ErrAddrBookSelf struct {
Addr *p2p.NetAddress
}


+ 0
- 83
internal/p2p/pex/file.go

@ -1,83 +0,0 @@
package pex
import (
"encoding/json"
"fmt"
"os"
"github.com/tendermint/tendermint/internal/libs/tempfile"
)
/* Loading & Saving */
type addrBookJSON struct {
Key string `json:"key"`
Addrs []*knownAddress `json:"addrs"`
}
func (a *addrBook) saveToFile(filePath string) {
a.mtx.Lock()
defer a.mtx.Unlock()
a.Logger.Info("Saving AddrBook to file", "size", a.size())
addrs := make([]*knownAddress, 0, len(a.addrLookup))
for _, ka := range a.addrLookup {
addrs = append(addrs, ka)
}
aJSON := &addrBookJSON{
Key: a.key,
Addrs: addrs,
}
jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
if err != nil {
a.Logger.Error("Failed to save AddrBook to file", "err", err)
return
}
err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644)
if err != nil {
a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err)
}
}
// Returns false if the file does not exist.
// Panics if the file is corrupt.
func (a *addrBook) loadFromFile(filePath string) bool {
// If doesn't exist, do nothing.
_, err := os.Stat(filePath)
if os.IsNotExist(err) {
return false
}
// Load addrBookJSON{}
r, err := os.Open(filePath)
if err != nil {
panic(fmt.Sprintf("Error opening file %s: %v", filePath, err))
}
defer r.Close()
aJSON := &addrBookJSON{}
dec := json.NewDecoder(r)
err = dec.Decode(aJSON)
if err != nil {
panic(fmt.Sprintf("Error reading file %s: %v", filePath, err))
}
// Restore all the fields...
// Restore the key
a.key = aJSON.Key
// Restore .bucketsNew & .bucketsOld
for _, ka := range aJSON.Addrs {
for _, bucketIndex := range ka.Buckets {
bucket := a.getBucket(ka.BucketType, bucketIndex)
bucket[ka.Addr.String()] = ka
}
a.addrLookup[ka.ID()] = ka
if ka.BucketType == bucketTypeNew {
a.nNew++
} else {
a.nOld++
}
}
return true
}

+ 0
- 141
internal/p2p/pex/known_address.go

@ -1,141 +0,0 @@
package pex
import (
"time"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
// knownAddress tracks information about a known network address
// that is used to determine how viable an address is.
type knownAddress struct {
Addr *p2p.NetAddress `json:"addr"`
Src *p2p.NetAddress `json:"src"`
Buckets []int `json:"buckets"`
Attempts int32 `json:"attempts"`
BucketType byte `json:"bucket_type"`
LastAttempt time.Time `json:"last_attempt"`
LastSuccess time.Time `json:"last_success"`
LastBanTime time.Time `json:"last_ban_time"`
}
func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress {
return &knownAddress{
Addr: addr,
Src: src,
Attempts: 0,
LastAttempt: time.Now(),
BucketType: bucketTypeNew,
Buckets: nil,
}
}
func (ka *knownAddress) ID() types.NodeID {
return ka.Addr.ID
}
func (ka *knownAddress) isOld() bool {
return ka.BucketType == bucketTypeOld
}
func (ka *knownAddress) isNew() bool {
return ka.BucketType == bucketTypeNew
}
func (ka *knownAddress) markAttempt() {
now := time.Now()
ka.LastAttempt = now
ka.Attempts++
}
func (ka *knownAddress) markGood() {
now := time.Now()
ka.LastAttempt = now
ka.Attempts = 0
ka.LastSuccess = now
}
func (ka *knownAddress) ban(banTime time.Duration) {
if ka.LastBanTime.Before(time.Now().Add(banTime)) {
ka.LastBanTime = time.Now().Add(banTime)
}
}
func (ka *knownAddress) isBanned() bool {
return ka.LastBanTime.After(time.Now())
}
func (ka *knownAddress) addBucketRef(bucketIdx int) int {
for _, bucket := range ka.Buckets {
if bucket == bucketIdx {
// TODO refactor to return error?
// log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
return -1
}
}
ka.Buckets = append(ka.Buckets, bucketIdx)
return len(ka.Buckets)
}
func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
buckets := []int{}
for _, bucket := range ka.Buckets {
if bucket != bucketIdx {
buckets = append(buckets, bucket)
}
}
if len(buckets) != len(ka.Buckets)-1 {
// TODO refactor to return error?
// log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
return -1
}
ka.Buckets = buckets
return len(ka.Buckets)
}
/*
An address is bad if the address in question is a New address, has not been tried in the last
minute, and meets one of the following criteria:
1) It claims to be from the future
2) It hasn't been seen in over a week
3) It has failed at least three times and never succeeded
4) It has failed ten times in the last week
All addresses that meet these criteria are assumed to be worthless and not
worth keeping hold of.
*/
func (ka *knownAddress) isBad() bool {
// Is Old --> good
if ka.BucketType == bucketTypeOld {
return false
}
// Has been attempted in the last minute --> good
if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) {
return false
}
// TODO: From the future?
// Too old?
// TODO: should be a timestamp of last seen, not just last attempt
if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
return true
}
// Never succeeded?
if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
return true
}
// Hasn't succeeded in too long?
if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
ka.Attempts >= maxFailures {
return true
}
return false
}
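// For example, a new-bucket address not tried in the last minute is bad if
// its last attempt was over numMissingDays (7) days ago, or if it has
// numRetries (3) failed attempts and no recorded success.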

+ 0
- 55
internal/p2p/pex/params.go

@ -1,55 +0,0 @@
package pex
import "time"
const (
// number of addresses under which the address manager will claim to need more addresses.
needAddressThreshold = 1000
// interval used to dump the address cache to disk for future use.
dumpAddressInterval = time.Minute * 2
// max addresses in each old address bucket.
oldBucketSize = 64
// buckets we split old addresses over.
oldBucketCount = 64
// max addresses in each new address bucket.
newBucketSize = 64
// buckets that we spread new addresses over.
newBucketCount = 256
// old buckets over which an address group will be spread.
oldBucketsPerGroup = 4
// new buckets over which a source address group will be spread.
newBucketsPerGroup = 32
// buckets a frequently seen new address may end up in.
maxNewBucketsPerAddress = 4
// days after which we assume an address has vanished
// if we have not seen it announced in that long.
numMissingDays = 7
// tries without a single success before we assume an address is bad.
numRetries = 3
// max failures we will accept without a success before considering an address bad.
maxFailures = 10 // ?
// days since the last success before we will consider evicting an address.
minBadDays = 7
// % of total addresses known returned by GetSelection.
getSelectionPercent = 23
// min addresses that must be returned by GetSelection. Useful for bootstrapping.
minGetSelection = 32
// max addresses returned by GetSelection
// NOTE: this must match "maxMsgSize"
maxGetSelection = 250
)
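// A worked example of the GetSelection sizing above: a book of 500 known
// addresses yields roughly 23% of them (115); a book of 50 is padded up to
// the minGetSelection floor of 32; and a book of 5000 is capped at
// maxGetSelection (250).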

+ 0
- 862
internal/p2p/pex/pex_reactor.go

@ -1,862 +0,0 @@
package pex
import (
"errors"
"fmt"
"net"
"sync"
"time"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/cmap"
tmmath "github.com/tendermint/tendermint/libs/math"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
type Peer = p2p.Peer
const (
// PexChannel is a channel for PEX messages
PexChannel = byte(0x00)
// over-estimate of max NetAddress size
// hexID (40) + IP (16) + Port (2) + Name (100) ...
// NOTE: don't use massive DNS names ..
maxAddressSize = 256
// NOTE: amplification factor!
// small request results in up to maxMsgSize response
maxMsgSize = maxAddressSize * maxGetSelection
// ensure we have enough peers
defaultEnsurePeersPeriod = 30 * time.Second
// Seed/Crawler constants
// minTimeBetweenCrawls is a minimum time between attempts to crawl a peer.
minTimeBetweenCrawls = 2 * time.Minute
// interval at which we crawl some peers
crawlPeerPeriod = 30 * time.Second
maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h)
// if a node connects to a seed, it does not have any trusted peers.
// Especially in the beginning, a node should have more trusted peers than
// untrusted ones.
biasToSelectNewPeers = 30 // 70 to select good peers
// if a peer is marked bad, it will be banned for at least this time period
defaultBanTime = 24 * time.Hour
)
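// Worked example of the amplification bound above: a PexResponse can carry
// up to maxGetSelection (250) addresses of maxAddressSize (256) bytes each,
// i.e. maxMsgSize = 256 * 250 = 64000 bytes, in reply to a request of only
// a few bytes.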
type errMaxAttemptsToDial struct {
}
func (e errMaxAttemptsToDial) Error() string {
return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial)
}
type errTooEarlyToDial struct {
backoffDuration time.Duration
lastDialed time.Time
}
func (e errTooEarlyToDial) Error() string {
return fmt.Sprintf(
"too early to dial (backoff duration: %d, last dialed: %v, time since: %v)",
e.backoffDuration, e.lastDialed, time.Since(e.lastDialed))
}
// Reactor handles PEX (peer exchange) and ensures that an
// adequate number of peers are connected to the switch.
//
// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
//
// ## Preventing abuse
//
// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
type Reactor struct {
p2p.BaseReactor
book AddrBook
config *ReactorConfig
ensurePeersPeriod time.Duration // TODO: should go in the config
// maps to prevent abuse
requestsSent *cmap.CMap // ID->struct{}: unanswered send requests
lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us
seedAddrs []*p2p.NetAddress
attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
// seed/crawled mode fields
crawlPeerInfos map[types.NodeID]crawlPeerInfo
}
func (r *Reactor) minReceiveRequestInterval() time.Duration {
// NOTE: must be less than ensurePeersPeriod, otherwise we'll request
// peers too quickly from others and they'll think we're bad!
return r.ensurePeersPeriod / 3
}
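// With the default ensurePeersPeriod of 30s this works out to at most one
// inbound PEX request per peer every 10s.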
// ReactorConfig holds reactor specific configuration data.
type ReactorConfig struct {
// Seed/Crawler mode
SeedMode bool
// We want seeds to only advertise good peers. Therefore they should wait at
// least as long as we expect it to take for a peer to become good before
// disconnecting.
SeedDisconnectWaitPeriod time.Duration
// Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
PersistentPeersMaxDialPeriod time.Duration
// Seeds is a list of addresses the reactor may use
// if it can't connect to peers in the addrbook.
Seeds []string
}
type _attemptsToDial struct {
number int
lastDialed time.Time
}
// NewReactor creates new PEX reactor.
func NewReactor(b AddrBook, config *ReactorConfig) *Reactor {
r := &Reactor{
book: b,
config: config,
ensurePeersPeriod: defaultEnsurePeersPeriod,
requestsSent: cmap.NewCMap(),
lastReceivedRequests: cmap.NewCMap(),
crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo),
}
r.BaseReactor = *p2p.NewBaseReactor("PEX", r)
return r
}
// OnStart implements BaseService
func (r *Reactor) OnStart() error {
err := r.book.Start()
if err != nil && err != service.ErrAlreadyStarted {
return err
}
numOnline, seedAddrs, err := r.checkSeeds()
if err != nil {
return err
} else if numOnline == 0 && r.book.Empty() {
return errors.New("address book is empty and couldn't resolve any seed nodes")
}
r.seedAddrs = seedAddrs
// Check if this node should run
// in seed/crawler mode
if r.config.SeedMode {
go r.crawlPeersRoutine()
} else {
go r.ensurePeersRoutine()
}
return nil
}
// OnStop implements BaseService
func (r *Reactor) OnStop() {
if err := r.book.Stop(); err != nil {
r.Logger.Error("Error stopping address book", "err", err)
}
}
// GetChannels implements Reactor
func (r *Reactor) GetChannels() []*conn.ChannelDescriptor {
return []*conn.ChannelDescriptor{
{
ID: PexChannel,
Priority: 1,
SendQueueCapacity: 10,
RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 200,
},
}
}
// AddPeer implements Reactor by adding peer to the address book (if inbound)
// or by requesting more addresses (if outbound).
func (r *Reactor) AddPeer(p Peer) {
if p.IsOutbound() {
// For outbound peers, the address is already in the book -
// either via DialPeersAsync or r.Receive.
// Ask it for more peers if we need them.
if r.book.NeedMoreAddrs() {
r.RequestAddrs(p)
}
} else {
// inbound peer is its own source
addr, err := p.NodeInfo().NetAddress()
if err != nil {
r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p)
return
}
// Make it explicit that addr and src are the same for an inbound peer.
src := addr
// add to book. don't RequestAddrs right away because
// we don't trust inbound as much - let ensurePeersRoutine handle it.
err = r.book.AddAddress(addr, src)
r.logErrAddrBook(err)
}
}
// RemovePeer implements Reactor by resetting peer's requests info.
func (r *Reactor) RemovePeer(p Peer, reason interface{}) {
id := string(p.ID())
r.requestsSent.Delete(id)
r.lastReceivedRequests.Delete(id)
}
func (r *Reactor) logErrAddrBook(err error) {
if err != nil {
switch err.(type) {
case ErrAddrBookNilAddr:
r.Logger.Error("Failed to add new address", "err", err)
default:
// non-routable, self, full book, private, etc.
r.Logger.Debug("Failed to add new address", "err", err)
}
}
}
// Receive implements Reactor by handling incoming PEX messages.
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) {
msg, err := decodeMsg(msgBytes)
if err != nil {
r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
r.Switch.StopPeerForError(src, err)
return
}
r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg)
switch msg := msg.(type) {
case *tmp2p.PexRequest:
// NOTE: this is a prime candidate for amplification attacks,
// so it's important we
// 1) restrict how frequently peers can request
// 2) limit the output size
// If we're a seed and this is an inbound peer,
// respond once and disconnect.
if r.config.SeedMode && !src.IsOutbound() {
id := string(src.ID())
v := r.lastReceivedRequests.Get(id)
if v != nil {
// FlushStop/StopPeer are already
// running in a go-routine.
return
}
r.lastReceivedRequests.Set(id, time.Now())
// Send addrs and disconnect
r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers))
go func() {
// In a go-routine so it doesn't block .Receive.
src.FlushStop()
r.Switch.StopPeerGracefully(src)
}()
} else {
// Check we're not receiving requests too frequently.
if err := r.receiveRequest(src); err != nil {
r.Switch.StopPeerForError(src, err)
r.book.MarkBad(src.SocketAddr(), defaultBanTime)
return
}
r.SendAddrs(src, r.book.GetSelection())
}
case *tmp2p.PexResponse:
// If we asked for addresses, add them to the book
addrs, err := NetAddressesFromProto(msg.Addresses)
if err != nil {
r.Switch.StopPeerForError(src, err)
r.book.MarkBad(src.SocketAddr(), defaultBanTime)
return
}
err = r.ReceiveAddrs(addrs, src)
if err != nil {
r.Switch.StopPeerForError(src, err)
if err == ErrUnsolicitedList {
r.book.MarkBad(src.SocketAddr(), defaultBanTime)
}
return
}
default:
r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg))
}
}
// enforces a minimum amount of time between requests
func (r *Reactor) receiveRequest(src Peer) error {
id := string(src.ID())
v := r.lastReceivedRequests.Get(id)
if v == nil {
// initialize with empty time
lastReceived := time.Time{}
r.lastReceivedRequests.Set(id, lastReceived)
return nil
}
lastReceived := v.(time.Time)
if lastReceived.Equal(time.Time{}) {
// first time gets a free pass. then we start tracking the time
lastReceived = time.Now()
r.lastReceivedRequests.Set(id, lastReceived)
return nil
}
now := time.Now()
minInterval := r.minReceiveRequestInterval()
if now.Sub(lastReceived) < minInterval {
return fmt.Errorf(
"peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting",
src.ID(),
lastReceived,
now,
minInterval,
)
}
r.lastReceivedRequests.Set(id, now)
return nil
}
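// Timeline sketch: request #1 initializes the entry with a zero time,
// request #2 starts the clock, and a request #3 that arrives within
// minReceiveRequestInterval (10s with the defaults) returns an error,
// which leads the caller to disconnect and ban the peer.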
// RequestAddrs asks peer for more addresses if we do not already have a
// request out for this peer.
func (r *Reactor) RequestAddrs(p Peer) {
id := string(p.ID())
if _, exists := r.requestsSent.GetOrSet(id, struct{}{}); exists {
return
}
r.Logger.Debug("Request addrs", "from", p)
p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{}))
}
// ReceiveAddrs adds the given addrs to the addrbook if there's an open
// request for this peer and deletes the open request.
// If there's no open request for the src peer, it returns an error.
func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error {
id := string(src.ID())
if !r.requestsSent.Has(id) {
return ErrUnsolicitedList
}
r.requestsSent.Delete(id)
srcAddr, err := src.NodeInfo().NetAddress()
if err != nil {
return err
}
srcIsSeed := false
for _, seedAddr := range r.seedAddrs {
if seedAddr.Equals(srcAddr) {
srcIsSeed = true
break
}
}
for _, netAddr := range addrs {
// NOTE: we check netAddr validity and routability in book#AddAddress.
err = r.book.AddAddress(netAddr, srcAddr)
if err != nil {
r.logErrAddrBook(err)
// XXX: should we be strict about incoming data and disconnect from a
// peer here too?
continue
}
// If this address came from a seed node, try to connect to it without
// waiting (#2093)
if srcIsSeed {
r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr)
go func(addr *p2p.NetAddress) {
err := r.dialPeer(addr)
if err != nil {
switch err.(type) {
case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress:
r.Logger.Debug(err.Error(), "addr", addr)
default:
r.Logger.Error(err.Error(), "addr", addr)
}
}
}(netAddr)
}
}
return nil
}
// SendAddrs sends addrs to the peer.
func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)}))
}
// SetEnsurePeersPeriod sets period to ensure peers connected.
func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) {
r.ensurePeersPeriod = d
}
// Ensures that sufficient peers are connected. (continuous)
func (r *Reactor) ensurePeersRoutine() {
var (
seed = tmrand.NewRand()
jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
)
// Randomize first round of communication to avoid thundering herd.
// If no peers are present, start connecting directly so we guarantee swift
// setup with the help of configured seeds.
if r.nodeHasSomePeersOrDialingAny() {
time.Sleep(time.Duration(jitter))
}
// fire once immediately.
// ensures we dial the seeds right away if the book is empty
r.ensurePeers()
// fire periodically
ticker := time.NewTicker(r.ensurePeersPeriod)
for {
select {
case <-ticker.C:
r.ensurePeers()
case <-r.Quit():
ticker.Stop()
return
}
}
}
// ensurePeers ensures that sufficient peers are connected. (once)
//
// The address book is a heuristic that we haven't perfected yet, or perhaps
// is manually edited by the node operator. It should not be used to compute
// what addresses are already connected or not.
func (r *Reactor) ensurePeers() {
var (
out, in, dial = r.Switch.NumPeers()
numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial)
)
r.Logger.Info(
"Ensure peers",
"numOutPeers", out,
"numInPeers", in,
"numDialing", dial,
"numToDial", numToDial,
)
if numToDial <= 0 {
return
}
// bias to prefer more vetted peers when we have fewer connections.
// not perfect, but somewhat ensures that we prioritize connecting to more-vetted peers.
// NOTE: range here is [10, 90]. Too high ?
newBias := tmmath.MinInt(out, 8)*10 + 10
toDial := make(map[types.NodeID]*p2p.NetAddress)
// Try maxAttempts times to pick numToDial addresses to dial
maxAttempts := numToDial * 3
for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ {
try := r.book.PickAddress(newBias)
if try == nil {
continue
}
if _, selected := toDial[try.ID]; selected {
continue
}
if r.Switch.IsDialingOrExistingAddress(try) {
continue
}
// TODO: consider moving some checks from toDial into here
// so we don't even consider dialing peers that we want to wait
// before dialing again, or have dialed too many times already
r.Logger.Info("Will dial address", "addr", try)
toDial[try.ID] = try
}
// Dial picked addresses
for _, addr := range toDial {
go func(addr *p2p.NetAddress) {
err := r.dialPeer(addr)
if err != nil {
switch err.(type) {
case errMaxAttemptsToDial, errTooEarlyToDial:
r.Logger.Debug(err.Error(), "addr", addr)
default:
r.Logger.Error(err.Error(), "addr", addr)
}
}
}(addr)
}
if r.book.NeedMoreAddrs() {
// Check if banned nodes can be reinstated
r.book.ReinstateBadPeers()
}
if r.book.NeedMoreAddrs() {
// 1) Pick a random peer and ask for more.
peers := r.Switch.Peers().List()
peersCount := len(peers)
if peersCount > 0 {
rand := tmrand.NewRand()
peer := peers[rand.Int()%peersCount]
r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
r.RequestAddrs(peer)
}
// 2) Dial seeds if we are not dialing anyone.
// This is done in addition to asking a peer for addresses to work-around
// peers not participating in PEX.
if len(toDial) == 0 {
r.Logger.Info("No addresses to dial. Falling back to seeds")
r.dialSeeds()
}
}
}
func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) {
_attempts, ok := r.attemptsToDial.Load(addr.DialString())
if !ok {
return
}
atd := _attempts.(_attemptsToDial)
return atd.number, atd.lastDialed
}
func (r *Reactor) dialPeer(addr *p2p.NetAddress) error {
attempts, lastDialed := r.dialAttemptsInfo(addr)
if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial {
r.book.MarkBad(addr, defaultBanTime)
return errMaxAttemptsToDial{}
}
// exponential backoff if it's not our first attempt to dial given address
if attempts > 0 {
rand := tmrand.NewRand()
jitter := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns)
backoffDuration := jitter + ((1 << uint(attempts)) * time.Second)
backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration)
sinceLastDialed := time.Since(lastDialed)
if sinceLastDialed < backoffDuration {
return errTooEarlyToDial{backoffDuration, lastDialed}
}
}
err := r.Switch.DialPeerWithAddress(addr)
if err != nil {
if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok {
return err
}
markAddrInBookBasedOnErr(addr, r.book, err)
switch err.(type) {
case p2p.ErrSwitchAuthenticationFailure:
// NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr
r.attemptsToDial.Delete(addr.DialString())
default:
r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()})
}
return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err)
}
// cleanup any history
r.attemptsToDial.Delete(addr.DialString())
return nil
}
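// Backoff sketch: with attempts = n the wait before redialing is roughly
// 2^n seconds plus up to 1s of jitter, so the final allowed attempt
// (maxAttemptsToDial = 16) waits about 2^16s (~18h); summed over all
// attempts this gives the ~35h total noted next to maxAttemptsToDial.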
// maxBackoffDurationForPeer caps the backoff duration for persistent peers.
func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration {
if r.config.PersistentPeersMaxDialPeriod > 0 &&
planned > r.config.PersistentPeersMaxDialPeriod &&
r.Switch.IsPeerPersistent(addr) {
return r.config.PersistentPeersMaxDialPeriod
}
return planned
}
// checkSeeds checks that the configured seed addresses are well formed.
// Returns the number of seeds we can connect to, along with all seed addrs.
// Returns an error if the user provided any badly formatted seed addresses.
// Doesn't error if a seed node can't be reached.
// numOnline is -1 if no seed nodes were in the initial configuration.
func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) {
lSeeds := len(r.config.Seeds)
if lSeeds == 0 {
return -1, nil, nil
}
netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds)
numOnline = lSeeds - len(errs)
for _, err := range errs {
switch e := err.(type) {
case types.ErrNetAddressLookup:
r.Logger.Error("Connecting to seed failed", "err", e)
default:
return 0, nil, fmt.Errorf("seed node configuration has error: %w", e)
}
}
return numOnline, netAddrs, nil
}
// randomly dial seeds until we connect to one or exhaust them
func (r *Reactor) dialSeeds() {
rand := tmrand.NewRand()
perm := rand.Perm(len(r.seedAddrs))
// perm := r.Switch.rng.Perm(lSeeds)
for _, i := range perm {
// dial a random seed
seedAddr := r.seedAddrs[i]
err := r.Switch.DialPeerWithAddress(seedAddr)
switch err.(type) {
case nil, p2p.ErrCurrentlyDialingOrExistingAddress:
return
}
r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr)
}
// do not write error message if there were no seeds specified in config
if len(r.seedAddrs) > 0 {
r.Switch.Logger.Error("Couldn't connect to any seeds")
}
}
// AttemptsToDial returns the number of attempts to dial a specific address. It
// returns 0 if never attempted or successfully connected.
func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int {
lAttempts, attempted := r.attemptsToDial.Load(addr.DialString())
if attempted {
return lAttempts.(_attemptsToDial).number
}
return 0
}
//----------------------------------------------------------
// Explores the network searching for more peers. (continuous)
// Seed/Crawler Mode causes this node to quickly disconnect
// from peers, except other seed nodes.
func (r *Reactor) crawlPeersRoutine() {
// If we have any seed nodes, consult them first
if len(r.seedAddrs) > 0 {
r.dialSeeds()
} else {
// Do an initial crawl
r.crawlPeers(r.book.GetSelection())
}
// Fire periodically
ticker := time.NewTicker(crawlPeerPeriod)
for {
select {
case <-ticker.C:
r.attemptDisconnects()
r.crawlPeers(r.book.GetSelection())
r.cleanupCrawlPeerInfos()
case <-r.Quit():
return
}
}
}
// nodeHasSomePeersOrDialingAny returns true if the node is connected to some
// peers or dialing them currently.
func (r *Reactor) nodeHasSomePeersOrDialingAny() bool {
out, in, dial := r.Switch.NumPeers()
return out+in+dial > 0
}
// crawlPeerInfo holds temporary data needed for the network crawling
// performed during seed/crawler mode.
type crawlPeerInfo struct {
Addr *p2p.NetAddress `json:"addr"`
// The last time we crawled the peer or attempted to do so.
LastCrawled time.Time `json:"last_crawled"`
}
// crawlPeers will crawl the network looking for new peer addresses.
func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) {
now := time.Now()
for _, addr := range addrs {
peerInfo, ok := r.crawlPeerInfos[addr.ID]
// Do not attempt to connect with peers we recently crawled.
if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls {
continue
}
// Record crawling attempt.
r.crawlPeerInfos[addr.ID] = crawlPeerInfo{
Addr: addr,
LastCrawled: now,
}
err := r.dialPeer(addr)
if err != nil {
switch err.(type) {
case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress:
r.Logger.Debug(err.Error(), "addr", addr)
default:
r.Logger.Error(err.Error(), "addr", addr)
}
continue
}
peer := r.Switch.Peers().Get(addr.ID)
if peer != nil {
r.RequestAddrs(peer)
}
}
}
func (r *Reactor) cleanupCrawlPeerInfos() {
for id, info := range r.crawlPeerInfos {
// If we did not crawl a peer for 24 hours, it means the peer was removed
// from the addrbook => remove it from crawlPeerInfos as well
//
// 10000 addresses / maxGetSelection = 40 cycles to get all addresses in
// the ideal case,
// 40 * crawlPeerPeriod ~ 20 minutes
if time.Since(info.LastCrawled) > 24*time.Hour {
delete(r.crawlPeerInfos, id)
}
}
}
// attemptDisconnects checks if we've been with each peer long enough to disconnect
func (r *Reactor) attemptDisconnects() {
for _, peer := range r.Switch.Peers().List() {
if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod {
continue
}
if peer.IsPersistent() {
continue
}
r.Switch.StopPeerGracefully(peer)
}
}
func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) {
// TODO: detect more "bad peer" scenarios
switch err.(type) {
case p2p.ErrSwitchAuthenticationFailure:
book.MarkBad(addr, defaultBanTime)
default:
book.MarkAttempt(addr)
}
}
//-----------------------------------------------------------------------------
// Messages
// mustEncode proto encodes a tmp2p.Message
func mustEncode(pb proto.Message) []byte {
msg := tmp2p.PexMessage{}
switch pb := pb.(type) {
case *tmp2p.PexRequest:
msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb}
case *tmp2p.PexResponse:
msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb}
default:
panic(fmt.Sprintf("Unknown message type %T", pb))
}
bz, err := msg.Marshal()
if err != nil {
panic(fmt.Errorf("unable to marshal %T: %w", pb, err))
}
return bz
}
func decodeMsg(bz []byte) (proto.Message, error) {
pb := &tmp2p.PexMessage{}
err := pb.Unmarshal(bz)
if err != nil {
return nil, err
}
switch msg := pb.Sum.(type) {
case *tmp2p.PexMessage_PexRequest:
return msg.PexRequest, nil
case *tmp2p.PexMessage_PexResponse:
return msg.PexResponse, nil
default:
return nil, fmt.Errorf("unknown message: %T", msg)
}
}
//-----------------------------------------------------------------------------
// address converters
// NetAddressFromProto converts a Protobuf PexAddress into a native struct.
func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) {
ip := net.ParseIP(pb.IP)
if ip == nil {
return nil, fmt.Errorf("invalid IP address %v", pb.IP)
}
if pb.Port >= 1<<16 {
return nil, fmt.Errorf("invalid port number %v", pb.Port)
}
return &types.NetAddress{
ID: types.NodeID(pb.ID),
IP: ip,
Port: uint16(pb.Port),
}, nil
}
// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice.
func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) {
nas := make([]*types.NetAddress, 0, len(pbs))
for _, pb := range pbs {
na, err := NetAddressFromProto(pb)
if err != nil {
return nil, err
}
nas = append(nas, na)
}
return nas, nil
}
// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice.
func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress {
pbs := make([]tmp2p.PexAddress, 0, len(nas))
for _, na := range nas {
if na != nil {
pbs = append(pbs, tmp2p.PexAddress{
ID: string(na.ID),
IP: na.IP.String(),
Port: uint32(na.Port),
})
}
}
return pbs
}
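// A minimal round-trip sketch of the converters above:
//
//	pbs := NetAddressesToProto(addrs)       // nil entries are silently dropped
//	back, err := NetAddressesFromProto(pbs) // validates each IP and port
//
// Note the asymmetry: the To direction skips nil addresses, while the From
// direction fails on the first invalid entry.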

+ 0
- 680
internal/p2p/pex/pex_reactor_test.go

@ -1,680 +0,0 @@
package pex
import (
"encoding/hex"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/mock"
"github.com/tendermint/tendermint/libs/log"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
var (
cfg *config.P2PConfig
)
func init() {
cfg = config.DefaultP2PConfig()
cfg.PexReactor = true
cfg.AllowDuplicateIP = true
}
func TestPEXReactorBasic(t *testing.T) {
r, _ := createReactor(t, &ReactorConfig{})
assert.NotNil(t, r)
assert.NotEmpty(t, r.GetChannels())
}
func TestPEXReactorAddRemovePeer(t *testing.T) {
r, book := createReactor(t, &ReactorConfig{})
size := book.Size()
peer := p2p.CreateRandomPeer(false)
r.AddPeer(peer)
assert.Equal(t, size+1, book.Size())
r.RemovePeer(peer, "peer not available")
outboundPeer := p2p.CreateRandomPeer(true)
r.AddPeer(outboundPeer)
assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book")
r.RemovePeer(outboundPeer, "peer not available")
}
// --- FAIL: TestPEXReactorRunning (11.10s)
// pex_reactor_test.go:411: expected all switches to be connected to at
// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 =>
// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, )
//
// EXPLANATION: peers are getting rejected because in switch#addPeer we check
// if any peer (who we already connected to) has the same IP. Even though local
// peers have different IP addresses, they all have the same underlying remote
// IP: 127.0.0.1.
//
func TestPEXReactorRunning(t *testing.T) {
N := 3
switches := make([]*p2p.Switch, N)
// directory to store address books
dir := tempDir(t)
books := make([]AddrBook, N)
logger := log.TestingLogger()
// create switches
for i := 0; i < N; i++ {
switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
books[i].SetLogger(logger.With("pex", i))
sw.SetAddrBook(books[i])
sw.SetLogger(logger.With("pex", i))
r := NewReactor(books[i], &ReactorConfig{})
r.SetLogger(logger.With("pex", i))
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
return sw
}, logger)
}
for _, sw := range switches {
err := sw.Start() // start switch and reactors
require.Nil(t, err)
}
addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) {
addr := switches[otherSwitchIndex].NetAddress()
err := books[switchIndex].AddAddress(addr, addr)
require.NoError(t, err)
}
addOtherNodeAddrToAddrBook(0, 1)
addOtherNodeAddrToAddrBook(1, 0)
addOtherNodeAddrToAddrBook(2, 1)
assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1)
// stop them
for _, s := range switches {
err := s.Stop()
require.NoError(t, err)
}
}
func TestPEXReactorReceive(t *testing.T) {
r, book := createReactor(t, &ReactorConfig{})
peer := p2p.CreateRandomPeer(false)
// we have to send a request to receive responses
r.RequestAddrs(peer)
size := book.Size()
na, err := peer.NodeInfo().NetAddress()
require.NoError(t, err)
msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})})
r.Receive(PexChannel, peer, msg)
assert.Equal(t, size+1, book.Size())
msg = mustEncode(&tmp2p.PexRequest{})
r.Receive(PexChannel, peer, msg) // should not panic.
}
func TestPEXReactorRequestMessageAbuse(t *testing.T) {
r, book := createReactor(t, &ReactorConfig{})
sw := createSwitchAndAddReactors(r)
sw.SetAddrBook(book)
peer := mock.NewPeer(nil)
peerAddr := peer.SocketAddr()
p2p.AddPeerToSwitchPeerSet(sw, peer)
assert.True(t, sw.Peers().Has(peer.ID()))
err := book.AddAddress(peerAddr, peerAddr)
require.NoError(t, err)
require.True(t, book.HasAddress(peerAddr))
id := string(peer.ID())
msg := mustEncode(&tmp2p.PexRequest{})
// first time creates the entry
r.Receive(PexChannel, peer, msg)
assert.True(t, r.lastReceivedRequests.Has(id))
assert.True(t, sw.Peers().Has(peer.ID()))
// next time sets the last time value
r.Receive(PexChannel, peer, msg)
assert.True(t, r.lastReceivedRequests.Has(id))
assert.True(t, sw.Peers().Has(peer.ID()))
// third time is too many too soon - peer is removed
r.Receive(PexChannel, peer, msg)
assert.False(t, r.lastReceivedRequests.Has(id))
assert.False(t, sw.Peers().Has(peer.ID()))
assert.True(t, book.IsBanned(peerAddr))
}
func TestPEXReactorAddrsMessageAbuse(t *testing.T) {
r, book := createReactor(t, &ReactorConfig{})
sw := createSwitchAndAddReactors(r)
sw.SetAddrBook(book)
peer := mock.NewPeer(nil)
p2p.AddPeerToSwitchPeerSet(sw, peer)
assert.True(t, sw.Peers().Has(peer.ID()))
id := string(peer.ID())
// request addrs from the peer
r.RequestAddrs(peer)
assert.True(t, r.requestsSent.Has(id))
assert.True(t, sw.Peers().Has(peer.ID()))
msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})})
// receive some addrs. should clear the request
r.Receive(PexChannel, peer, msg)
assert.False(t, r.requestsSent.Has(id))
assert.True(t, sw.Peers().Has(peer.ID()))
// receiving more unsolicited addrs causes a disconnect and ban
r.Receive(PexChannel, peer, msg)
assert.False(t, sw.Peers().Has(peer.ID()))
assert.True(t, book.IsBanned(peer.SocketAddr()))
}
func TestCheckSeeds(t *testing.T) {
// directory to store address books
dir := tempDir(t)
// 1. test creating peer with no seeds works
peerSwitch := testCreateDefaultPeer(dir, 0)
require.Nil(t, peerSwitch.Start())
peerSwitch.Stop() // nolint:errcheck // ignore for tests
// 2. create seed
seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{})
// 3. test create peer with online seed works
peerSwitch = testCreatePeerWithSeed(dir, 2, seed)
require.Nil(t, peerSwitch.Start())
peerSwitch.Stop() // nolint:errcheck // ignore for tests
// 4. test create peer with all seeds having unresolvable DNS fails
badPeerConfig := &ReactorConfig{
Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"},
}
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
require.Error(t, peerSwitch.Start())
peerSwitch.Stop() // nolint:errcheck // ignore for tests
// 5. test create peer with one good seed address succeeds
badPeerConfig = &ReactorConfig{
Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657",
"d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657",
seed.NetAddress().String()},
}
peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig)
require.Nil(t, peerSwitch.Start())
peerSwitch.Stop() // nolint:errcheck // ignore for tests
}
func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) {
// directory to store address books
dir := tempDir(t)
// 1. create seed
seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{})
require.Nil(t, seed.Start())
t.Cleanup(func() { _ = seed.Stop() })
// 2. create usual peer with only seed configured.
peer := testCreatePeerWithSeed(dir, 1, seed)
require.Nil(t, peer.Start())
t.Cleanup(func() { _ = peer.Stop() })
// 3. check that the peer connects to seed immediately
assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1)
}
func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) {
// directory to store address books
dir := tempDir(t)
// 1. create peer
peerSwitch := testCreateDefaultPeer(dir, 1)
require.Nil(t, peerSwitch.Start())
t.Cleanup(func() { _ = peerSwitch.Stop() })
// 2. Create seed which knows about the peer
peerAddr := peerSwitch.NetAddress()
seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr})
require.Nil(t, seed.Start())
t.Cleanup(func() { _ = seed.Stop() })
// 3. create another peer with only seed configured.
secondPeer := testCreatePeerWithSeed(dir, 3, seed)
require.Nil(t, secondPeer.Start())
t.Cleanup(func() { _ = secondPeer.Stop() })
// 4. check that the second peer connects to seed immediately
assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1)
// 5. check that the second peer connects to the first peer immediately
assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2)
}
func TestPEXReactorSeedMode(t *testing.T) {
// directory to store address books
dir := tempDir(t)
pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond}
pexR, book := createReactor(t, pexRConfig)
sw := createSwitchAndAddReactors(pexR)
sw.SetAddrBook(book)
require.NoError(t, sw.Start())
t.Cleanup(func() { _ = sw.Stop() })
assert.Zero(t, sw.Peers().Size())
peerSwitch := testCreateDefaultPeer(dir, 1)
require.NoError(t, peerSwitch.Start())
t.Cleanup(func() { _ = peerSwitch.Stop() })
// 1. Test crawlPeers dials the peer
pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()})
assert.Equal(t, 1, sw.Peers().Size())
assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID()))
// 2. attemptDisconnects should not disconnect because of wait period
pexR.attemptDisconnects()
assert.Equal(t, 1, sw.Peers().Size())
// sleep for SeedDisconnectWaitPeriod
time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond)
// 3. attemptDisconnects should disconnect after wait period
pexR.attemptDisconnects()
assert.Equal(t, 0, sw.Peers().Size())
}
func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) {
// directory to store address books
dir := tempDir(t)
pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond}
pexR, book := createReactor(t, pexRConfig)
sw := createSwitchAndAddReactors(pexR)
sw.SetAddrBook(book)
require.NoError(t, sw.Start())
t.Cleanup(func() { _ = sw.Stop() })
assert.Zero(t, sw.Peers().Size())
peerSwitch := testCreatePeerWithConfig(dir, 1, pexRConfig)
require.NoError(t, peerSwitch.Start())
t.Cleanup(func() { _ = peerSwitch.Stop() })
require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()}))
// 1. Test crawlPeers dials the peer
pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()})
assert.Equal(t, 1, sw.Peers().Size())
assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID()))
// sleep for SeedDisconnectWaitPeriod
time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond)
// 2. attemptDisconnects should not disconnect because the peer is persistent
pexR.attemptDisconnects()
assert.Equal(t, 1, sw.Peers().Size())
}
func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) {
// directory to store address books
pexR, book := createReactor(t, &ReactorConfig{SeedMode: true})
sw := createSwitchAndAddReactors(pexR)
sw.SetAddrBook(book)
// No need to start sw since crawlPeers is called manually here.
peer := mock.NewPeer(nil)
addr := peer.SocketAddr()
require.NoError(t, book.AddAddress(addr, addr))
assert.True(t, book.HasAddress(addr))
// imitate maxAttemptsToDial reached
pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()})
pexR.crawlPeers([]*p2p.NetAddress{addr})
assert.False(t, book.HasAddress(addr))
}
// connect a peer to a seed, wait a bit, then stop it.
// this should give it time to request addrs and for the seed
// to call FlushStop, and lets us test calling Stop concurrently
// with FlushStop. Before a fix, this non-deterministically reproduced
// https://github.com/tendermint/tendermint/issues/3231.
func TestPEXReactorSeedModeFlushStop(t *testing.T) {
t.Skip("flaky test, will be replaced by new P2P stack")
N := 2
switches := make([]*p2p.Switch, N)
// directory to store address books
dir := tempDir(t)
books := make([]AddrBook, N)
logger := log.TestingLogger()
// create switches
for i := 0; i < N; i++ {
switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false)
books[i].SetLogger(logger.With("pex", i))
sw.SetAddrBook(books[i])
sw.SetLogger(logger.With("pex", i))
config := &ReactorConfig{}
if i == 0 {
// first one is a seed node
config = &ReactorConfig{SeedMode: true}
}
r := NewReactor(books[i], config)
r.SetLogger(logger.With("pex", i))
r.SetEnsurePeersPeriod(250 * time.Millisecond)
sw.AddReactor("pex", r)
return sw
}, logger)
}
for _, sw := range switches {
err := sw.Start() // start switch and reactors
require.Nil(t, err)
}
reactor := switches[0].Reactors()["pex"].(*Reactor)
peerID := switches[1].NodeInfo().ID()
assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress()))
// sleep up to a second while waiting for the peer to send us a message.
// this isn't perfect, since it's possible the peer sends us a msg and we
// FlushStop before this loop catches it, but in practice it works well enough.
for i := 0; i < 1000; i++ {
v := reactor.lastReceivedRequests.Get(string(peerID))
if v != nil {
break
}
time.Sleep(time.Millisecond)
}
// by now the FlushStop should have happened. Try stopping the peer;
// it should be safe to do so.
peers := switches[0].Peers().List()
for _, peer := range peers {
err := peer.Stop()
require.NoError(t, err)
}
// stop the switches
for _, s := range switches {
err := s.Stop()
require.NoError(t, err)
}
}
func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) {
peer := p2p.CreateRandomPeer(false)
pexR, book := createReactor(t, &ReactorConfig{})
book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())})
// we have to send a request to receive responses
pexR.RequestAddrs(peer)
size := book.Size()
na, err := peer.NodeInfo().NetAddress()
require.NoError(t, err)
msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})})
pexR.Receive(PexChannel, peer, msg)
assert.Equal(t, size, book.Size())
pexR.AddPeer(peer)
assert.Equal(t, size, book.Size())
}
func TestPEXReactorDialPeer(t *testing.T) {
pexR, book := createReactor(t, &ReactorConfig{})
sw := createSwitchAndAddReactors(pexR)
sw.SetAddrBook(book)
peer := mock.NewPeer(nil)
addr := peer.SocketAddr()
assert.Equal(t, 0, pexR.AttemptsToDial(addr))
// 1st unsuccessful attempt
err := pexR.dialPeer(addr)
require.Error(t, err)
assert.Equal(t, 1, pexR.AttemptsToDial(addr))
// 2nd unsuccessful attempt
err = pexR.dialPeer(addr)
require.Error(t, err)
// must be skipped because it is too early
assert.Equal(t, 1, pexR.AttemptsToDial(addr))
if !testing.Short() {
time.Sleep(3 * time.Second)
// 3rd attempt
err = pexR.dialPeer(addr)
require.Error(t, err)
assert.Equal(t, 2, pexR.AttemptsToDial(addr))
}
}
func assertPeersWithTimeout(
t *testing.T,
switches []*p2p.Switch,
checkPeriod, timeout time.Duration,
nPeers int,
) {
var (
ticker = time.NewTicker(checkPeriod)
remaining = timeout
)
for {
select {
case <-ticker.C:
// check peers are connected
allGood := true
for _, s := range switches {
outbound, inbound, _ := s.NumPeers()
if outbound+inbound < nPeers {
allGood = false
break
}
}
remaining -= checkPeriod
if remaining < 0 {
remaining = 0
}
if allGood {
return
}
case <-time.After(remaining):
numPeersStr := ""
for i, s := range switches {
outbound, inbound, _ := s.NumPeers()
numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound)
}
t.Errorf(
"expected all switches to be connected to at least %d peer(s) (switches: %s)",
nPeers, numPeersStr,
)
return
}
}
}
// Creates a peer with the provided config
func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch {
peer := p2p.MakeSwitch(
cfg,
id,
"127.0.0.1",
"123.123.123",
func(i int, sw *p2p.Switch) *p2p.Switch {
book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false)
book.SetLogger(log.TestingLogger())
sw.SetAddrBook(book)
sw.SetLogger(log.TestingLogger())
r := NewReactor(
book,
config,
)
r.SetLogger(log.TestingLogger())
sw.AddReactor("pex", r)
return sw
},
log.TestingLogger(),
)
return peer
}
// Creates a peer with the default config
func testCreateDefaultPeer(dir string, id int) *p2p.Switch {
return testCreatePeerWithConfig(dir, id, &ReactorConfig{})
}
// Creates a seed which knows about the provided addresses / source address pairs.
// Starting and stopping the seed is left to the caller
func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch {
seed := p2p.MakeSwitch(
cfg,
id,
"127.0.0.1",
"123.123.123",
func(i int, sw *p2p.Switch) *p2p.Switch {
book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false)
book.SetLogger(log.TestingLogger())
for j := 0; j < len(knownAddrs); j++ {
book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests
book.MarkGood(knownAddrs[j].ID)
}
sw.SetAddrBook(book)
sw.SetLogger(log.TestingLogger())
r := NewReactor(book, &ReactorConfig{})
r.SetLogger(log.TestingLogger())
sw.AddReactor("pex", r)
return sw
},
log.TestingLogger(),
)
return seed
}
// Creates a peer which knows about the provided seed.
// Starting and stopping the peer is left to the caller
func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch {
conf := &ReactorConfig{
Seeds: []string{seed.NetAddress().String()},
}
return testCreatePeerWithConfig(dir, id, conf)
}
func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) {
// directory to store address book
book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true)
book.SetLogger(log.TestingLogger())
r = NewReactor(book, conf)
r.SetLogger(log.TestingLogger())
return
}
func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch {
sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
for _, r := range reactors {
sw.AddReactor(r.String(), r)
}
return sw
}, log.TestingLogger())
return sw
}
func TestPexVectors(t *testing.T) {
addr := tmp2p.PexAddress{
ID: "1",
IP: "127.0.0.1",
Port: 9090,
}
testCases := []struct {
testName string
msg proto.Message
expBytes string
}{
{"PexRequest", &tmp2p.PexRequest{}, "0a00"},
{"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"},
}
for _, tc := range testCases {
tc := tc
bz := mustEncode(tc.msg)
require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
}
}
// FIXME: This function is used in place of testing.TB.TempDir()
// as the latter seems to cause test cases to fail when it is
// unable to remove the temporary directory once the test case
// execution terminates. This seems to happen often with pex
// reactor test cases.
//
// References:
// https://github.com/tendermint/tendermint/pull/5733
// https://github.com/tendermint/tendermint/issues/5732
func tempDir(t *testing.T) string {
t.Helper()
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(dir) })
return dir
}

+ 16
- 0
internal/p2p/pex/reactor.go

@ -24,6 +24,22 @@ var (
// TODO: Consolidate with params file.
// See https://github.com/tendermint/tendermint/issues/6371
const (
// PexChannel is a channel for PEX messages
PexChannel = byte(0x00)
// over-estimate of max NetAddress size
// hexID (40) + IP (16) + Port (2) + Name (100) ...
// NOTE: don't use massive DNS names
maxAddressSize = 256
// max addresses returned by GetSelection
// NOTE: this must match "maxMsgSize"
maxGetSelection = 250
// NOTE: amplification factor!
// small request results in up to maxMsgSize response
maxMsgSize = maxAddressSize * maxGetSelection
// the minimum time allowed between two requests from the same peer
minReceiveRequestInterval = 100 * time.Millisecond
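These constants bound the amplification factor noted above: a PexRequest of a couple of bytes can elicit a response of up to maxAddressSize * maxGetSelection bytes. A standalone sanity check of that arithmetic (a sketch; the constants are copied from this file):

package main

import "fmt"

const (
	maxAddressSize  = 256 // over-estimate of one encoded NetAddress
	maxGetSelection = 250 // max addresses returned by GetSelection
)

func main() {
	// A ~2-byte PexRequest can trigger a response of up to 64000 bytes.
	fmt.Println(maxAddressSize * maxGetSelection) // 64000
}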


+ 3
- 1
internal/p2p/router.go

@ -21,6 +21,8 @@ import (
const queueBufferDefault = 32
const dialRandomizerIntervalMillisecond = 3000
// ChannelID is an arbitrary channel ID.
type ChannelID uint16
@ -544,7 +546,7 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
func (r *Router) dialSleep(ctx context.Context) {
if r.options.DialSleep == nil {
// nolint:gosec // G404: Use of weak random number generator
-timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond)
+timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond)
defer timer.Stop()
select {


+ 42
- 261
internal/p2p/shim.go

@ -1,58 +1,42 @@
package p2p
import (
"errors"
"sort"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
)
// ============================================================================
// TODO: Types and business logic below are temporary and will be removed once
// the legacy p2p stack is removed in favor of the new model.
//
// ref: https://github.com/tendermint/tendermint/issues/5670
// ============================================================================
var _ Reactor = (*ReactorShim)(nil)
type (
messageValidator interface {
Validate() error
}
// ReactorShim defines a generic shim wrapper around a BaseReactor. It is
// responsible for wiring up legacy p2p behavior to the new p2p semantics
// (e.g. proxying Envelope messages to legacy peers).
ReactorShim struct {
BaseReactor
Name string
PeerUpdates *PeerUpdates
Channels map[ChannelID]*ChannelShim
}
// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel
// and the proto.Message the new p2p Channel is responsible for handling.
// A ChannelDescriptorShim is not contained in ReactorShim, but is rather
// used to construct a ReactorShim.
type ChannelDescriptorShim struct {
MsgType proto.Message
Descriptor *ChannelDescriptor
}
// ChannelShim defines a generic shim wrapper around a legacy p2p channel
// and the new p2p Channel. It also includes the raw bi-directional Go channels
// so we can proxy message delivery.
ChannelShim struct {
Descriptor *ChannelDescriptor
Channel *Channel
inCh chan<- Envelope
outCh <-chan Envelope
errCh <-chan PeerError
}
// ChannelShim defines a generic shim wrapper around a legacy p2p channel
// and the new p2p Channel. It also includes the raw bi-directional Go channels
// so we can proxy message delivery.
type ChannelShim struct {
Descriptor *ChannelDescriptor
Channel *Channel
inCh chan<- Envelope
outCh <-chan Envelope
errCh <-chan PeerError
}
// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel
// and the proto.Message the new p2p Channel is responsible for handling.
// A ChannelDescriptorShim is not contained in ReactorShim, but is rather
// used to construct a ReactorShim.
ChannelDescriptorShim struct {
MsgType proto.Message
Descriptor *ChannelDescriptor
}
)
// ReactorShim defines a generic shim wrapper around a BaseReactor. It is
// responsible for wiring up legacy p2p behavior to the new p2p semantics
// (e.g. proxying Envelope messages to legacy peers).
type ReactorShim struct {
Name string
PeerUpdates *PeerUpdates
Channels map[ChannelID]*ChannelShim
}
func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim {
channels := make(map[ChannelID]*ChannelShim)
@ -68,9 +52,6 @@ func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*C
Channels: channels,
}
rs.BaseReactor = *NewBaseReactor(name, rs)
rs.SetLogger(logger)
return rs
}
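Construction follows the pattern from the (now-deleted) shim tests further down; a minimal sketch, with the channel ID, priority, and capacities chosen arbitrarily and ssproto.Message standing in for whatever message type the channel carries:

shim := NewReactorShim(log.TestingLogger(), "ExampleShim", map[ChannelID]*ChannelDescriptorShim{
	ChannelID(0x01): {
		MsgType: new(ssproto.Message),
		Descriptor: &ChannelDescriptor{
			ID:                  byte(0x01),
			Priority:            3,
			SendQueueCapacity:   10,
			RecvMessageCapacity: int(4e6),
		},
	},
})
ch := shim.GetChannel(ChannelID(0x01)) // the new-stack *Channel for this ID
_ = ch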
@ -93,121 +74,15 @@ func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim {
}
}
// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate
// go-routine where we listen for outbound envelopes sent during Receive
// executions (or anything else that may send on the Channel) and proxy them to
// the corresponding Peer using the To field from the envelope.
func (rs *ReactorShim) proxyPeerEnvelopes() {
for _, cs := range rs.Channels {
go func(cs *ChannelShim) {
for e := range cs.outCh {
msg := proto.Clone(cs.Channel.messageType)
msg.Reset()
wrapper, ok := msg.(Wrapper)
if ok {
if err := wrapper.Wrap(e.Message); err != nil {
rs.Logger.Error(
"failed to proxy envelope; failed to wrap message",
"ch_id", cs.Descriptor.ID,
"err", err,
)
continue
}
} else {
msg = e.Message
}
bz, err := proto.Marshal(msg)
if err != nil {
rs.Logger.Error(
"failed to proxy envelope; failed to encode message",
"ch_id", cs.Descriptor.ID,
"err", err,
)
continue
}
switch {
case e.Broadcast:
rs.Switch.Broadcast(cs.Descriptor.ID, bz)
case e.To != "":
src := rs.Switch.peers.Get(e.To)
if src == nil {
rs.Logger.Debug(
"failed to proxy envelope; failed to find peer",
"ch_id", cs.Descriptor.ID,
"peer", e.To,
)
continue
}
if !src.Send(cs.Descriptor.ID, bz) {
// This usually happens when we try to send across a channel
// that the peer doesn't have open. To avoid bloating the
// logs we set this to be Debug
rs.Logger.Debug(
"failed to proxy message to peer",
"ch_id", cs.Descriptor.ID,
"peer", e.To,
)
}
default:
rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID)
}
}
}(cs)
}
}
// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine
// where we listen for peer errors. For each peer error, we find the peer from
// the legacy p2p Switch and execute a StopPeerForError call with the corresponding
// peer error.
func (rs *ReactorShim) handlePeerErrors() {
for _, cs := range rs.Channels {
go func(cs *ChannelShim) {
for pErr := range cs.errCh {
if pErr.NodeID != "" {
peer := rs.Switch.peers.Get(pErr.NodeID)
if peer == nil {
rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID)
continue
}
rs.Switch.StopPeerForError(peer, pErr.Err)
}
}
}(cs)
}
}
// OnStart executes the reactor shim's OnStart hook where we start all the
// necessary go-routines in order to proxy peer envelopes and errors per p2p
// Channel.
func (rs *ReactorShim) OnStart() error {
if rs.Switch == nil {
return errors.New("proxyPeerEnvelopes: reactor shim switch is nil")
}
// start envelope proxying and peer error handling in separate go routines
rs.proxyPeerEnvelopes()
rs.handlePeerErrors()
return nil
}
// GetChannel returns a p2p Channel reference for a given ChannelID. If no
// Channel exists, nil is returned.
func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel {
channelShim, ok := rs.Channels[cID]
if ok {
return channelShim.Channel
}
return nil
// MConnConfig returns an MConnConfig based on the defaults, with fields updated
// from the P2PConfig.
func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig {
mConfig := conn.DefaultMConnConfig()
mConfig.FlushThrottle = cfg.FlushThrottleTimeout
mConfig.SendRate = cfg.SendRate
mConfig.RecvRate = cfg.RecvRate
mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
return mConfig
}
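A minimal usage sketch, mirroring how MakeSwitch in test_util.go (below) feeds this config into the transport constructor:

cfg := config.DefaultP2PConfig()
transport := NewMConnTransport(
	log.TestingLogger(),
	MConnConfig(cfg), // flush throttle, send/recv rates, max packet payload from cfg
	[]*ChannelDescriptor{},
	MConnTransportOptions{},
)
_ = transport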
// GetChannels implements the legacy Reactor interface for getting a slice of all
@ -228,107 +103,13 @@ func (rs *ReactorShim) GetChannels() []*ChannelDescriptor {
return descriptors
}
// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh.
// The embedding reactor must be sure to listen for messages on this channel to
// handle adding a peer.
func (rs *ReactorShim) AddPeer(peer Peer) {
select {
case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusUp}:
rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp)
case <-rs.PeerUpdates.Done():
// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.
// This is because there may be numerous spawned goroutines that are
// attempting to send on the updateCh go channel and when the reactor stops
// we do not want to preemptively close the channel as that could result in
// panics sending on a closed channel. This also means that reactors MUST
// be certain there are NO listeners on the updateCh channel when closing or
// stopping.
}
}
// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh.
// The embedding reactor must be sure to listen for messages on this channel to
// handle removing a peer.
func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) {
select {
case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown}:
rs.Logger.Debug(
"sent peer update",
"reactor", rs.Name,
"peer", peer.ID(),
"reason", reason,
"status", PeerStatusDown,
)
case <-rs.PeerUpdates.Done():
// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.
// This is because there may be numerous spawned goroutines that are
// attempting to send on the updateCh go channel and when the reactor stops
// we do not want to preemptively close the channel as that could result in
// panics sending on a closed channel. This also means that reactors MUST
// be certain there are NO listeners on the updateCh channel when closing or
// stopping.
}
}
// Receive implements a generic wrapper around implementing the Receive method
// on the legacy Reactor p2p interface. If the reactor is running, Receive will
// find the corresponding new p2p Channel, create and decode the appropriate
// proto.Message from the msgBytes, execute any validation and finally construct
// and send a p2p Envelope on the appropriate p2p Channel.
func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) {
if !rs.IsRunning() {
return
}
cID := ChannelID(chID)
// GetChannel returns a p2p Channel reference for a given ChannelID. If no
// Channel exists, nil is returned.
func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel {
channelShim, ok := rs.Channels[cID]
if !ok {
rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID)
return
}
msg := proto.Clone(channelShim.Channel.messageType)
msg.Reset()
if err := proto.Unmarshal(msgBytes, msg); err != nil {
rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err)
rs.Switch.StopPeerForError(src, err)
return
}
validator, ok := msg.(messageValidator)
if ok {
if err := validator.Validate(); err != nil {
rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err)
rs.Switch.StopPeerForError(src, err)
return
}
}
wrapper, ok := msg.(Wrapper)
if ok {
var err error
msg, err = wrapper.Unwrap()
if err != nil {
rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err)
return
}
return channelShim.Channel
}
select {
case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}:
rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID())
case <-channelShim.Channel.Done():
// NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel.
// This is because there may be numerous spawned goroutines that are
// attempting to send on the inbound channel and when the reactor stops we
// do not want to preemptively close the channel as that could result in
// panics sending on a closed channel. This also means that reactors MUST
// be certain there are NO listeners on the inbound channel when closing or
// stopping.
}
return nil
}
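The type assertions above probe for two optional interfaces: messageValidator, declared at the top of this file, and Wrapper, defined elsewhere in the package. Based on how it is called here (Wrap in proxyPeerEnvelopes, Unwrap in Receive), Wrapper amounts to roughly the following sketch (an inferred shape, not a copy of its definition):

// Wrapper is an envelope message that can embed and extract an inner
// proto.Message (shape inferred from the shim's usage above).
type Wrapper interface {
	proto.Message

	// Wrap embeds the given message inside the envelope type.
	Wrap(proto.Message) error

	// Unwrap extracts the embedded message from the envelope.
	Unwrap() (proto.Message, error)
}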

+ 0
- 207
internal/p2p/shim_test.go

@ -1,207 +0,0 @@
package p2p_test
import (
"sync"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p"
p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks"
"github.com/tendermint/tendermint/libs/log"
ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
"github.com/tendermint/tendermint/types"
)
var (
channelID1 = byte(0x01)
channelID2 = byte(0x02)
p2pCfg = config.DefaultP2PConfig()
testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
p2p.ChannelID(channelID1): {
MsgType: new(ssproto.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: channelID1,
Priority: 3,
SendQueueCapacity: 10,
RecvMessageCapacity: int(4e6),
},
},
p2p.ChannelID(channelID2): {
MsgType: new(ssproto.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: channelID2,
Priority: 1,
SendQueueCapacity: 4,
RecvMessageCapacity: int(16e6),
},
},
}
)
type reactorShimTestSuite struct {
shim *p2p.ReactorShim
sw *p2p.Switch
}
func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite {
t.Helper()
rts := &reactorShimTestSuite{
shim: p2p.NewReactorShim(log.TestingLogger(), "TestShim", testChannelShims),
}
rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", func(_ int, sw *p2p.Switch) *p2p.Switch {
for _, peer := range peers {
p2p.AddPeerToSwitchPeerSet(sw, peer)
}
sw.AddReactor(rts.shim.Name, rts.shim)
return sw
}, log.TestingLogger())
// start the reactor shim
require.NoError(t, rts.shim.Start())
t.Cleanup(func() {
require.NoError(t, rts.shim.Stop())
for _, chs := range rts.shim.Channels {
chs.Channel.Close()
}
})
return rts
}
func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) {
t.Helper()
peerID := types.NodeID(id)
peer := &p2pmocks.Peer{}
peer.On("ID").Return(peerID)
return peer, peerID
}
func TestReactorShim_GetChannel(t *testing.T) {
rts := setup(t, nil)
p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1))
require.NotNil(t, p2pCh)
require.Equal(t, p2pCh.ID, p2p.ChannelID(channelID1))
p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03)))
require.Nil(t, p2pCh)
}
func TestReactorShim_GetChannels(t *testing.T) {
rts := setup(t, nil)
p2pChs := rts.shim.GetChannels()
require.Len(t, p2pChs, 2)
require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1))
require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2))
}
func TestReactorShim_AddPeer(t *testing.T) {
peerA, peerIDA := simplePeer(t, "aa")
rts := setup(t, []p2p.Peer{peerA})
var wg sync.WaitGroup
wg.Add(1)
var peerUpdate p2p.PeerUpdate
go func() {
peerUpdate = <-rts.shim.PeerUpdates.Updates()
wg.Done()
}()
rts.shim.AddPeer(peerA)
wg.Wait()
require.Equal(t, peerIDA, peerUpdate.NodeID)
require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status)
}
func TestReactorShim_RemovePeer(t *testing.T) {
peerA, peerIDA := simplePeer(t, "aa")
rts := setup(t, []p2p.Peer{peerA})
var wg sync.WaitGroup
wg.Add(1)
var peerUpdate p2p.PeerUpdate
go func() {
peerUpdate = <-rts.shim.PeerUpdates.Updates()
wg.Done()
}()
rts.shim.RemovePeer(peerA, "test reason")
wg.Wait()
require.Equal(t, peerIDA, peerUpdate.NodeID)
require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status)
}
func TestReactorShim_Receive(t *testing.T) {
peerA, peerIDA := simplePeer(t, "aa")
rts := setup(t, []p2p.Peer{peerA})
msg := &ssproto.Message{
Sum: &ssproto.Message_ChunkRequest{
ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1},
},
}
bz, err := proto.Marshal(msg)
require.NoError(t, err)
var wg sync.WaitGroup
var response *ssproto.Message
peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) {
m := &ssproto.Message{}
require.NoError(t, proto.Unmarshal(args[1].([]byte), m))
response = m
wg.Done()
}).Return(true)
p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)]
wg.Add(2)
// Simulate receiving the envelope in some real reactor and replying back with
// the same envelope and then closing the Channel.
go func() {
e := <-p2pCh.Channel.In
require.Equal(t, peerIDA, e.From)
require.NotNil(t, e.Message)
p2pCh.Channel.Out <- p2p.Envelope{To: e.From, Message: e.Message}
p2pCh.Channel.Close()
wg.Done()
}()
rts.shim.Receive(channelID1, peerA, bz)
// wait until the mock peer called Send and we (fake) proxied the envelope
wg.Wait()
require.NotNil(t, response)
m, err := response.Unwrap()
require.NoError(t, err)
require.Equal(t, msg.GetChunkRequest(), m)
// Since p2pCh was closed in the simulated reactor above, calling Receive
// should not block.
rts.shim.Receive(channelID1, peerA, bz)
require.Empty(t, p2pCh.Channel.In)
peerA.AssertExpectations(t)
}

+ 0
- 1064
internal/p2p/switch.go
File diff suppressed because it is too large


+ 0
- 932
internal/p2p/switch_test.go

@ -1,932 +0,0 @@
package p2p
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
"regexp"
"strconv"
"sync/atomic"
"testing"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/ed25519"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/types"
)
var (
cfg *config.P2PConfig
ctx = context.Background()
)
func init() {
cfg = config.DefaultP2PConfig()
cfg.PexReactor = true
cfg.AllowDuplicateIP = true
}
type PeerMessage struct {
PeerID types.NodeID
Bytes []byte
Counter int
}
type TestReactor struct {
BaseReactor
mtx tmsync.Mutex
channels []*conn.ChannelDescriptor
logMessages bool
msgsCounter int
msgsReceived map[byte][]PeerMessage
}
func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor {
tr := &TestReactor{
channels: channels,
logMessages: logMessages,
msgsReceived: make(map[byte][]PeerMessage),
}
tr.BaseReactor = *NewBaseReactor("TestReactor", tr)
tr.SetLogger(log.TestingLogger())
return tr
}
func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor {
return tr.channels
}
func (tr *TestReactor) AddPeer(peer Peer) {}
func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}
func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
if tr.logMessages {
tr.mtx.Lock()
defer tr.mtx.Unlock()
// fmt.Printf("Received: %X, %X\n", chID, msgBytes)
tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter})
tr.msgsCounter++
}
}
func (tr *TestReactor) getMsgs(chID byte) []PeerMessage {
tr.mtx.Lock()
defer tr.mtx.Unlock()
return tr.msgsReceived[chID]
}
//-----------------------------------------------------------------------------
// convenience method for creating two switches connected to each other.
// XXX: note this uses net.Pipe and not a proper TCP conn
func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) {
// Create two switches that will be interconnected.
switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
return switches[0], switches[1]
}
func initSwitchFunc(i int, sw *Switch) *Switch {
sw.SetAddrBook(&AddrBookMock{
Addrs: make(map[string]struct{}),
OurAddrs: make(map[string]struct{})})
// Make two reactors of two channels each
sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
{ID: byte(0x00), Priority: 10},
{ID: byte(0x01), Priority: 10},
}, true))
sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
{ID: byte(0x02), Priority: 10},
{ID: byte(0x03), Priority: 10},
}, true))
return sw
}
func TestSwitches(t *testing.T) {
s1, s2 := MakeSwitchPair(t, initSwitchFunc)
t.Cleanup(func() {
if err := s1.Stop(); err != nil {
t.Error(err)
}
})
t.Cleanup(func() {
if err := s2.Stop(); err != nil {
t.Error(err)
}
})
if s1.Peers().Size() != 1 {
t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size())
}
if s2.Peers().Size() != 1 {
t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size())
}
// Lets send some messages
ch0Msg := []byte("channel zero")
ch1Msg := []byte("channel foo")
ch2Msg := []byte("channel bar")
s1.Broadcast(byte(0x00), ch0Msg)
s1.Broadcast(byte(0x01), ch1Msg)
s1.Broadcast(byte(0x02), ch2Msg)
assertMsgReceivedWithTimeout(t,
ch0Msg,
byte(0x00),
s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
assertMsgReceivedWithTimeout(t,
ch1Msg,
byte(0x01),
s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
assertMsgReceivedWithTimeout(t,
ch2Msg,
byte(0x02),
s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
}
func assertMsgReceivedWithTimeout(
t *testing.T,
msgBytes []byte,
channel byte,
reactor *TestReactor,
checkPeriod,
timeout time.Duration,
) {
ticker := time.NewTicker(checkPeriod)
for {
select {
case <-ticker.C:
msgs := reactor.getMsgs(channel)
if len(msgs) > 0 {
if !bytes.Equal(msgs[0].Bytes, msgBytes) {
t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
}
return
}
case <-time.After(timeout):
t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
}
}
}
func TestSwitchFiltersOutItself(t *testing.T) {
s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger())
// simulate s1 having a public IP by creating a remote peer with the same ID
rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
rp.Start()
// addr should be rejected in addPeer based on the same ID
err := s1.DialPeerWithAddress(rp.Addr())
if assert.Error(t, err) {
if err, ok := err.(ErrRejected); ok {
if !err.IsSelf() {
t.Errorf("expected self to be rejected")
}
} else {
t.Errorf("expected ErrRejected")
}
}
assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
assert.False(t, s1.addrBook.HasAddress(rp.Addr()))
rp.Stop()
assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
}
func TestSwitchDialFailsOnIncompatiblePeer(t *testing.T) {
s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger())
ni := s1.NodeInfo()
ni.Network = "network-a"
s1.SetNodeInfo(ni)
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"}
rp.Start()
defer rp.Stop()
err := s1.DialPeerWithAddress(rp.Addr())
require.Error(t, err)
errRejected, ok := err.(ErrRejected)
require.True(t, ok, "expected error to be of type ErrRejected")
require.True(t, errRejected.IsIncompatible(), "expected error to be IsIncompatible")
// remote peer should not have been added to the addressbook
require.False(t, s1.addrBook.HasAddress(rp.Addr()))
}
func TestSwitchPeerFilter(t *testing.T) {
var (
filters = []PeerFilterFunc{
func(_ IPeerSet, _ Peer) error { return nil },
func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") },
func(_ IPeerSet, _ Peer) error { return nil },
}
sw = MakeSwitch(
cfg,
1,
"testing",
"123.123.123",
initSwitchFunc,
log.TestingLogger(),
SwitchPeerFilters(filters...),
)
)
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
// simulate remote peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
t.Cleanup(rp.Stop)
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
if err != nil {
t.Fatal(err)
}
p := newPeer(
peerInfo,
newPeerConn(true, false, c),
sw.reactorsByCh,
sw.StopPeerForError,
)
err = sw.addPeer(p)
if err, ok := err.(ErrRejected); ok {
if !err.IsFiltered() {
t.Errorf("expected peer to be filtered")
}
} else {
t.Errorf("expected ErrRejected")
}
}
func TestSwitchPeerFilterTimeout(t *testing.T) {
var (
filters = []PeerFilterFunc{
func(_ IPeerSet, _ Peer) error {
time.Sleep(10 * time.Millisecond)
return nil
},
}
sw = MakeSwitch(
cfg,
1,
"testing",
"123.123.123",
initSwitchFunc,
log.TestingLogger(),
SwitchFilterTimeout(5*time.Millisecond),
SwitchPeerFilters(filters...),
)
)
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Log(err)
}
})
// simulate remote peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
if err != nil {
t.Fatal(err)
}
p := newPeer(
peerInfo,
newPeerConn(true, false, c),
sw.reactorsByCh,
sw.StopPeerForError,
)
err = sw.addPeer(p)
if _, ok := err.(ErrFilterTimeout); !ok {
t.Errorf("expected ErrFilterTimeout")
}
}
func TestSwitchPeerFilterDuplicate(t *testing.T) {
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger())
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
// simulate remote peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
if err != nil {
t.Fatal(err)
}
p := newPeer(
peerInfo,
newPeerConn(true, false, c),
sw.reactorsByCh,
sw.StopPeerForError,
)
if err := sw.addPeer(p); err != nil {
t.Fatal(err)
}
err = sw.addPeer(p)
if errRej, ok := err.(ErrRejected); ok {
if !errRej.IsDuplicate() {
t.Errorf("expected peer to be duplicate. got %v", errRej)
}
} else {
t.Errorf("expected ErrRejected, got %v", err)
}
}
func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
time.Sleep(timeout)
if sw.Peers().Size() != 0 {
t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size())
}
}
func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
assert, require := assert.New(t), require.New(t)
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger())
err := sw.Start()
if err != nil {
t.Error(err)
}
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
// simulate remote peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr()))
if err != nil {
t.Fatal(err)
}
peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey)
if err != nil {
t.Fatal(err)
}
p := newPeer(
peerInfo,
newPeerConn(true, false, c),
sw.reactorsByCh,
sw.StopPeerForError,
)
err = sw.addPeer(p)
require.Nil(err)
require.NotNil(sw.Peers().Get(rp.ID()))
// simulate failure by closing connection
err = p.CloseConn()
require.NoError(err)
assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
assert.False(p.IsRunning())
}
func TestSwitchStopPeerForError(t *testing.T) {
s := httptest.NewServer(promhttp.Handler())
defer s.Close()
scrapeMetrics := func() string {
resp, err := http.Get(s.URL)
require.NoError(t, err)
defer resp.Body.Close()
buf, _ := ioutil.ReadAll(resp.Body)
return string(buf)
}
namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers"
re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`)
peersMetricValue := func() float64 {
matches := re.FindStringSubmatch(scrapeMetrics())
f, _ := strconv.ParseFloat(matches[1], 64)
return f
}
p2pMetrics := PrometheusMetrics(namespace)
// make two connected switches
sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch {
// set metrics on sw1
if i == 0 {
opt := WithMetrics(p2pMetrics)
opt(sw)
}
return initSwitchFunc(i, sw)
})
assert.Equal(t, len(sw1.Peers().List()), 1)
assert.EqualValues(t, 1, peersMetricValue())
// send messages to the peer from sw1
p := sw1.Peers().List()[0]
p.Send(0x1, []byte("here's a message to send"))
// stop sw2. this should cause the peer to fail,
// which results in calling StopPeerForError internally
t.Cleanup(func() {
if err := sw2.Stop(); err != nil {
t.Error(err)
}
})
// now call StopPeerForError explicitly, eg. from a reactor
sw1.StopPeerForError(p, fmt.Errorf("some err"))
assert.Equal(t, len(sw1.Peers().List()), 0)
assert.EqualValues(t, 0, peersMetricValue())
}
func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) {
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger())
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
// 1. simulate failure by closing connection
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
err = sw.AddPersistentPeers([]string{rp.Addr().String()})
require.NoError(t, err)
err = sw.DialPeerWithAddress(rp.Addr())
require.Nil(t, err)
require.NotNil(t, sw.Peers().Get(rp.ID()))
p := sw.Peers().List()[0]
err = p.(*peer).CloseConn()
require.NoError(t, err)
waitUntilSwitchHasAtLeastNPeers(sw, 1)
assert.False(t, p.IsRunning()) // old peer instance
assert.Equal(t, 1, sw.Peers().Size()) // new peer instance
// 2. simulate first time dial failure
rp = &remotePeer{
PrivKey: ed25519.GenPrivKey(),
Config: cfg,
// Use a different interface to avoid the duplicate-IP filter; this will
// break beyond two peers.
listenAddr: "127.0.0.1:0",
}
rp.Start()
defer rp.Stop()
conf := config.DefaultP2PConfig()
conf.TestDialFail = true // will trigger a reconnect
err = sw.addOutboundPeerWithConfig(rp.Addr(), conf)
require.NotNil(t, err)
// DialPeerWithAddress - sw.peerConfig resets the dialer
waitUntilSwitchHasAtLeastNPeers(sw, 2)
assert.Equal(t, 2, sw.Peers().Size())
}
func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) {
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger())
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
// 1. simulate failure by closing the connection
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
err = sw.AddPersistentPeers([]string{rp.Addr().String()})
require.NoError(t, err)
conn, err := rp.Dial(sw.NetAddress())
require.NoError(t, err)
time.Sleep(50 * time.Millisecond)
require.NotNil(t, sw.Peers().Get(rp.ID()))
conn.Close()
waitUntilSwitchHasAtLeastNPeers(sw, 1)
assert.Equal(t, 1, sw.Peers().Size())
}
func TestSwitchDialPeersAsync(t *testing.T) {
if testing.Short() {
return
}
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger())
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
err = sw.DialPeersAsync([]string{rp.Addr().String()})
require.NoError(t, err)
time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond)
require.NotNil(t, sw.Peers().Get(rp.ID()))
}
func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) {
for i := 0; i < 20; i++ {
time.Sleep(250 * time.Millisecond)
has := sw.Peers().Size()
if has >= n {
break
}
}
}
func TestSwitchFullConnectivity(t *testing.T) {
switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
defer func() {
for _, sw := range switches {
sw := sw
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
}
}()
for i, sw := range switches {
if sw.Peers().Size() != 2 {
t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i)
}
}
}
func TestSwitchAcceptRoutine(t *testing.T) {
cfg.MaxNumInboundPeers = 5
// Create some unconditional peers.
const unconditionalPeersNum = 2
var (
unconditionalPeers = make([]*remotePeer, unconditionalPeersNum)
unconditionalPeerIDs = make([]string, unconditionalPeersNum)
)
for i := 0; i < unconditionalPeersNum; i++ {
peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
peer.Start()
unconditionalPeers[i] = peer
unconditionalPeerIDs[i] = string(peer.ID())
}
// make switch
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger())
err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs)
require.NoError(t, err)
err = sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
err := sw.Stop()
require.NoError(t, err)
})
// 0. check there are no peers
assert.Equal(t, 0, sw.Peers().Size())
// 1. check we connect up to MaxNumInboundPeers
peers := make([]*remotePeer, 0)
for i := 0; i < cfg.MaxNumInboundPeers; i++ {
peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
peers = append(peers, peer)
peer.Start()
c, err := peer.Dial(sw.NetAddress())
require.NoError(t, err)
// spawn a reading routine to prevent connection from closing
go func(c net.Conn) {
for {
one := make([]byte, 1)
_, err := c.Read(one)
if err != nil {
return
}
}
}(c)
}
time.Sleep(100 * time.Millisecond)
assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
// 2. check we close new connections if we already have MaxNumInboundPeers peers
peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
peer.Start()
conn, err := peer.Dial(sw.NetAddress())
require.NoError(t, err)
// check conn is closed
one := make([]byte, 1)
_ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
_, err = conn.Read(one)
assert.Error(t, err)
assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
peer.Stop()
// 3. check we connect to unconditional peers despite the limit.
for _, peer := range unconditionalPeers {
c, err := peer.Dial(sw.NetAddress())
require.NoError(t, err)
// spawn a reading routine to prevent connection from closing
go func(c net.Conn) {
for {
one := make([]byte, 1)
_, err := c.Read(one)
if err != nil {
return
}
}
}(c)
}
time.Sleep(10 * time.Millisecond)
assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size())
for _, peer := range peers {
peer.Stop()
}
for _, peer := range unconditionalPeers {
peer.Stop()
}
}
func TestSwitchRejectsIncompatiblePeers(t *testing.T) {
sw := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger())
ni := sw.NodeInfo()
ni.Network = "network-a"
sw.SetNodeInfo(ni)
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
err := sw.Stop()
require.NoError(t, err)
})
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"}
rp.Start()
defer rp.Stop()
assert.Equal(t, 0, sw.Peers().Size())
conn, err := rp.Dial(sw.NetAddress())
assert.Nil(t, err)
one := make([]byte, 1)
_ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
_, err = conn.Read(one)
assert.Error(t, err)
assert.Equal(t, 0, sw.Peers().Size())
}
type errorTransport struct {
acceptErr error
}
func (et errorTransport) String() string {
return "error"
}
func (et errorTransport) Protocols() []Protocol {
return []Protocol{"error"}
}
func (et errorTransport) Accept() (Connection, error) {
return nil, et.acceptErr
}
func (errorTransport) Dial(context.Context, Endpoint) (Connection, error) {
panic("not implemented")
}
func (errorTransport) Close() error { panic("not implemented") }
func (errorTransport) FlushClose() error { panic("not implemented") }
func (errorTransport) Endpoints() []Endpoint { panic("not implemented") }
func TestSwitchAcceptRoutineErrorCases(t *testing.T) {
sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}})
assert.NotPanics(t, func() {
err := sw.Start()
require.NoError(t, err)
err = sw.Stop()
require.NoError(t, err)
})
sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}})
assert.NotPanics(t, func() {
err := sw.Start()
require.NoError(t, err)
err = sw.Stop()
require.NoError(t, err)
})
// TODO(melekes) check we remove our address from addrBook
sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}})
assert.NotPanics(t, func() {
err := sw.Start()
require.NoError(t, err)
err = sw.Stop()
require.NoError(t, err)
})
}
// mockReactor checks that InitPeer is never called before RemovePeer
// finishes. If it is, InitCalledBeforeRemoveFinished will return true.
type mockReactor struct {
*BaseReactor
// atomic
removePeerInProgress uint32
initCalledBeforeRemoveFinished uint32
}
func (r *mockReactor) GetChannels() []*ChannelDescriptor {
return []*ChannelDescriptor{{ID: testCh, Priority: 10}}
}
func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) {
atomic.StoreUint32(&r.removePeerInProgress, 1)
defer atomic.StoreUint32(&r.removePeerInProgress, 0)
time.Sleep(100 * time.Millisecond)
}
func (r *mockReactor) InitPeer(peer Peer) Peer {
if atomic.LoadUint32(&r.removePeerInProgress) == 1 {
atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1)
}
return peer
}
func (r *mockReactor) InitCalledBeforeRemoveFinished() bool {
return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1
}
// see stopAndRemovePeer
func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) {
// make reactor
reactor := &mockReactor{}
reactor.BaseReactor = NewBaseReactor("mockReactor", reactor)
// make switch
sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch {
sw.AddReactor("mock", reactor)
return sw
}, log.TestingLogger())
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
// add peer
rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
rp.Start()
defer rp.Stop()
_, err = rp.Dial(sw.NetAddress())
require.NoError(t, err)
// wait till the switch adds rp to the peer set, then stop the peer asynchronously
for {
time.Sleep(20 * time.Millisecond)
if peer := sw.Peers().Get(rp.ID()); peer != nil {
go sw.StopPeerForError(peer, "test")
break
}
}
// simulate peer reconnecting to us
_, err = rp.Dial(sw.NetAddress())
require.NoError(t, err)
// wait till the switch adds rp to the peer set
time.Sleep(50 * time.Millisecond)
// make sure reactor.RemovePeer is finished before InitPeer is called
assert.False(t, reactor.InitCalledBeforeRemoveFinished())
}
func BenchmarkSwitchBroadcast(b *testing.B) {
s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
// Make two reactors of two channels each
sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
{ID: byte(0x00), Priority: 10},
{ID: byte(0x01), Priority: 10},
}, false))
sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
{ID: byte(0x02), Priority: 10},
{ID: byte(0x03), Priority: 10},
}, false))
return sw
})
b.Cleanup(func() {
if err := s1.Stop(); err != nil {
b.Error(err)
}
})
b.Cleanup(func() {
if err := s2.Stop(); err != nil {
b.Error(err)
}
})
// Allow time for goroutines to boot up
time.Sleep(1 * time.Second)
b.ResetTimer()
numSuccess, numFailure := 0, 0
// Broadcast a test message on each of the four channels in turn
for i := 0; i < b.N; i++ {
chID := byte(i % 4)
successChan := s1.Broadcast(chID, []byte("test data"))
for s := range successChan {
if s {
numSuccess++
} else {
numFailure++
}
}
}
b.Logf("success: %v, failure: %v", numSuccess, numFailure)
}
func TestNewNetAddressStrings(t *testing.T) {
addrs, errs := NewNetAddressStrings([]string{
"127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080",
"deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"})
assert.Len(t, errs, 1)
assert.Equal(t, 2, len(addrs))
}
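The accepted format is hex-node-ID@host:port, so the bare host:port entry above is what yields the single error. Parsing one address directly (a sketch, using the types helper that MakeSwitch relies on in test_util.go below):

addr, err := types.NewNetAddressString("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080")
if err != nil {
	panic(err)
}
fmt.Println(addr.ID, addr.DialString()) // deadbeef... 127.0.0.1:8080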

+ 0
- 256
internal/p2p/test_util.go

@ -1,42 +1,15 @@
package p2p
import (
"context"
"fmt"
mrand "math/rand"
"net"
"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p/conn"
)
const testCh = 0x01
//------------------------------------------------
func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) {
sw.peers.Add(peer) //nolint:errcheck // ignore error
}
func CreateRandomPeer(outbound bool) Peer {
addr, netAddr := CreateRoutableAddr()
p := &peer{
peerConn: peerConn{outbound: outbound},
nodeInfo: types.NodeInfo{
NodeID: netAddr.ID,
ListenAddr: netAddr.DialString(),
},
metrics: NopMetrics(),
}
p.SetLogger(log.TestingLogger().With("peer", addr))
return p
}
// nolint:gosec // G404: Use of weak random number generator
func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
for {
@ -57,232 +30,3 @@ func CreateRoutableAddr() (addr string, netAddr *NetAddress) {
}
return
}
//------------------------------------------------------------------
// Connects switches via arbitrary net.Conn. Used for testing.
const TestHost = "localhost"
// MakeConnectedSwitches returns n switches, connected according to the connect func.
// If connect==Connect2Switches, the switches will be fully connected.
// initSwitch defines how the i'th switch should be initialized (ie. with what reactors).
// NOTE: panics if any switch fails to start.
func MakeConnectedSwitches(cfg *config.P2PConfig,
n int,
initSwitch func(int, *Switch) *Switch,
connect func([]*Switch, int, int),
) []*Switch {
switches := make([]*Switch, n)
for i := 0; i < n; i++ {
switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch, log.TestingLogger())
}
if err := StartSwitches(switches); err != nil {
panic(err)
}
for i := 0; i < n; i++ {
for j := i + 1; j < n; j++ {
connect(switches, i, j)
}
}
return switches
}
// Connect2Switches will connect switches i and j via net.Pipe().
// Blocks until a connection is established.
// NOTE: caller ensures i and j are within bounds.
func Connect2Switches(switches []*Switch, i, j int) {
switchI := switches[i]
switchJ := switches[j]
c1, c2 := conn.NetPipe()
doneCh := make(chan struct{})
go func() {
err := switchI.addPeerWithConnection(c1)
if err != nil {
panic(err)
}
doneCh <- struct{}{}
}()
go func() {
err := switchJ.addPeerWithConnection(c2)
if err != nil {
panic(err)
}
doneCh <- struct{}{}
}()
<-doneCh
<-doneCh
}
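Typical usage passes this as the connect function to MakeConnectedSwitches, as MakeSwitchPair and TestSwitchFullConnectivity do in switch_test.go above; a minimal sketch:

// Fully connect three switches over net.Pipe, stopping them afterwards.
switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
defer func() {
	for _, sw := range switches {
		_ = sw.Stop()
	}
}()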
func (sw *Switch) addPeerWithConnection(conn net.Conn) error {
pc, err := testInboundPeerConn(sw.transport.(*MConnTransport), conn)
if err != nil {
if err := conn.Close(); err != nil {
sw.Logger.Error("Error closing connection", "err", err)
}
return err
}
peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey)
if err != nil {
if err := conn.Close(); err != nil {
sw.Logger.Error("Error closing connection", "err", err)
}
return err
}
p := newPeer(
peerNodeInfo,
pc,
sw.reactorsByCh,
sw.StopPeerForError,
)
if err = sw.addPeer(p); err != nil {
pc.CloseConn()
return err
}
return nil
}
// StartSwitches calls sw.Start() for each given switch.
// It returns the first encountered error.
func StartSwitches(switches []*Switch) error {
for _, s := range switches {
err := s.Start() // start switch and reactors
if err != nil {
return err
}
}
return nil
}
func MakeSwitch(
cfg *config.P2PConfig,
i int,
network, version string,
initSwitch func(int, *Switch) *Switch,
logger log.Logger,
opts ...SwitchOption,
) *Switch {
nodeKey := types.GenNodeKey()
nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i))
addr, err := types.NewNetAddressString(
nodeKey.ID.AddressString(nodeInfo.ListenAddr),
)
if err != nil {
panic(err)
}
swLogger := logger.With("switch", i)
t := NewMConnTransport(swLogger, MConnConfig(cfg),
[]*ChannelDescriptor{}, MConnTransportOptions{})
// TODO: let the config be passed in?
sw := initSwitch(i, NewSwitch(cfg, t, opts...))
sw.SetLogger(swLogger)
sw.SetNodeKey(nodeKey)
if err := t.Listen(NewEndpoint(addr)); err != nil {
panic(err)
}
ni := nodeInfo
ni.Channels = []byte{}
for ch := range sw.reactorsByCh {
ni.Channels = append(ni.Channels, ch)
}
nodeInfo = ni
// TODO: We need to setup reactors ahead of time so the NodeInfo is properly
// populated and we don't have to do those awkward overrides and setters.
sw.SetNodeInfo(nodeInfo)
return sw
}
func testInboundPeerConn(
transport *MConnTransport,
conn net.Conn,
) (peerConn, error) {
return testPeerConn(transport, conn, false, false)
}
func testPeerConn(
transport *MConnTransport,
rawConn net.Conn,
outbound, persistent bool,
) (pc peerConn, err error) {
conn := newMConnConnection(transport.logger, rawConn, transport.mConnConfig, transport.channelDescs)
return newPeerConn(outbound, persistent, conn), nil
}
//----------------------------------------------------------------
// rand node info
func testNodeInfo(id types.NodeID, name string) types.NodeInfo {
return testNodeInfoWithNetwork(id, name, "testing")
}
func testNodeInfoWithNetwork(id types.NodeID, name, network string) types.NodeInfo {
return types.NodeInfo{
ProtocolVersion: defaultProtocolVersion,
NodeID: id,
ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()),
Network: network,
Version: "1.2.3-rc0-deadbeef",
Channels: []byte{testCh},
Moniker: name,
Other: types.NodeInfoOther{
TxIndex: "on",
RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()),
},
}
}
func getFreePort() int {
port, err := tmnet.GetFreePort()
if err != nil {
panic(err)
}
return port
}
type AddrBookMock struct {
Addrs map[string]struct{}
OurAddrs map[string]struct{}
PrivateAddrs map[string]struct{}
}
var _ AddrBook = (*AddrBookMock)(nil)
func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
book.Addrs[addr.String()] = struct{}{}
return nil
}
func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} }
func (book *AddrBookMock) OurAddress(addr *NetAddress) bool {
_, ok := book.OurAddrs[addr.String()]
return ok
}
func (book *AddrBookMock) MarkGood(types.NodeID) {}
func (book *AddrBookMock) HasAddress(addr *NetAddress) bool {
_, ok := book.Addrs[addr.String()]
return ok
}
func (book *AddrBookMock) RemoveAddress(addr *NetAddress) {
delete(book.Addrs, addr.String())
}
func (book *AddrBookMock) Save() {}
func (book *AddrBookMock) AddPrivateIDs(addrs []string) {
for _, addr := range addrs {
book.PrivateAddrs[addr] = struct{}{}
}
}
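As an aside while these helpers are deleted: Connect2Switches above is the standard Go idiom of joining two in-process endpoints with net.Pipe and blocking on a channel until both sides finish setup. A minimal, self-contained sketch of just that idiom, with a hypothetical setup callback standing in for the removed addPeerWithConnection, and errors returned instead of the panics the originals used:
package main
import (
	"fmt"
	"net"
)
// connectPair joins two in-process endpoints with net.Pipe and waits
// until both sides have run their setup, mirroring how Connect2Switches
// received from doneCh twice before returning.
func connectPair(setup func(net.Conn) error) error {
	c1, c2 := net.Pipe()
	errCh := make(chan error, 2) // buffered so neither goroutine leaks
	for _, c := range []net.Conn{c1, c2} {
		conn := c
		go func() { errCh <- setup(conn) }()
	}
	for i := 0; i < 2; i++ {
		if err := <-errCh; err != nil {
			return err
		}
	}
	return nil
}
func main() {
	err := connectPair(func(c net.Conn) error {
		defer c.Close()
		return nil // a real setup would handshake here
	})
	fmt.Println("connected:", err == nil)
}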

+ 0
- 9
internal/p2p/transport.go View File

@ -9,7 +9,6 @@ import (
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
//go:generate ../../scripts/mockery_generate.sh Transport|Connection
@ -20,14 +19,6 @@ const (
defaultProtocol Protocol = MConnProtocol
)
// defaultProtocolVersion populates the Block and P2P versions using
// the global values, but not the App.
var defaultProtocolVersion = types.ProtocolVersion{
P2P: version.P2PProtocol,
Block: version.BlockProtocol,
App: 0,
}
// Protocol identifies a transport protocol.
type Protocol string
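With the shared defaultProtocolVersion variable gone, callers that build a NodeInfo now spell out the protocol version themselves. A hedged sketch of that construction, using only identifiers that appear in this diff; the listen address and moniker are placeholders:
package main
import (
	"fmt"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
)
func main() {
	nodeKey := types.GenNodeKey()
	ni := types.NodeInfo{
		ProtocolVersion: types.ProtocolVersion{
			P2P:   version.P2PProtocol,
			Block: version.BlockProtocol,
			App:   0, // the app version is owned by the application, not p2p
		},
		NodeID:     nodeKey.ID,
		ListenAddr: "127.0.0.1:26656",
		Moniker:    "example",
	}
	fmt.Println(ni.NodeID)
}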


+ 16
- 44
internal/rpc/core/consensus.go View File

@ -1,13 +1,9 @@
package core
import (
"errors"
"github.com/tendermint/tendermint/internal/consensus"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
"github.com/tendermint/tendermint/types"
)
// Validators gets the validator set at the given block height.
@ -58,52 +54,28 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.Re
// Get Peer consensus states.
var peerStates []coretypes.PeerStateInfo
switch {
case env.P2PPeers != nil:
peers := env.P2PPeers.Peers().List()
peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
for _, peer := range peers {
peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState)
if !ok { // peer does not have a state yet
continue
}
peerStateJSON, err := peerState.ToJSON()
if err != nil {
return nil, err
}
peers := env.PeerManager.Peers()
peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
for _, pid := range peers {
peerState, ok := env.ConsensusReactor.GetPeerState(pid)
if !ok {
continue
}
peerStateJSON, err := peerState.ToJSON()
if err != nil {
return nil, err
}
addr := env.PeerManager.Addresses(pid)
if len(addr) != 0 {
peerStates = append(peerStates, coretypes.PeerStateInfo{
// Peer basic info.
NodeAddress: peer.SocketAddr().String(),
NodeAddress: addr[0].String(),
// Peer consensus state.
PeerState: peerStateJSON,
})
}
case env.PeerManager != nil:
peers := env.PeerManager.Peers()
peerStates = make([]coretypes.PeerStateInfo, 0, len(peers))
for _, pid := range peers {
peerState, ok := env.ConsensusReactor.GetPeerState(pid)
if !ok {
continue
}
peerStateJSON, err := peerState.ToJSON()
if err != nil {
return nil, err
}
addr := env.PeerManager.Addresses(pid)
if len(addr) >= 1 {
peerStates = append(peerStates, coretypes.PeerStateInfo{
// Peer basic info.
NodeAddress: addr[0].String(),
// Peer consensus state.
PeerState: peerStateJSON,
})
}
}
default:
return nil, errors.New("no peer system configured")
}
// Get self round state.


+ 0
- 9
internal/rpc/core/env.go View File

@ -51,14 +51,6 @@ type transport interface {
NodeInfo() types.NodeInfo
}
type peers interface {
AddPersistentPeers([]string) error
AddUnconditionalPeerIDs([]string) error
AddPrivatePeerIDs([]string) error
DialPeersAsync([]string) error
Peers() p2p.IPeerSet
}
type consensusReactor interface {
WaitSync() bool
GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool)
@ -83,7 +75,6 @@ type Environment struct {
EvidencePool sm.EvidencePool
ConsensusState consensusState
ConsensusReactor consensusReactor
P2PPeers peers
// Legacy p2p stack
P2PTransport transport
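The transport interface kept above is deliberately narrow: the RPC environment only needs NodeInfo(), so tests can satisfy it with a stub rather than a live transport. A minimal sketch of that pattern; stubTransport is hypothetical and not part of this diff:
package main
import (
	"fmt"
	"github.com/tendermint/tendermint/types"
)
// transport mirrors the narrow interface retained in env.go.
type transport interface {
	NodeInfo() types.NodeInfo
}
// stubTransport is a test double: it satisfies transport with no networking.
type stubTransport struct{ ni types.NodeInfo }
func (s stubTransport) NodeInfo() types.NodeInfo { return s.ni }
func main() {
	var tr transport = stubTransport{ni: types.NodeInfo{Moniker: "stub"}}
	fmt.Println(tr.NodeInfo().Moniker)
}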


+ 10
- 105
internal/rpc/core/net.go View File

@ -3,9 +3,7 @@ package core
import (
"errors"
"fmt"
"strings"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/rpc/coretypes"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)
@ -13,33 +11,19 @@ import (
// NetInfo returns network info.
// More: https://docs.tendermint.com/master/rpc/#/Info/net_info
func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) {
var peers []coretypes.Peer
peerList := env.PeerManager.Peers()
switch {
case env.P2PPeers != nil:
peersList := env.P2PPeers.Peers().List()
peers = make([]coretypes.Peer, 0, len(peersList))
for _, peer := range peersList {
peers = append(peers, coretypes.Peer{
ID: peer.ID(),
URL: peer.SocketAddr().String(),
})
peers := make([]coretypes.Peer, 0, len(peerList))
for _, peer := range peerList {
addrs := env.PeerManager.Addresses(peer)
if len(addrs) == 0 {
continue
}
case env.PeerManager != nil:
peerList := env.PeerManager.Peers()
for _, peer := range peerList {
addrs := env.PeerManager.Addresses(peer)
if len(addrs) == 0 {
continue
}
peers = append(peers, coretypes.Peer{
ID: peer,
URL: addrs[0].String(),
})
}
default:
return nil, errors.New("peer management system does not support NetInfo responses")
peers = append(peers, coretypes.Peer{
ID: peer,
URL: addrs[0].String(),
})
}
return &coretypes.ResultNetInfo{
@ -50,70 +34,6 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo
}, nil
}
// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT).
func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
if env.P2PPeers == nil {
return nil, errors.New("peer management system does not support this operation")
}
if len(seeds) == 0 {
return &coretypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", coretypes.ErrInvalidRequest)
}
env.Logger.Info("DialSeeds", "seeds", seeds)
if err := env.P2PPeers.DialPeersAsync(seeds); err != nil {
return &coretypes.ResultDialSeeds{}, err
}
return &coretypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil
}
// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT),
// optionally making them persistent.
func (env *Environment) UnsafeDialPeers(
ctx *rpctypes.Context,
peers []string,
persistent, unconditional, private bool) (*coretypes.ResultDialPeers, error) {
if env.P2PPeers == nil {
return nil, errors.New("peer management system does not support this operation")
}
if len(peers) == 0 {
return &coretypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", coretypes.ErrInvalidRequest)
}
ids, err := getIDs(peers)
if err != nil {
return &coretypes.ResultDialPeers{}, err
}
env.Logger.Info("DialPeers", "peers", peers, "persistent",
persistent, "unconditional", unconditional, "private", private)
if persistent {
if err := env.P2PPeers.AddPersistentPeers(peers); err != nil {
return &coretypes.ResultDialPeers{}, err
}
}
if private {
if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil {
return &coretypes.ResultDialPeers{}, err
}
}
if unconditional {
if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil {
return &coretypes.ResultDialPeers{}, err
}
}
if err := env.P2PPeers.DialPeersAsync(peers); err != nil {
return &coretypes.ResultDialPeers{}, err
}
return &coretypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil
}
// Genesis returns genesis file.
// More: https://docs.tendermint.com/master/rpc/#/Info/genesis
func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) {
@ -145,18 +65,3 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*core
Data: env.genChunks[id],
}, nil
}
func getIDs(peers []string) ([]string, error) {
ids := make([]string, 0, len(peers))
for _, peer := range peers {
spl := strings.Split(peer, "@")
if len(spl) != 2 {
return nil, p2p.ErrNetAddressNoID{Addr: peer}
}
ids = append(ids, spl[0])
}
return ids, nil
}
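The deleted getIDs helper split id@IP:PORT strings by hand. Should similar parsing be needed again, the standard library suffices; a sketch that keeps the original's strictness (exactly one @), with the error simplified to fmt.Errorf instead of the removed p2p.ErrNetAddressNoID:
package main
import (
	"fmt"
	"strings"
)
// peerID extracts the node ID from an "id@host:port" string, mirroring
// what getIDs did per entry before it was deleted.
func peerID(peer string) (string, error) {
	spl := strings.Split(peer, "@")
	if len(spl) != 2 {
		return "", fmt.Errorf("address %q does not contain an ID", peer)
	}
	return spl[0], nil
}
func main() {
	id, err := peerID("d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198")
	fmt.Println(id, err)
}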

+ 0
- 89
internal/rpc/core/net_test.go View File

@ -1,89 +0,0 @@
package core
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types"
)
func TestUnsafeDialSeeds(t *testing.T) {
sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123",
func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger())
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
env := &Environment{}
env.Logger = log.TestingLogger()
env.P2PPeers = sw
testCases := []struct {
seeds []string
isErr bool
}{
{[]string{}, true},
{[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false},
{[]string{"127.0.0.1:41198"}, true},
}
for _, tc := range testCases {
res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds)
if tc.isErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, res)
}
}
}
func TestUnsafeDialPeers(t *testing.T) {
sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123",
func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger())
sw.SetAddrBook(&p2p.AddrBookMock{
Addrs: make(map[string]struct{}),
OurAddrs: make(map[string]struct{}),
PrivateAddrs: make(map[string]struct{}),
})
err := sw.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := sw.Stop(); err != nil {
t.Error(err)
}
})
env := &Environment{}
env.Logger = log.TestingLogger()
env.P2PPeers = sw
testCases := []struct {
peers []string
persistence, unconditional, private bool
isErr bool
}{
{[]string{}, false, false, false, true},
{[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false},
{[]string{"127.0.0.1:41198"}, true, true, false, true},
}
for _, tc := range testCases {
res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private)
if tc.isErr {
assert.Error(t, err)
} else {
assert.NoError(t, err)
assert.NotNil(t, res)
}
}
}

+ 0
- 2
internal/rpc/core/routes.go View File

@ -55,7 +55,5 @@ func (env *Environment) GetRoutes() RoutesMap {
// AddUnsafeRoutes adds unsafe routes.
func (env *Environment) AddUnsafe(routes RoutesMap) {
// control API
routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds", false)
routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private", false)
routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false)
}

+ 1
- 0
internal/rpc/core/status.go View File

@ -58,6 +58,7 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus,
VotingPower: votingPower,
}
}
result := &coretypes.ResultStatus{
NodeInfo: env.P2PTransport.NodeInfo(),
SyncInfo: coretypes.SyncInfo{


+ 20
- 146
node/node.go View File

@ -54,10 +54,8 @@ type nodeImpl struct {
// network
transport *p2p.MConnTransport
sw *p2p.Switch // p2p connections
peerManager *p2p.PeerManager
router *p2p.Router
addrBook pex.AddrBook // known peers
nodeInfo types.NodeInfo
nodeKey types.NodeKey // our node privkey
isListening bool
@ -292,14 +290,6 @@ func makeNode(cfg *config.Config,
return nil, fmt.Errorf("could not create blockchain reactor: %w", err)
}
// TODO: Remove this once the switch is removed.
var bcReactorForSwitch p2p.Reactor
if bcReactorShim != nil {
bcReactorForSwitch = bcReactorShim
} else {
bcReactorForSwitch = bcReactor.(p2p.Reactor)
}
// Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first.
// FIXME We need to update metrics here, since other reactors don't have access to them.
if stateSync {
@ -312,29 +302,15 @@ func makeNode(cfg *config.Config,
// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
// we should clean this whole thing up. See:
// https://github.com/tendermint/tendermint/issues/4644
var (
stateSyncReactor *statesync.Reactor
stateSyncReactorShim *p2p.ReactorShim
channels map[p2p.ChannelID]*p2p.Channel
peerUpdates *p2p.PeerUpdates
)
stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims)
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(stateSyncReactorShim)
peerUpdates = stateSyncReactorShim.PeerUpdates
} else {
channels = makeChannelsFromShims(router, statesync.ChannelShims)
peerUpdates = peerManager.Subscribe()
}
stateSyncReactor = statesync.NewReactor(
ssLogger := logger.With("module", "statesync")
ssReactorShim := p2p.NewReactorShim(ssLogger, "StateSyncShim", statesync.ChannelShims)
channels := makeChannelsFromShims(router, statesync.ChannelShims)
peerUpdates := peerManager.Subscribe()
stateSyncReactor := statesync.NewReactor(
genDoc.ChainID,
genDoc.InitialHeight,
*cfg.StateSync,
stateSyncReactorShim.Logger,
ssLogger,
proxyApp.Snapshot(),
proxyApp.Query(),
channels[statesync.SnapshotChannel],
@ -353,10 +329,10 @@ func makeNode(cfg *config.Config,
// transports can either be agnostic to channel descriptors or have them
// declared in the constructor.
transport.AddChannelDescriptors(mpReactorShim.GetChannels())
transport.AddChannelDescriptors(bcReactorForSwitch.GetChannels())
transport.AddChannelDescriptors(bcReactorShim.GetChannels())
transport.AddChannelDescriptors(csReactorShim.GetChannels())
transport.AddChannelDescriptors(evReactorShim.GetChannels())
transport.AddChannelDescriptors(stateSyncReactorShim.GetChannels())
transport.AddChannelDescriptors(ssReactorShim.GetChannels())
// Optionally, start the pex reactor
//
@ -371,44 +347,14 @@ func makeNode(cfg *config.Config,
// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
// Note we currently use the addrBook regardless, at least for AddOurAddress.
var (
pexReactor service.Service
sw *p2p.Switch
addrBook pex.AddrBook
)
var pexReactor service.Service
pexCh := pex.ChannelDescriptor()
transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})
if cfg.P2P.UseLegacy {
// setup Transport and Switch
sw = createSwitch(
cfg, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch,
stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger,
)
err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " "))
if err != nil {
return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err)
}
err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " "))
if err != nil {
return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
}
addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey)
if err != nil {
return nil, fmt.Errorf("could not create addrbook: %w", err)
}
pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger)
} else {
addrBook = nil
pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
if err != nil {
return nil, err
}
pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
if err != nil {
return nil, err
}
if cfg.RPC.PprofListenAddress != "" {
@ -424,10 +370,8 @@ func makeNode(cfg *config.Config,
privValidator: privValidator,
transport: transport,
sw: sw,
peerManager: peerManager,
router: router,
addrBook: addrBook,
nodeInfo: nodeInfo,
nodeKey: nodeKey,
@ -456,7 +400,6 @@ func makeNode(cfg *config.Config,
ConsensusReactor: csReactor,
BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor),
P2PPeers: sw,
PeerManager: peerManager,
GenDoc: genDoc,
@ -468,17 +411,6 @@ func makeNode(cfg *config.Config,
},
}
// this is terrible, because typed nil interfaces are not ==
// nil, so this is just cleanup to avoid having a non-nil
// value in the RPC environment that has the semantic
// properties of nil.
if sw == nil {
node.rpcEnv.P2PPeers = nil
} else if peerManager == nil {
node.rpcEnv.PeerManager = nil
}
// end hack
node.rpcEnv.P2PTransport = node
node.BaseService = *service.NewBaseService(logger, "Node", node)
@ -525,11 +457,7 @@ func makeSeedNode(cfg *config.Config,
return nil, fmt.Errorf("failed to create router: %w", err)
}
var (
pexReactor service.Service
sw *p2p.Switch
addrBook pex.AddrBook
)
var pexReactor service.Service
// add the pex reactor
// FIXME: we add channel descriptors to both the router and the transport but only the router
@ -538,33 +466,9 @@ func makeSeedNode(cfg *config.Config,
pexCh := pex.ChannelDescriptor()
transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})
if cfg.P2P.UseLegacy {
sw = createSwitch(
cfg, transport, p2pMetrics, nil, nil,
nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger,
)
err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " "))
if err != nil {
return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err)
}
err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " "))
if err != nil {
return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err)
}
addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey)
if err != nil {
return nil, fmt.Errorf("could not create addrbook: %w", err)
}
pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger)
} else {
pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
if err != nil {
return nil, err
}
pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router)
if err != nil {
return nil, err
}
if cfg.RPC.PprofListenAddress != "" {
@ -579,8 +483,6 @@ func makeSeedNode(cfg *config.Config,
genesisDoc: genDoc,
transport: transport,
sw: sw,
addrBook: addrBook,
nodeInfo: nodeInfo,
nodeKey: nodeKey,
peerManager: peerManager,
@ -627,15 +529,8 @@ func (n *nodeImpl) OnStart() error {
}
n.isListening = true
n.Logger.Info("p2p service", "legacy_enabled", n.config.P2P.UseLegacy)
if n.config.P2P.UseLegacy {
// Add private IDs to addrbook to block those peers being added
n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))
if err = n.sw.Start(); err != nil {
return err
}
} else if err = n.router.Start(); err != nil {
if err = n.router.Start(); err != nil {
return err
}
@ -667,13 +562,7 @@ func (n *nodeImpl) OnStart() error {
}
}
if n.config.P2P.UseLegacy {
// Always connect to persistent peers
err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "))
if err != nil {
return fmt.Errorf("could not dial peers from persistent-peers field: %w", err)
}
} else if err := n.pexReactor.Start(); err != nil {
if err := n.pexReactor.Start(); err != nil {
return err
}
@ -794,14 +683,8 @@ func (n *nodeImpl) OnStop() {
n.Logger.Error("failed to stop the PEX v2 reactor", "err", err)
}
if n.config.P2P.UseLegacy {
if err := n.sw.Stop(); err != nil {
n.Logger.Error("failed to stop switch", "err", err)
}
} else {
if err := n.router.Stop(); err != nil {
n.Logger.Error("failed to stop router", "err", err)
}
if err := n.router.Stop(); err != nil {
n.Logger.Error("failed to stop router", "err", err)
}
if err := n.transport.Close(); err != nil {
@ -1216,12 +1099,3 @@ func makeChannelsFromShims(
return channels
}
func getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Channel {
channels := map[p2p.ChannelID]*p2p.Channel{}
for chID := range reactorShim.Channels {
channels[chID] = reactorShim.GetChannel(chID)
}
return channels
}
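The deleted nil-check hack above guarded against a classic Go pitfall: a typed nil pointer stored in an interface compares unequal to nil. A self-contained illustration of why that cleanup existed:
package main
import "fmt"
type peers interface{ Len() int }
type switchImpl struct{}
func (s *switchImpl) Len() int { return 0 }
func main() {
	var sw *switchImpl     // typed nil pointer
	var p peers = sw       // interface holds (type=*switchImpl, value=nil)
	fmt.Println(sw == nil) // true
	fmt.Println(p == nil)  // false: the interface itself is non-nil
}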

+ 8
- 186
node/setup.go View File

@ -2,16 +2,13 @@ package node
import (
"bytes"
"context"
"fmt"
"math"
"net"
"time"
dbm "github.com/tendermint/tm-db"
abciclient "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0"
@ -161,18 +158,8 @@ func createMempoolReactor(
channelShims := mempoolv0.GetChannelShims(cfg.Mempool)
reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims)
var (
channels map[p2p.ChannelID]*p2p.Channel
peerUpdates *p2p.PeerUpdates
)
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates
} else {
channels = makeChannelsFromShims(router, channelShims)
peerUpdates = peerManager.Subscribe()
}
channels := makeChannelsFromShims(router, channelShims)
peerUpdates := peerManager.Subscribe()
switch cfg.Mempool.Version {
case config.MempoolV0:
@ -255,23 +242,10 @@ func createEvidenceReactor(
return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err)
}
var (
channels map[p2p.ChannelID]*p2p.Channel
peerUpdates *p2p.PeerUpdates
)
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates
} else {
channels = makeChannelsFromShims(router, evidence.ChannelShims)
peerUpdates = peerManager.Subscribe()
}
evidenceReactor := evidence.NewReactor(
logger,
channels[evidence.EvidenceChannel],
peerUpdates,
makeChannelsFromShims(router, evidence.ChannelShims)[evidence.EvidenceChannel],
peerManager.Subscribe(),
evidencePool,
)
@ -294,19 +268,8 @@ func createBlockchainReactor(
logger = logger.With("module", "blockchain")
reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)
var (
channels map[p2p.ChannelID]*p2p.Channel
peerUpdates *p2p.PeerUpdates
)
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates
} else {
channels = makeChannelsFromShims(router, bcv0.ChannelShims)
peerUpdates = peerManager.Subscribe()
}
channels := makeChannelsFromShims(router, bcv0.ChannelShims)
peerUpdates := peerManager.Subscribe()
reactor, err := bcv0.NewReactor(
logger, state.Copy(), blockExec, blockStore, csReactor,
@ -357,13 +320,8 @@ func createConsensusReactor(
peerUpdates *p2p.PeerUpdates
)
if cfg.P2P.UseLegacy {
channels = getChannelsFromShim(reactorShim)
peerUpdates = reactorShim.PeerUpdates
} else {
channels = makeChannelsFromShims(router, consensus.ChannelShims)
peerUpdates = peerManager.Subscribe()
}
channels = makeChannelsFromShims(router, consensus.ChannelShims)
peerUpdates = peerManager.Subscribe()
reactor := consensus.NewReactor(
logger,
@ -500,142 +458,6 @@ func createRouter(
)
}
func createSwitch(
cfg *config.Config,
transport p2p.Transport,
p2pMetrics *p2p.Metrics,
mempoolReactor *p2p.ReactorShim,
bcReactor p2p.Reactor,
stateSyncReactor *p2p.ReactorShim,
consensusReactor *p2p.ReactorShim,
evidenceReactor *p2p.ReactorShim,
proxyApp proxy.AppConns,
nodeInfo types.NodeInfo,
nodeKey types.NodeKey,
p2pLogger log.Logger,
) *p2p.Switch {
var (
connFilters = []p2p.ConnFilterFunc{}
peerFilters = []p2p.PeerFilterFunc{}
)
if !cfg.P2P.AllowDuplicateIP {
connFilters = append(connFilters, p2p.ConnDuplicateIPFilter)
}
// Filter peers by addr or pubkey with an ABCI query.
// If the query return code is OK, add peer.
if cfg.FilterPeers {
connFilters = append(
connFilters,
// ABCI query for address filtering.
func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
})
if err != nil {
return err
}
if res.IsErr() {
return fmt.Errorf("error querying abci app: %v", res)
}
return nil
},
)
peerFilters = append(
peerFilters,
// ABCI query for ID filtering.
func(_ p2p.IPeerSet, p p2p.Peer) error {
res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
})
if err != nil {
return err
}
if res.IsErr() {
return fmt.Errorf("error querying abci app: %v", res)
}
return nil
},
)
}
sw := p2p.NewSwitch(
cfg.P2P,
transport,
p2p.WithMetrics(p2pMetrics),
p2p.SwitchPeerFilters(peerFilters...),
p2p.SwitchConnFilters(connFilters...),
)
sw.SetLogger(p2pLogger)
if cfg.Mode != config.ModeSeed {
sw.AddReactor("MEMPOOL", mempoolReactor)
sw.AddReactor("BLOCKCHAIN", bcReactor)
sw.AddReactor("CONSENSUS", consensusReactor)
sw.AddReactor("EVIDENCE", evidenceReactor)
sw.AddReactor("STATESYNC", stateSyncReactor)
}
sw.SetNodeInfo(nodeInfo)
sw.SetNodeKey(nodeKey)
p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", cfg.NodeKeyFile())
return sw
}
func createAddrBookAndSetOnSwitch(cfg *config.Config, sw *p2p.Switch,
p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) {
addrBook := pex.NewAddrBook(cfg.P2P.AddrBookFile(), cfg.P2P.AddrBookStrict)
addrBook.SetLogger(p2pLogger.With("book", cfg.P2P.AddrBookFile()))
// Add ourselves to addrbook to prevent dialing ourselves
if cfg.P2P.ExternalAddress != "" {
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ExternalAddress))
if err != nil {
return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
}
addrBook.AddOurAddress(addr)
}
if cfg.P2P.ListenAddress != "" {
addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ListenAddress))
if err != nil {
return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
}
addrBook.AddOurAddress(addr)
}
sw.SetAddrBook(addrBook)
return addrBook, nil
}
func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, cfg *config.Config,
sw *p2p.Switch, logger log.Logger) *pex.Reactor {
reactorConfig := &pex.ReactorConfig{
Seeds: tmstrings.SplitAndTrimEmpty(cfg.P2P.Seeds, ",", " "),
SeedMode: cfg.Mode == config.ModeSeed,
// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer is 10000
// blocks; assuming 10s blocks, that is ~ 28 hours.
// TODO (melekes): make it dynamic based on the actual block latencies
// from the live network.
// https://github.com/tendermint/tendermint/issues/3523
SeedDisconnectWaitPeriod: 28 * time.Hour,
PersistentPeersMaxDialPeriod: cfg.P2P.PersistentPeersMaxDialPeriod,
}
// TODO persistent peers ? so we can have their DNS addrs saved
pexReactor := pex.NewReactor(addrBook, reactorConfig)
pexReactor.SetLogger(logger.With("module", "pex"))
sw.AddReactor("PEX", pexReactor)
return pexReactor
}
func createPEXReactorV2(
cfg *config.Config,
logger log.Logger,


+ 0
- 14
rpc/client/local/local.go View File

@ -136,20 +136,6 @@ func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
return c.env.Health(c.ctx)
}
func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
return c.env.UnsafeDialSeeds(c.ctx, seeds)
}
func (c *Local) DialPeers(
ctx context.Context,
peers []string,
persistent,
unconditional,
private bool,
) (*coretypes.ResultDialPeers, error) {
return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private)
}
func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight)
}


+ 0
- 14
rpc/client/mock/client.go View File

@ -131,20 +131,6 @@ func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) {
return c.env.Health(&rpctypes.Context{})
}
func (c Client) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) {
return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds)
}
func (c Client) DialPeers(
ctx context.Context,
peers []string,
persistent,
unconditional,
private bool,
) (*coretypes.ResultDialPeers, error) {
return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private)
}
func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll
return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight)
}


+ 1
- 90
test/e2e/generator/generate.go View File

@ -15,7 +15,6 @@ var (
// separate testnet for each combination (Cartesian product) of options.
testnetCombinations = map[string][]interface{}{
"topology": {"single", "quad", "large"},
"p2p": {NewP2PMode, LegacyP2PMode, HybridP2PMode},
"queueType": {"priority"}, // "fifo", "wdrr"
"initialHeight": {0, 1000},
"initialState": {
@ -71,19 +70,6 @@ var (
// Generate generates random testnets using the given RNG.
func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) {
manifests := []e2e.Manifest{}
switch opts.P2P {
case NewP2PMode, LegacyP2PMode, HybridP2PMode:
defer func() {
// avoid modifying the global state.
original := make([]interface{}, len(testnetCombinations["p2p"]))
copy(original, testnetCombinations["p2p"])
testnetCombinations["p2p"] = original
}()
testnetCombinations["p2p"] = []interface{}{opts.P2P}
case MixedP2PMode:
testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode}
}
for _, opt := range combinations(testnetCombinations) {
manifest, err := generateTestnet(r, opt)
@ -95,12 +81,6 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) {
continue
}
if len(manifest.Nodes) == 1 {
if opt["p2p"] == HybridP2PMode {
continue
}
}
if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize {
continue
}
@ -116,20 +96,9 @@ type Options struct {
MaxNetworkSize int
NumGroups int
Directory string
P2P P2PMode
Reverse bool
}
type P2PMode string
const (
NewP2PMode P2PMode = "new"
LegacyP2PMode P2PMode = "legacy"
HybridP2PMode P2PMode = "hybrid"
// mixed means that all combinations are generated
MixedP2PMode P2PMode = "mixed"
)
// generateTestnet generates a single testnet with the given options.
func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) {
manifest := e2e.Manifest{
@ -145,13 +114,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
TxSize: int64(txSize.Choose(r).(int)),
}
p2pMode := opt["p2p"].(P2PMode)
switch p2pMode {
case NewP2PMode, LegacyP2PMode, HybridP2PMode:
default:
return manifest, fmt.Errorf("unknown p2p mode %s", p2pMode)
}
var numSeeds, numValidators, numFulls, numLightClients int
switch opt["topology"].(string) {
case "single":
@ -168,27 +130,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
return manifest, fmt.Errorf("unknown topology %q", opt["topology"])
}
const legacyP2PFactor float64 = 0.5
// First we generate seed nodes, starting at the initial height.
for i := 1; i <= numSeeds; i++ {
node := generateNode(r, manifest, e2e.ModeSeed, 0, false)
switch p2pMode {
case LegacyP2PMode:
node.UseLegacyP2P = true
case HybridP2PMode:
node.UseLegacyP2P = r.Float64() < legacyP2PFactor
}
manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node
}
var (
numSyncingNodes = 0
hybridNumNew = 0
hybridNumLegacy = 0
)
var numSyncingNodes = 0
// Next, we generate validators. We make sure a BFT quorum of validators start
// at the initial height, and that we have two archive nodes. We also set up
@ -205,29 +153,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
name := fmt.Sprintf("validator%02d", i)
node := generateNode(r, manifest, e2e.ModeValidator, startAt, i <= 2)
switch p2pMode {
case LegacyP2PMode:
node.UseLegacyP2P = true
case HybridP2PMode:
node.UseLegacyP2P = r.Float64() < legacyP2PFactor
if node.UseLegacyP2P {
hybridNumLegacy++
if hybridNumNew == 0 {
hybridNumNew++
hybridNumLegacy--
node.UseLegacyP2P = false
}
} else {
hybridNumNew++
if hybridNumLegacy == 0 {
hybridNumNew--
hybridNumLegacy++
node.UseLegacyP2P = true
}
}
}
manifest.Nodes[name] = node
if startAt == 0 {
@ -259,13 +184,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
}
node := generateNode(r, manifest, e2e.ModeFull, startAt, false)
switch p2pMode {
case LegacyP2PMode:
node.UseLegacyP2P = true
case HybridP2PMode:
node.UseLegacyP2P = r.Float64() > legacyP2PFactor
}
manifest.Nodes[fmt.Sprintf("full%02d", i)] = node
}
@ -336,13 +254,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
r, startAt+(5*int64(i)), lightProviders,
)
switch p2pMode {
case LegacyP2PMode:
node.UseLegacyP2P = true
case HybridP2PMode:
node.UseLegacyP2P = r.Float64() < legacyP2PFactor
}
manifest.Nodes[fmt.Sprintf("light%02d", i)] = node
}
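generateTestnet is driven by combinations(testnetCombinations), which expands the option map into one assignment per element of the Cartesian product. A self-contained sketch of that expansion, under the assumption that the unexported combinations helper behaves like this (map iteration order, and therefore output order, may differ from the real one):
package main
import "fmt"
// combinations expands a map of option lists into every possible
// assignment of one value per key (the Cartesian product).
func combinations(options map[string][]interface{}) []map[string]interface{} {
	result := []map[string]interface{}{{}}
	for key, values := range options {
		var next []map[string]interface{}
		for _, partial := range result {
			for _, v := range values {
				combo := make(map[string]interface{}, len(partial)+1)
				for k, pv := range partial {
					combo[k] = pv
				}
				combo[key] = v
				next = append(next, combo)
			}
		}
		result = next
	}
	return result
}
func main() {
	opts := map[string][]interface{}{
		"topology":      {"single", "quad", "large"},
		"initialHeight": {0, 1000},
	}
	fmt.Println(len(combinations(opts))) // 3 * 2 = 6
}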


+ 2
- 70
test/e2e/generator/generate_test.go View File

@ -5,15 +5,14 @@ import (
"math/rand"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
e2e "github.com/tendermint/tendermint/test/e2e/pkg"
)
func TestGenerator(t *testing.T) {
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: MixedP2PMode})
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{})
require.NoError(t, err)
require.True(t, len(manifests) >= 64, "insufficient combinations")
require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests))
// this just means that the numbers reported by the test
// failures map to the test cases that you'd see locally.
@ -41,71 +40,4 @@ func TestGenerator(t *testing.T) {
require.True(t, numStateSyncs <= 2)
})
}
t.Run("Hybrid", func(t *testing.T) {
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: HybridP2PMode})
require.NoError(t, err)
require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests))
// failures map to the test cases that you'd see locally.
e2e.SortManifests(manifests, false /* ascending */)
for idx, m := range manifests {
t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) {
require.True(t, len(m.Nodes) > 1)
var numLegacy, numNew int
for _, node := range m.Nodes {
if node.UseLegacyP2P {
numLegacy++
} else {
numNew++
}
}
assert.True(t, numLegacy >= 1, "not enough legacy nodes [%d/%d]",
numLegacy, len(m.Nodes))
assert.True(t, numNew >= 1, "not enough new nodes [%d/%d]",
numNew, len(m.Nodes))
})
}
})
t.Run("UnmixedP2P", func(t *testing.T) {
t.Run("New", func(t *testing.T) {
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: NewP2PMode})
require.NoError(t, err)
require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests))
// failures map to the test cases that you'd see locally.
e2e.SortManifests(manifests, false /* ascending */)
for idx, m := range manifests {
t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) {
for name, node := range m.Nodes {
t.Run(name, func(t *testing.T) {
require.False(t, node.UseLegacyP2P)
})
}
})
}
})
t.Run("Legacy", func(t *testing.T) {
manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: LegacyP2PMode})
require.NoError(t, err)
require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests))
// failures map to the test cases that you'd see locally.
e2e.SortManifests(manifests, false /* ascending */)
for idx, m := range manifests {
t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) {
for name, node := range m.Nodes {
t.Run(name, func(t *testing.T) {
require.True(t, node.UseLegacyP2P)
})
}
})
}
})
})
}

+ 0
- 16
test/e2e/generator/main.go View File

@ -38,20 +38,6 @@ func NewCLI() *CLI {
SilenceUsage: true,
SilenceErrors: true, // we'll output them ourselves in Run()
RunE: func(cmd *cobra.Command, args []string) error {
var err error
p2pMode, err := cmd.Flags().GetString("p2p")
if err != nil {
return err
}
switch mode := P2PMode(p2pMode); mode {
case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode:
cli.opts.P2P = mode
default:
return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode)
}
return cli.generate()
},
}
@ -60,8 +46,6 @@ func NewCLI() *CLI {
_ = cli.root.MarkPersistentFlagRequired("dir")
cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order")
cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups")
cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode),
"P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]")
cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1,
"Minimum network size (nodes)")
cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0,


+ 0
- 1
test/e2e/networks/ci.toml View File

@ -1,7 +1,6 @@
# This testnet is run by CI, and attempts to cover a broad range of
# functionality with a single network.
disable_legacy_p2p = false
evidence = 5
initial_height = 1000
initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"}


+ 0
- 3
test/e2e/pkg/manifest.go View File

@ -145,9 +145,6 @@ type ManifestNode struct {
// This is helpful when debugging a specific problem. This overrides the network
// level.
LogLevel string `toml:"log_level"`
// UseLegacyP2P enables use of the legacy p2p layer for this node.
UseLegacyP2P bool `toml:"use_legacy_p2p"`
}
// Stateless reports whether m is a node that does not own state, including light and seed nodes.


+ 0
- 2
test/e2e/pkg/testnet.go View File

@ -96,7 +96,6 @@ type Node struct {
PersistentPeers []*Node
Perturbations []Perturbation
LogLevel string
UseLegacyP2P bool
QueueType string
HasStarted bool
}
@ -182,7 +181,6 @@ func LoadTestnet(file string) (*Testnet, error) {
Perturbations: []Perturbation{},
LogLevel: manifest.LogLevel,
QueueType: manifest.QueueType,
UseLegacyP2P: nodeManifest.UseLegacyP2P,
}
if node.StartAt == testnet.InitialHeight {


+ 0
- 2
test/e2e/runner/setup.go View File

@ -238,7 +238,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) {
cfg.RPC.PprofListenAddress = ":6060"
cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false))
cfg.P2P.AddrBookStrict = false
cfg.P2P.UseLegacy = node.UseLegacyP2P
cfg.P2P.QueueType = node.QueueType
cfg.DBBackend = node.Database
cfg.StateSync.DiscoveryTime = 5 * time.Second
@ -354,7 +353,6 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) {
"snapshot_interval": node.SnapshotInterval,
"retain_blocks": node.RetainBlocks,
"key_type": node.PrivvalKey.Type(),
"use_legacy_p2p": node.UseLegacyP2P,
}
switch node.ABCIProtocol {
case e2e.ProtocolUNIX:


+ 0
- 35
test/fuzz/p2p/addrbook/fuzz.go View File

@ -1,35 +0,0 @@
// nolint: gosec
package addrbook
import (
"encoding/json"
"fmt"
"math/rand"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/pex"
)
var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true)
func Fuzz(data []byte) int {
addr := new(p2p.NetAddress)
if err := json.Unmarshal(data, addr); err != nil {
return -1
}
// Fuzz AddAddress.
err := addrBook.AddAddress(addr, addr)
if err != nil {
return 0
}
// Also, make sure PickAddress always returns a non-nil address.
bias := rand.Intn(100)
if p := addrBook.PickAddress(bias); p == nil {
panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)",
bias, addrBook.Size()))
}
return 1
}
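The harness above targeted go-fuzz, whose integer return values steer the mutator: -1 drops an input from the corpus, 0 keeps it without favor, and 1 prioritizes it. Ported to Go's native fuzzing (Go 1.18+), the same entry point would look roughly like this sketch, which assumes the package's Fuzz function (or its logic) is still available to call:
package addrbook
import "testing"
// FuzzAddAddress drives the same logic through the standard testing.F
// harness, replacing go-fuzz's return codes with ordinary control flow.
func FuzzAddAddress(f *testing.F) {
	f.Add([]byte(`{}`)) // minimal seed corpus entry
	f.Fuzz(func(t *testing.T, data []byte) {
		Fuzz(data)
	})
}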

+ 0
- 33
test/fuzz/p2p/addrbook/fuzz_test.go View File

@ -1,33 +0,0 @@
package addrbook_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/test/fuzz/p2p/addrbook"
)
const testdataCasesDir = "testdata/cases"
func TestAddrbookTestdataCases(t *testing.T) {
entries, err := os.ReadDir(testdataCasesDir)
require.NoError(t, err)
for _, e := range entries {
entry := e
t.Run(entry.Name(), func(t *testing.T) {
defer func() {
r := recover()
require.Nilf(t, r, "testdata/cases test panic")
}()
f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
require.NoError(t, err)
input, err := ioutil.ReadAll(f)
require.NoError(t, err)
addrbook.Fuzz(input)
})
}
}

+ 0
- 59
test/fuzz/p2p/addrbook/init-corpus/main.go View File

@ -1,59 +0,0 @@
// nolint: gosec
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"path/filepath"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
)
func main() {
baseDir := flag.String("base", ".", `where the "corpus" directory will live`)
flag.Parse()
initCorpus(*baseDir)
}
func initCorpus(baseDir string) {
log.SetFlags(0)
// create "corpus" directory
corpusDir := filepath.Join(baseDir, "corpus")
if err := os.MkdirAll(corpusDir, 0755); err != nil {
log.Fatalf("Creating %q err: %v", corpusDir, err)
}
// create corpus
privKey := ed25519.GenPrivKey()
addrs := []*p2p.NetAddress{
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656},
{ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656},
}
for i, addr := range addrs {
filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i))
bz, err := json.Marshal(addr)
if err != nil {
log.Fatalf("can't marshal %v: %v", addr, err)
}
if err := ioutil.WriteFile(filename, bz, 0644); err != nil {
log.Fatalf("can't write %v to %q: %v", addr, filename, err)
}
log.Printf("wrote %q", filename)
}
}

+ 0
- 0
test/fuzz/p2p/addrbook/testdata/cases/empty View File


+ 0
- 33
test/fuzz/p2p/pex/fuzz_test.go View File

@ -1,33 +0,0 @@
package pex_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/test/fuzz/p2p/pex"
)
const testdataCasesDir = "testdata/cases"
func TestPexTestdataCases(t *testing.T) {
entries, err := os.ReadDir(testdataCasesDir)
require.NoError(t, err)
for _, e := range entries {
entry := e
t.Run(entry.Name(), func(t *testing.T) {
defer func() {
r := recover()
require.Nilf(t, r, "testdata/cases test panic")
}()
f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name()))
require.NoError(t, err)
input, err := ioutil.ReadAll(f)
require.NoError(t, err)
pex.Fuzz(input)
})
}
}

+ 0
- 84
test/fuzz/p2p/pex/init-corpus/main.go View File

@ -1,84 +0,0 @@
// nolint: gosec
package main
import (
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/pex"
tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
func main() {
baseDir := flag.String("base", ".", `where the "corpus" directory will live`)
flag.Parse()
initCorpus(*baseDir)
}
func initCorpus(rootDir string) {
log.SetFlags(0)
corpusDir := filepath.Join(rootDir, "corpus")
if err := os.MkdirAll(corpusDir, 0755); err != nil {
log.Fatalf("Creating %q err: %v", corpusDir, err)
}
sizes := []int{0, 1, 2, 17, 5, 31}
// Make the PRNG predictable
rand.Seed(10)
for _, n := range sizes {
var addrs []*p2p.NetAddress
// IPv4 addresses
for i := 0; i < n; i++ {
privKey := ed25519.GenPrivKey()
addr := fmt.Sprintf(
"%s@%v.%v.%v.%v:26656",
types.NodeIDFromPubKey(privKey.PubKey()),
rand.Int()%256,
rand.Int()%256,
rand.Int()%256,
rand.Int()%256,
)
netAddr, _ := types.NewNetAddressString(addr)
addrs = append(addrs, netAddr)
}
// IPv6 addresses
privKey := ed25519.GenPrivKey()
ipv6a, err := types.NewNetAddressString(
fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey())))
if err != nil {
log.Fatalf("can't create a new netaddress: %v", err)
}
addrs = append(addrs, ipv6a)
msg := tmp2p.PexMessage{
Sum: &tmp2p.PexMessage_PexResponse{
PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)},
},
}
bz, err := msg.Marshal()
if err != nil {
log.Fatalf("unable to marshal: %v", err)
}
filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n))
if err := ioutil.WriteFile(filename, bz, 0644); err != nil {
log.Fatalf("can't write %X to %q: %v", bz, filename, err)
}
log.Printf("wrote %q", filename)
}
}

+ 0
- 95
test/fuzz/p2p/pex/reactor_receive.go View File

@ -1,95 +0,0 @@
package pex
import (
"net"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/ed25519"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/pex"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)
var (
pexR *pex.Reactor
peer p2p.Peer
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
)
func init() {
addrB := pex.NewAddrBook("./testdata/addrbook1", false)
pexR = pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false})
pexR.SetLogger(logger)
peer = newFuzzPeer()
pexR.AddPeer(peer)
cfg := config.DefaultP2PConfig()
cfg.PexReactor = true
sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch {
return sw
}, logger)
pexR.SetSwitch(sw)
}
func Fuzz(data []byte) int {
if len(data) == 0 {
return -1
}
pexR.Receive(pex.PexChannel, peer, data)
if !peer.IsRunning() {
// do not increase priority for msgs which lead to peer being stopped
return 0
}
return 1
}
type fuzzPeer struct {
*service.BaseService
m map[string]interface{}
}
var _ p2p.Peer = (*fuzzPeer)(nil)
func newFuzzPeer() *fuzzPeer {
fp := &fuzzPeer{m: make(map[string]interface{})}
fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp)
return fp
}
var privKey = ed25519.GenPrivKey()
var nodeID = types.NodeIDFromPubKey(privKey.PubKey())
var defaultNodeInfo = types.NodeInfo{
ProtocolVersion: types.ProtocolVersion{
P2P: version.P2PProtocol,
Block: version.BlockProtocol,
App: 0,
},
NodeID: nodeID,
ListenAddr: "127.0.0.1:0",
Moniker: "foo1",
}
func (fp *fuzzPeer) FlushStop() {}
func (fp *fuzzPeer) ID() types.NodeID { return nodeID }
func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) }
func (fp *fuzzPeer) RemoteAddr() net.Addr {
return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""}
}
func (fp *fuzzPeer) IsOutbound() bool { return false }
func (fp *fuzzPeer) IsPersistent() bool { return false }
func (fp *fuzzPeer) CloseConn() error { return nil }
func (fp *fuzzPeer) NodeInfo() types.NodeInfo { return defaultNodeInfo }
func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs }
func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress {
return types.NewNetAddress(fp.ID(), fp.RemoteAddr())
}
func (fp *fuzzPeer) Send(byte, []byte) bool { return true }
func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true }
func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value }
func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] }

+ 0
- 1705
test/fuzz/p2p/pex/testdata/addrbook1
File diff suppressed because it is too large
View File


+ 0
- 0
test/fuzz/p2p/pex/testdata/cases/empty View File

