diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md
index 56efd763b..0a4c90f40 100644
--- a/CHANGELOG_PENDING.md
+++ b/CHANGELOG_PENDING.md
@@ -16,8 +16,15 @@ Special thanks to external contributors on this release:
 - P2P Protocol
 
+  - [p2p] \#7035 Remove legacy P2P routing implementation and
+    associated configuration (@tychoish)
+
 - Go API
 
+  - [blocksync] \#7046 Remove v2 implementation of the blocksync
+    service and reactor, which was disabled in the previous release
+    (@tychoish)
+
 - Blockchain Protocol
 
 ### FEATURES
 
diff --git a/config/config.go b/config/config.go
index 79ce56935..a393e6edc 100644
--- a/config/config.go
+++ b/config/config.go
@@ -709,11 +709,6 @@ type P2PConfig struct { //nolint: maligned
 	// Force dial to fail
 	TestDialFail bool `mapstructure:"test-dial-fail"`
 
-	// UseLegacy enables the "legacy" P2P implementation and
-	// disables the newer default implementation. This flag will
-	// be removed in a future release.
-	UseLegacy bool `mapstructure:"use-legacy"`
-
 	// Makes it possible to configure which queue backend the p2p
 	// layer uses. Options are: "fifo", "priority" and "wdrr",
 	// with the default being "priority".
@@ -748,7 +743,6 @@ func DefaultP2PConfig() *P2PConfig {
 		DialTimeout:  3 * time.Second,
 		TestDialFail: false,
 		QueueType:    "priority",
-		UseLegacy:    false,
 	}
 }
 
diff --git a/config/toml.go b/config/toml.go
index 6f07d6537..ee4c30004 100644
--- a/config/toml.go
+++ b/config/toml.go
@@ -265,9 +265,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
 #######################################################
 [p2p]
 
-# Enable the legacy p2p layer.
-use-legacy = {{ .P2P.UseLegacy }}
-
 # Select the p2p internal queue
 queue-type = "{{ .P2P.QueueType }}"
 
diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md
index 5695c1a28..0c11df6f7 100644
--- a/docs/nodes/configuration.md
+++ b/docs/nodes/configuration.md
@@ -221,9 +221,6 @@ pprof-laddr = ""
 #######################################################
 [p2p]
 
-# Enable the legacy p2p layer.
-use-legacy = false
-
 # Select the p2p internal queue
 queue-type = "priority"
 
diff --git a/internal/p2p/base_reactor.go b/internal/p2p/base_reactor.go
deleted file mode 100644
index 09925caf8..000000000
--- a/internal/p2p/base_reactor.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package p2p
-
-import (
-	"github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/service"
-)
-
-// Reactor is responsible for handling incoming messages on one or more
-// Channel. Switch calls GetChannels when reactor is added to it. When a new
-// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called
-// when the peer is stopped. Receive is called when a message is received on a
-// channel associated with this reactor.
-//
-// Peer#Send or Peer#TrySend should be used to send the message to a peer.
-type Reactor interface {
-	service.Service // Start, Stop
-
-	// SetSwitch allows setting a switch.
-	SetSwitch(*Switch)
-
-	// GetChannels returns the list of MConnection.ChannelDescriptor. Make sure
-	// that each ID is unique across all the reactors added to the switch.
-	GetChannels() []*conn.ChannelDescriptor
-
-	// InitPeer is called by the switch before the peer is started. Use it to
-	// initialize data for the peer (e.g. peer state).
-	//
-	// NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start
-	// the peer. Do not store any data associated with the peer in the reactor
-	// itself unless you don't want to have a state, which is never cleaned up.
- InitPeer(peer Peer) Peer - - // AddPeer is called by the switch after the peer is added and successfully - // started. Use it to start goroutines communicating with the peer. - AddPeer(peer Peer) - - // RemovePeer is called by the switch when the peer is stopped (due to error - // or other reason). - RemovePeer(peer Peer, reason interface{}) - - // Receive is called by the switch when msgBytes is received from the peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. - // - // CONTRACT: msgBytes are not nil. - // - // XXX: do not call any methods that can block or incur heavy processing. - // https://github.com/tendermint/tendermint/issues/2888 - Receive(chID byte, peer Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - service.BaseService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - BaseService: *service.NewBaseService(nil, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 62587c0da..84384011b 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -195,7 +195,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read, write string = "", writes[0] + var read, write = "", writes[0] var readCount = 0 for _, readChunk := range reads { read += readChunk diff --git a/internal/p2p/mock/reactor.go b/internal/p2p/mock/reactor.go deleted file mode 100644 index d634a8032..000000000 --- a/internal/p2p/mock/reactor.go +++ /dev/null @@ -1,23 +0,0 @@ -package mock - -import ( - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" -) - -type Reactor struct { - p2p.BaseReactor -} - -func NewReactor() *Reactor { - r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) - r.SetLogger(log.TestingLogger()) - return r -} - -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { return []*conn.ChannelDescriptor{} } -func (r *Reactor) AddPeer(peer p2p.Peer) {} -func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {} -func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {} diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go deleted file mode 100644 index 709a1294a..000000000 --- a/internal/p2p/peer.go +++ /dev/null @@ -1,371 +0,0 @@ -package p2p - -import ( - "fmt" - "io" - "net" - "runtime/debug" - "time" - - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -//go:generate ../../scripts/mockery_generate.sh Peer - -const metricsTickerDuration = 10 * time.Second - -// Peer is an interface representing a peer connected on a reactor. 
-type Peer interface { - service.Service - FlushStop() - - ID() types.NodeID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection - RemoteAddr() net.Addr // remote address of the connection - - IsOutbound() bool // did we dial the peer - IsPersistent() bool // do we redial this peer when we disconnect - - CloseConn() error // close original connection - - NodeInfo() types.NodeInfo // peer's info - Status() tmconn.ConnectionStatus - SocketAddr() *NetAddress // actual address of the socket - - Send(byte, []byte) bool - TrySend(byte, []byte) bool - - Set(string, interface{}) - Get(string) interface{} -} - -//---------------------------------------------------------- - -// peerConn contains the raw connection and its config. -type peerConn struct { - outbound bool - persistent bool - conn Connection - ip net.IP // cached RemoteIP() -} - -func newPeerConn(outbound, persistent bool, conn Connection) peerConn { - return peerConn{ - outbound: outbound, - persistent: persistent, - conn: conn, - } -} - -// Return the IP from the connection RemoteAddr -func (pc peerConn) RemoteIP() net.IP { - if pc.ip == nil { - pc.ip = pc.conn.RemoteEndpoint().IP - } - return pc.ip -} - -// peer implements Peer. -// -// Before using a peer, you will need to perform a handshake on connection. -type peer struct { - service.BaseService - - // raw peerConn and the multiplex connection - peerConn - - // peer's node info and the channel it knows about - // channels = nodeInfo.Channels - // cached to avoid copying nodeInfo in hasChannel - nodeInfo types.NodeInfo - channels []byte - reactors map[byte]Reactor - onPeerError func(Peer, interface{}) - - // User data - Data *cmap.CMap - - metrics *Metrics - metricsTicker *time.Ticker -} - -type PeerOption func(*peer) - -func newPeer( - nodeInfo types.NodeInfo, - pc peerConn, - reactorsByCh map[byte]Reactor, - onPeerError func(Peer, interface{}), - options ...PeerOption, -) *peer { - p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - reactors: reactorsByCh, - onPeerError: onPeerError, - Data: cmap.NewCMap(), - metricsTicker: time.NewTicker(metricsTickerDuration), - metrics: NopMetrics(), - } - - p.BaseService = *service.NewBaseService(nil, "Peer", p) - for _, option := range options { - option(p) - } - - return p -} - -// onError calls the peer error callback. -func (p *peer) onError(err interface{}) { - p.onPeerError(p, err) -} - -// String representation. -func (p *peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID()) - } - - return fmt.Sprintf("Peer{%v %v in}", p.conn, p.ID()) -} - -//--------------------------------------------------- -// Implements service.Service - -// SetLogger implements BaseService. -func (p *peer) SetLogger(l log.Logger) { - p.Logger = l -} - -// OnStart implements BaseService. -func (p *peer) OnStart() error { - if err := p.BaseService.OnStart(); err != nil { - return err - } - - go p.processMessages() - go p.metricsReporter() - - return nil -} - -// processMessages processes messages received from the connection. 
-func (p *peer) processMessages() { - defer func() { - if r := recover(); r != nil { - p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack())) - p.onError(fmt.Errorf("panic during peer message processing: %v", r)) - } - }() - - for { - chID, msg, err := p.conn.ReceiveMessage() - if err != nil { - p.onError(err) - return - } - reactor, ok := p.reactors[byte(chID)] - if !ok { - p.onError(fmt.Errorf("unknown channel %v", chID)) - return - } - reactor.Receive(byte(chID), p, msg) - } -} - -// FlushStop mimics OnStop but additionally ensures that all successful -// .Send() calls will get flushed before closing the connection. -// NOTE: it is not safe to call this method more than once. -func (p *peer) FlushStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() - if err := p.conn.FlushClose(); err != nil { - p.Logger.Debug("error while stopping peer", "err", err) - } -} - -// OnStop implements BaseService. -func (p *peer) OnStop() { - p.metricsTicker.Stop() - p.BaseService.OnStop() - if err := p.conn.Close(); err != nil { - p.Logger.Debug("error while stopping peer", "err", err) - } -} - -//--------------------------------------------------- -// Implements Peer - -// ID returns the peer's ID - the hex encoded hash of its pubkey. -func (p *peer) ID() types.NodeID { - return p.nodeInfo.ID() -} - -// IsOutbound returns true if the connection is outbound, false otherwise. -func (p *peer) IsOutbound() bool { - return p.peerConn.outbound -} - -// IsPersistent returns true if the peer is persitent, false otherwise. -func (p *peer) IsPersistent() bool { - return p.peerConn.persistent -} - -// NodeInfo returns a copy of the peer's NodeInfo. -func (p *peer) NodeInfo() types.NodeInfo { - return p.nodeInfo -} - -// SocketAddr returns the address of the socket. -// For outbound peers, it's the address dialed (after DNS resolution). -// For inbound peers, it's the address returned by the underlying connection -// (not what's reported in the peer's NodeInfo). -func (p *peer) SocketAddr() *NetAddress { - endpoint := p.peerConn.conn.RemoteEndpoint() - return &NetAddress{ - ID: p.ID(), - IP: endpoint.IP, - Port: endpoint.Port, - } -} - -// Status returns the peer's ConnectionStatus. -func (p *peer) Status() tmconn.ConnectionStatus { - return p.conn.Status() -} - -// Send msg bytes to the channel identified by chID byte. Returns false if the -// send queue is full after timeout, specified by MConnection. -func (p *peer) Send(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - // see Switch#Broadcast, where we fetch the list of peers and loop over - // them - while we're looping, one peer may be removed and stopped. - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.SendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// TrySend msg bytes to the channel identified by chID byte. Immediately returns -// false if the send queue is full. 
-func (p *peer) TrySend(chID byte, msgBytes []byte) bool { - if !p.IsRunning() { - return false - } else if !p.hasChannel(chID) { - return false - } - res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes) - if err == io.EOF { - return false - } else if err != nil { - p.onError(err) - return false - } - if res { - labels := []string{ - "peer_id", string(p.ID()), - "chID", fmt.Sprintf("%#x", chID), - } - p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes))) - } - return res -} - -// Get the data for a given key. -func (p *peer) Get(key string) interface{} { - return p.Data.Get(key) -} - -// Set sets the data for the given key. -func (p *peer) Set(key string, data interface{}) { - p.Data.Set(key, data) -} - -// hasChannel returns true if the peer reported -// knowing about the given chID. -func (p *peer) hasChannel(chID byte) bool { - for _, ch := range p.channels { - if ch == chID { - return true - } - } - // NOTE: probably will want to remove this - // but could be helpful while the feature is new - p.Logger.Debug( - "Unknown channel for peer", - "channel", - chID, - "channels", - p.channels, - ) - return false -} - -// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all. -func (p *peer) CloseConn() error { - return p.peerConn.conn.Close() -} - -//--------------------------------------------------- -// methods only used for testing -// TODO: can we remove these? - -// CloseConn closes the underlying connection -func (pc *peerConn) CloseConn() { - pc.conn.Close() -} - -// RemoteAddr returns peer's remote network address. -func (p *peer) RemoteAddr() net.Addr { - endpoint := p.conn.RemoteEndpoint() - return &net.TCPAddr{ - IP: endpoint.IP, - Port: int(endpoint.Port), - } -} - -//--------------------------------------------------- - -func PeerMetrics(metrics *Metrics) PeerOption { - return func(p *peer) { - p.metrics = metrics - } -} - -func (p *peer) metricsReporter() { - for { - select { - case <-p.metricsTicker.C: - status := p.conn.Status() - var sendQueueSize float64 - for _, chStatus := range status.Channels { - sendQueueSize += float64(chStatus.SendQueueSize) - } - - p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize) - case <-p.Quit(): - return - } - } -} diff --git a/internal/p2p/peer_set.go b/internal/p2p/peer_set.go deleted file mode 100644 index 8d4ad4939..000000000 --- a/internal/p2p/peer_set.go +++ /dev/null @@ -1,149 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" -) - -// IPeerSet has a (immutable) subset of the methods of PeerSet. -type IPeerSet interface { - Has(key types.NodeID) bool - HasIP(ip net.IP) bool - Get(key types.NodeID) Peer - List() []Peer - Size() int -} - -//----------------------------------------------------------------------------- - -// PeerSet is a special structure for keeping a table of peers. -// Iteration over the peers is super fast and thread-safe. -type PeerSet struct { - mtx tmsync.Mutex - lookup map[types.NodeID]*peerSetItem - list []Peer -} - -type peerSetItem struct { - peer Peer - index int -} - -// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items. -func NewPeerSet() *PeerSet { - return &PeerSet{ - lookup: make(map[types.NodeID]*peerSetItem), - list: make([]Peer, 0, 256), - } -} - -// Add adds the peer to the PeerSet. -// It returns an error carrying the reason, if the peer is already present. 
-func (ps *PeerSet) Add(peer Peer) error { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.lookup[peer.ID()] != nil { - return ErrSwitchDuplicatePeerID{peer.ID()} - } - - index := len(ps.list) - // Appending is safe even with other goroutines - // iterating over the ps.list slice. - ps.list = append(ps.list, peer) - ps.lookup[peer.ID()] = &peerSetItem{peer, index} - return nil -} - -// Has returns true if the set contains the peer referred to by this -// peerKey, otherwise false. -func (ps *PeerSet) Has(peerKey types.NodeID) bool { - ps.mtx.Lock() - _, ok := ps.lookup[peerKey] - ps.mtx.Unlock() - return ok -} - -// HasIP returns true if the set contains the peer referred to by this IP -// address, otherwise false. -func (ps *PeerSet) HasIP(peerIP net.IP) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return ps.hasIP(peerIP) -} - -// hasIP does not acquire a lock so it can be used in public methods which -// already lock. -func (ps *PeerSet) hasIP(peerIP net.IP) bool { - for _, item := range ps.lookup { - if item.peer.RemoteIP().Equal(peerIP) { - return true - } - } - - return false -} - -// Get looks up a peer by the provided peerKey. Returns nil if peer is not -// found. -func (ps *PeerSet) Get(peerKey types.NodeID) Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - item, ok := ps.lookup[peerKey] - if ok { - return item.peer - } - return nil -} - -// Remove discards peer by its Key, if the peer was previously memoized. -// Returns true if the peer was removed, and false if it was not found. -// in the set. -func (ps *PeerSet) Remove(peer Peer) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - item := ps.lookup[peer.ID()] - if item == nil { - return false - } - - index := item.index - // Create a new copy of the list but with one less item. - // (we must copy because we'll be mutating the list). - newList := make([]Peer, len(ps.list)-1) - copy(newList, ps.list) - // If it's the last peer, that's an easy special case. - if index == len(ps.list)-1 { - ps.list = newList - delete(ps.lookup, peer.ID()) - return true - } - - // Replace the popped item with the last item in the old list. - lastPeer := ps.list[len(ps.list)-1] - lastPeerKey := lastPeer.ID() - lastPeerItem := ps.lookup[lastPeerKey] - newList[index] = lastPeer - lastPeerItem.index = index - ps.list = newList - delete(ps.lookup, peer.ID()) - return true -} - -// Size returns the number of unique items in the peerSet. -func (ps *PeerSet) Size() int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return len(ps.list) -} - -// List returns the threadsafe list of peers. 
-func (ps *PeerSet) List() []Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.list -} diff --git a/internal/p2p/peer_set_test.go b/internal/p2p/peer_set_test.go deleted file mode 100644 index 3e2397d2d..000000000 --- a/internal/p2p/peer_set_test.go +++ /dev/null @@ -1,190 +0,0 @@ -package p2p - -import ( - "net" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -// mockPeer for testing the PeerSet -type mockPeer struct { - service.BaseService - ip net.IP - id types.NodeID -} - -func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error -func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) NodeInfo() types.NodeInfo { return types.NodeInfo{} } -func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() types.NodeID { return mp.id } -func (mp *mockPeer) IsOutbound() bool { return false } -func (mp *mockPeer) IsPersistent() bool { return true } -func (mp *mockPeer) Get(s string) interface{} { return s } -func (mp *mockPeer) Set(string, interface{}) {} -func (mp *mockPeer) RemoteIP() net.IP { return mp.ip } -func (mp *mockPeer) SocketAddr() *NetAddress { return nil } -func (mp *mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.ip, Port: 8800} } -func (mp *mockPeer) CloseConn() error { return nil } - -// Returns a mock peer -func newMockPeer(ip net.IP) *mockPeer { - if ip == nil { - ip = net.IP{127, 0, 0, 1} - } - nodeKey := types.GenNodeKey() - return &mockPeer{ - ip: ip, - id: nodeKey.ID, - } -} - -func TestPeerSetAddRemoveOne(t *testing.T) { - t.Parallel() - - peerSet := NewPeerSet() - - var peerList []Peer - for i := 0; i < 5; i++ { - p := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(p); err != nil { - t.Error(err) - } - peerList = append(peerList, p) - } - - n := len(peerList) - // 1. Test removing from the front - for i, peerAtFront := range peerList { - removed := peerSet.Remove(peerAtFront) - assert.True(t, removed) - wantSize := n - i - 1 - for j := 0; j < 2; j++ { - assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) - assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) - // Test the route of removing the now non-existent element - removed := peerSet.Remove(peerAtFront) - assert.False(t, removed) - } - } - - // 2. 
Next we are testing removing the peer at the end - // a) Replenish the peerSet - for _, peer := range peerList { - if err := peerSet.Add(peer); err != nil { - t.Error(err) - } - } - - // b) In reverse, remove each element - for i := n - 1; i >= 0; i-- { - peerAtEnd := peerList[i] - removed := peerSet.Remove(peerAtEnd) - assert.True(t, removed) - assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) - assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) - } -} - -func TestPeerSetAddRemoveMany(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - - peers := []Peer{} - N := 100 - for i := 0; i < N; i++ { - peer := newMockPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(peer); err != nil { - t.Errorf("failed to add new peer") - } - if peerSet.Size() != i+1 { - t.Errorf("failed to add new peer and increment size") - } - peers = append(peers, peer) - } - - for i, peer := range peers { - removed := peerSet.Remove(peer) - assert.True(t, removed) - if peerSet.Has(peer.ID()) { - t.Errorf("failed to remove peer") - } - if peerSet.Size() != len(peers)-i-1 { - t.Errorf("failed to remove peer and decrement size") - } - } -} - -func TestPeerSetAddDuplicate(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - peer := newMockPeer(nil) - - n := 20 - errsChan := make(chan error) - // Add the same asynchronously to test the - // concurrent guarantees of our APIs, and - // our expectation in the end is that only - // one addition succeeded, but the rest are - // instances of ErrSwitchDuplicatePeer. - for i := 0; i < n; i++ { - go func() { - errsChan <- peerSet.Add(peer) - }() - } - - // Now collect and tally the results - errsTally := make(map[string]int) - for i := 0; i < n; i++ { - err := <-errsChan - - switch err.(type) { - case ErrSwitchDuplicatePeerID: - errsTally["duplicateID"]++ - default: - errsTally["other"]++ - } - } - - // Our next procedure is to ensure that only one addition - // succeeded and that the rest are each ErrSwitchDuplicatePeer. - wantErrCount, gotErrCount := n-1, errsTally["duplicateID"] - assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count") - - wantNilErrCount, gotNilErrCount := 1, errsTally["other"] - assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount") -} - -func TestPeerSetGet(t *testing.T) { - t.Parallel() - - var ( - peerSet = NewPeerSet() - peer = newMockPeer(nil) - ) - - assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") - - if err := peerSet.Add(peer); err != nil { - t.Fatalf("Failed to add new peer: %v", err) - } - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - // Add them asynchronously to test the - // concurrent guarantees of our APIs. 
- wg.Add(1) - go func(i int) { - defer wg.Done() - have, want := peerSet.Get(peer.ID()), peer - assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) - }(i) - } - wg.Wait() -} diff --git a/internal/p2p/peer_test.go b/internal/p2p/peer_test.go deleted file mode 100644 index dfe7bc798..000000000 --- a/internal/p2p/peer_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - golog "log" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - tmconn "github.com/tendermint/tendermint/internal/p2p/conn" -) - -func TestPeerBasic(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.IsRunning()) - assert.True(p.IsOutbound()) - assert.False(p.IsPersistent()) - p.persistent = true - assert.True(p.IsPersistent()) - assert.Equal(rp.Addr().DialString(), p.RemoteAddr().String()) - assert.Equal(rp.ID(), p.ID()) -} - -func TestPeerSend(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - config := cfg - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: config} - rp.Start() - t.Cleanup(rp.Stop) - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - - t.Cleanup(func() { - if err := p.Stop(); err != nil { - t.Error(err) - } - }) - - assert.True(p.Send(testCh, []byte("Asylum"))) -} - -func createOutboundPeerAndPerformHandshake( - addr *NetAddress, - config *config.P2PConfig, - mConfig tmconn.MConnConfig, -) (*peer, error) { - chDescs := []*tmconn.ChannelDescriptor{ - {ID: testCh, Priority: 1}, - } - pk := ed25519.GenPrivKey() - ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer") - transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{}) - reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - pc, err := testOutboundPeerConn(transport, addr, config, false, pk) - if err != nil { - return nil, err - } - peerInfo, _, err := pc.conn.Handshake(context.Background(), ourNodeInfo, pk) - if err != nil { - return nil, err - } - - p := newPeer(peerInfo, pc, reactorsByCh, func(p Peer, r interface{}) {}) - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p, nil -} - -func testDial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { - if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - conn, err := addr.DialTimeout(cfg.DialTimeout) - if err != nil { - return nil, err - } - return conn, nil -} - -func testOutboundPeerConn( - transport *MConnTransport, - addr *NetAddress, - config *config.P2PConfig, - persistent bool, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - - var pc peerConn - conn, err := testDial(addr, config) - if err != nil { - return pc, 
fmt.Errorf("error creating peer: %w", err) - } - - pc, err = testPeerConn(transport, conn, true, persistent) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return pc, fmt.Errorf("%v: %w", cerr.Error(), err) - } - return pc, err - } - - return pc, nil -} - -type remotePeer struct { - PrivKey crypto.PrivKey - Config *config.P2PConfig - Network string - addr *NetAddress - channels bytes.HexBytes - listenAddr string - listener net.Listener -} - -func (rp *remotePeer) Addr() *NetAddress { - return rp.addr -} - -func (rp *remotePeer) ID() types.NodeID { - return types.NodeIDFromPubKey(rp.PrivKey.PubKey()) -} - -func (rp *remotePeer) Start() { - if rp.listenAddr == "" { - rp.listenAddr = "127.0.0.1:0" - } - - l, e := net.Listen("tcp", rp.listenAddr) // any available address - if e != nil { - golog.Fatalf("net.Listen tcp :0: %+v", e) - } - rp.listener = l - rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) - if rp.channels == nil { - rp.channels = []byte{testCh} - } - go rp.accept() -} - -func (rp *remotePeer) Stop() { - rp.listener.Close() -} - -func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conn, err := addr.DialTimeout(1 * time.Second) - if err != nil { - return nil, err - } - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - return nil, err - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - return nil, err - } - return conn, err -} - -func (rp *remotePeer) accept() { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conns := []net.Conn{} - - for { - conn, err := rp.listener.Accept() - if err != nil { - golog.Printf("Failed to accept conn: %+v", err) - for _, conn := range conns { - _ = conn.Close() - } - return - } - - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - golog.Printf("Failed to create a peer: %+v", err) - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - golog.Printf("Failed to handshake a peer: %+v", err) - } - - conns = append(conns, conn) - } -} - -func (rp *remotePeer) nodeInfo() types.NodeInfo { - ni := types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: rp.Addr().ID, - ListenAddr: rp.listener.Addr().String(), - Network: "testing", - Version: "1.2.3-rc0-deadbeef", - Channels: rp.channels, - Moniker: "remote_peer", - } - if rp.Network != "" { - ni.Network = rp.Network - } - return ni -} diff --git a/internal/p2p/pex/addrbook.go b/internal/p2p/pex/addrbook.go deleted file mode 100644 index 6c5f78663..000000000 --- a/internal/p2p/pex/addrbook.go +++ /dev/null @@ -1,948 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. 
-// https://github.com/conformal/btcd/blob/master/LICENSE - -package pex - -import ( - "encoding/binary" - "fmt" - "hash" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/minio/highwayhash" - "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// AddrBook is an address book used for tracking peers -// so we can gossip about them to others and select -// peers to dial. -// TODO: break this up? -type AddrBook interface { - service.Service - - // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) - // Check if it is our address - OurAddress(*p2p.NetAddress) bool - - AddPrivateIDs([]string) - - // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) - - // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool - - // Do we need more peers? - NeedMoreAddrs() bool - // Is Address Book Empty? Answer should not depend on being in your own - // address book, or private peers - Empty() bool - - // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress - - // Mark address - MarkGood(types.NodeID) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list - // Add bad peers back to addrBook - ReinstateBadPeers() - - IsGood(*p2p.NetAddress) bool - IsBanned(*p2p.NetAddress) bool - - // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress - // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - - Size() int - - // Persist to disk - Save() -} - -var _ AddrBook = (*addrBook)(nil) - -// addrBook - concurrency safe peer address manager. -// Implements AddrBook. -type addrBook struct { - service.BaseService - - // accessed concurrently - mtx tmsync.Mutex - ourAddrs map[string]struct{} - privateIDs map[types.NodeID]struct{} - addrLookup map[types.NodeID]*knownAddress // new & old - badPeers map[types.NodeID]*knownAddress // blacklisted peers - bucketsOld []map[string]*knownAddress - bucketsNew []map[string]*knownAddress - nOld int - nNew int - - // immutable after creation - filePath string - key string // random prefix for bucket placement - routabilityStrict bool - hasher hash.Hash64 - - wg sync.WaitGroup -} - -func mustNewHasher() hash.Hash64 { - key := crypto.CRandBytes(highwayhash.Size) - hasher, err := highwayhash.New64(key) - if err != nil { - panic(err) - } - return hasher -} - -// NewAddrBook creates a new address book. -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { - am := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: filePath, - routabilityStrict: routabilityStrict, - } - am.init() - am.BaseService = *service.NewBaseService(nil, "AddrBook", am) - return am -} - -// Initialize the buckets. 
-// When modifying this, don't forget to update loadFromFile() -func (a *addrBook) init() { - a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits - // New addr buckets - a.bucketsNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.bucketsNew { - a.bucketsNew[i] = make(map[string]*knownAddress) - } - // Old addr buckets - a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.bucketsOld { - a.bucketsOld[i] = make(map[string]*knownAddress) - } - a.hasher = mustNewHasher() -} - -// OnStart implements Service. -func (a *addrBook) OnStart() error { - if err := a.BaseService.OnStart(); err != nil { - return err - } - a.loadFromFile(a.filePath) - - // wg.Add to ensure that any invocation of .Wait() - // later on will wait for saveRoutine to terminate. - a.wg.Add(1) - go a.saveRoutine() - - return nil -} - -// OnStop implements Service. -func (a *addrBook) OnStop() { - a.BaseService.OnStop() -} - -func (a *addrBook) Wait() { - a.wg.Wait() -} - -func (a *addrBook) FilePath() string { - return a.filePath -} - -//------------------------------------------------------- - -// AddOurAddress one of our addresses. -func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Add our address to book", "addr", addr) - a.ourAddrs[addr.String()] = struct{}{} -} - -// OurAddress returns true if it is our address. -func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - _, ok := a.ourAddrs[addr.String()] - return ok -} - -func (a *addrBook) AddPrivateIDs(ids []string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, id := range ids { - a.privateIDs[types.NodeID(id)] = struct{}{} - } -} - -// AddAddress implements AddrBook -// Add address to a "new" bucket. If it's already in one, only add it probabilistically. -// Returns error if the addr is non-routable. Does not add self. -// NOTE: addr must not be nil -func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.addAddress(addr, src) -} - -// RemoveAddress implements AddrBook - removes the address from the book. -func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.removeAddress(addr) -} - -// IsGood returns true if peer was ever marked as good and haven't -// done anything wrong since then. -func (a *addrBook) IsGood(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.addrLookup[addr.ID].isOld() -} - -// IsBanned returns true if the peer is currently banned -func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { - a.mtx.Lock() - _, ok := a.badPeers[addr.ID] - a.mtx.Unlock() - - return ok -} - -// HasAddress returns true if the address is in the book. -func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - return ka != nil -} - -// NeedMoreAddrs implements AddrBook - returns true if there are not have enough addresses in the book. -func (a *addrBook) NeedMoreAddrs() bool { - return a.Size() < needAddressThreshold -} - -// Empty implements AddrBook - returns true if there are no addresses in the address book. -// Does not count the peer appearing in its own address book, or private peers. -func (a *addrBook) Empty() bool { - return a.Size() == 0 -} - -// PickAddress implements AddrBook. It picks an address to connect to. 
-// The address is picked randomly from an old or new bucket according -// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range) -// and determines how biased we are to pick an address from a new bucket. -// PickAddress returns nil if the AddrBook is empty or if we try to pick -// from an empty bucket. -// nolint:gosec // G404: Use of weak random number generator -func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - // Bias between new and old addresses. - oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs)) - newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs) - - // pick a random peer from a random bucket - var bucket map[string]*knownAddress - pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation - if (pickFromOldBucket && a.nOld == 0) || - (!pickFromOldBucket && a.nNew == 0) { - return nil - } - // loop until we pick a random non-empty bucket - for len(bucket) == 0 { - if pickFromOldBucket { - bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))] - } else { - bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))] - } - } - // pick a random index and loop over the map to return that index - randIndex := mrand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - return nil -} - -// MarkGood implements AddrBook - it marks the peer as good and -// moves it into an "old" bucket. -func (a *addrBook) MarkGood(id types.NodeID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[id] - if ka == nil { - return - } - ka.markGood() - if ka.isNew() { - if err := a.moveToOld(ka); err != nil { - a.Logger.Error("Error moving address to old", "err", err) - } - } -} - -// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. -func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - ka.markAttempt() -} - -// MarkBad implements AddrBook. Kicks address out from book, places -// the address in the badPeers pool. -func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.addBadPeer(addr, banTime) { - a.removeAddress(addr) - } -} - -// ReinstateBadPeers removes bad peers from ban list and places them into a new -// bucket. -func (a *addrBook) ReinstateBadPeers() { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, ka := range a.badPeers { - if ka.isBanned() { - continue - } - - bucket, err := a.calcNewBucket(ka.Addr, ka.Src) - if err != nil { - a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)", - "addr", ka.Addr, "err", err) - continue - } - - if err := a.addToNewBucket(ka, bucket); err != nil { - a.Logger.Error("Error adding peer to new bucket", "err", err) - } - delete(a.badPeers, ka.ID()) - - a.Logger.Info("Reinstated address", "addr", ka.Addr) - } -} - -// GetSelection implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. 
-// Must never return a nil address. -func (a *addrBook) GetSelection() []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, - // could we just select a random numAddresses of indexes? - allAddr := make([]*p2p.NetAddress, bookSize) - i := 0 - for _, ka := range a.addrLookup { - allAddr[i] = ka.Addr - i++ - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - // nolint:gosec // G404: Use of weak random number generator - j := mrand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[:numAddresses] -} - -func percentageOfNum(p, n int) int { - return int(math.Round((float64(p) / float64(100)) * float64(n))) -} - -// GetSelectionWithBias implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -// -// Each address is picked randomly from an old or new bucket according to the -// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to -// that range) and determines how biased we are to pick an address from a new -// bucket. -func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // number of new addresses that, if possible, should be in the beginning of the selection - // if there are no enough old addrs, will choose new addr instead. - numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld) - selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd) - selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...) - return selection -} - -//------------------------------------------------ - -// Size returns the number of addresses in the book. -func (a *addrBook) Size() int { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.size() -} - -func (a *addrBook) size() int { - return a.nNew + a.nOld -} - -//---------------------------------------------------------- - -// Save persists the address book to disk. 
-func (a *addrBook) Save() { - a.saveToFile(a.filePath) // thread safe -} - -func (a *addrBook) saveRoutine() { - defer a.wg.Done() - - saveFileTicker := time.NewTicker(dumpAddressInterval) -out: - for { - select { - case <-saveFileTicker.C: - a.saveToFile(a.filePath) - case <-a.Quit(): - break out - } - } - saveFileTicker.Stop() - a.saveToFile(a.filePath) -} - -//---------------------------------------------------------- - -func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress { - switch bucketType { - case bucketTypeNew: - return a.bucketsNew[bucketIdx] - case bucketTypeOld: - return a.bucketsOld[bucketIdx] - default: - panic("Invalid bucket type") - } -} - -// Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full. -// NOTE: currently it always returns true. -func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error { - // Consistency check to ensure we don't add an already known address - if ka.isOld() { - return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx} - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeNew, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return nil - } - - // Enforce max addresses. - if len(bucket) > newBucketSize { - a.Logger.Info("new bucket is full, expiring new") - a.expireNew(bucketIdx) - } - - // Add to bucket. - bucket[addrStr] = ka - // increment nNew if the peer doesnt already exist in a bucket - if ka.addBucketRef(bucketIdx) == 1 { - a.nNew++ - } - - // Add it to addrLookup - a.addrLookup[ka.ID()] = ka - return nil -} - -// Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full. -func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { - // Sanity check - if ka.isNew() { - a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka)) - return false - } - if len(ka.Buckets) != 0 { - a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka)) - return false - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeOld, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return true - } - - // Enforce max addresses. - if len(bucket) > oldBucketSize { - return false - } - - // Add to bucket. 
- bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nOld++ - } - - // Ensure in addrLookup - a.addrLookup[ka.ID()] = ka - - return true -} - -func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { - if ka.BucketType != bucketType { - a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka)) - return - } - bucket := a.getBucket(bucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - if ka.removeBucketRef(bucketIdx) == 0 { - if bucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) - } -} - -func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { - for _, bucketIdx := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - } - ka.Buckets = nil - if ka.BucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) -} - -//---------------------------------------------------------- - -func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { - bucket := a.getBucket(bucketType, bucketIdx) - var oldest *knownAddress - for _, ka := range bucket { - if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { - oldest = ka - } - } - return oldest -} - -// adds the address to a "new" bucket. if its already in one, -// it only adds it probabilistically -func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { - if addr == nil || src == nil { - return ErrAddrBookNilAddr{addr, src} - } - - if err := addr.Valid(); err != nil { - return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} - } - - if _, ok := a.badPeers[addr.ID]; ok { - return ErrAddressBanned{addr} - } - - if _, ok := a.privateIDs[addr.ID]; ok { - return ErrAddrBookPrivate{addr} - } - - if _, ok := a.privateIDs[src.ID]; ok { - return ErrAddrBookPrivateSrc{src} - } - - // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. - if _, ok := a.ourAddrs[addr.String()]; ok { - return ErrAddrBookSelf{addr} - } - - if a.routabilityStrict && !addr.Routable() { - return ErrAddrBookNonRoutable{addr} - } - - ka := a.addrLookup[addr.ID] - if ka != nil { - // If its already old and the address ID's are the same, ignore it. - // Thereby avoiding issues with a node on the network attempting to change - // the IP of a known node ID. (Which could yield an eclipse attack on the node) - if ka.isOld() && ka.Addr.ID == addr.ID { - return nil - } - // Already in max new buckets. - if len(ka.Buckets) == maxNewBucketsPerAddress { - return nil - } - // The more entries we have, the less likely we are to add more. 
- factor := int32(2 * len(ka.Buckets)) - // nolint:gosec // G404: Use of weak random number generator - if mrand.Int31n(factor) != 0 { - return nil - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket, err := a.calcNewBucket(addr, src) - if err != nil { - return err - } - return a.addToNewBucket(ka, bucket) -} - -func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { - var buckets []map[string]*knownAddress - switch bucketType { - case bucketTypeNew: - buckets = a.bucketsNew - case bucketTypeOld: - buckets = a.bucketsOld - default: - panic("unexpected bucketType") - } - total := 0 - for _, bucket := range buckets { - total += len(bucket) - } - addresses := make([]*knownAddress, 0, total) - for _, bucket := range buckets { - for _, ka := range bucket { - addresses = append(addresses, ka) - } - } - selection := make([]*p2p.NetAddress, 0, num) - chosenSet := make(map[string]bool, num) - rand := tmrand.NewRand() - rand.Shuffle(total, func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - for _, addr := range addresses { - if chosenSet[addr.Addr.String()] { - continue - } - chosenSet[addr.Addr.String()] = true - selection = append(selection, addr.Addr) - if len(selection) >= num { - return selection - } - } - return selection -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *addrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.bucketsNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. If the destination bucket is full, -// demote the oldest one to a "new" bucket. -// TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) error { - // Sanity check - if ka.isOld() { - a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return nil - } - if len(ka.Buckets) == 0 { - a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return nil - } - - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. - ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. - oldBucketIdx, err := a.calcOldBucket(ka.Addr) - if err != nil { - return err - } - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room; move the oldest to a new bucket - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) - if err != nil { - return err - } - if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { - a.Logger.Error("Error adding peer to old bucket", "err", err) - } - - // Finally, add our ka to old bucket again. 
- added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } - return nil -} - -func (a *addrBook) removeAddress(addr *p2p.NetAddress) { - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) -} - -func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { - // check it exists in addrbook - ka := a.addrLookup[addr.ID] - // check address is not already there - if ka == nil { - return false - } - - if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { - // add to bad peer list - ka.ban(banTime) - a.badPeers[addr.ID] = ka - a.Logger.Info("Add address to blacklist", "addr", addr) - } - return true -} - -//--------------------------------------------------------------------- -// calculate bucket placements - -// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(a.groupKey(addr))...) - data1 = append(data1, []byte(a.groupKey(src))...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) - return result, nil -} - -// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) - return result, nil -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable" for an unroutable -// address. 
-func (a *addrBook) groupKey(na *p2p.NetAddress) string { - return groupKeyFor(na, a.routabilityStrict) -} - -func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { - if routabilityStrict && na.Local() { - return "local" - } - if routabilityStrict && !na.Routable() { - return "unroutable" - } - - if ipv4 := na.IP.To4(); ipv4 != nil { - return na.IP.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC6145() || na.RFC6052() { - // last four bytes are the ip address - ip := na.IP[12:16] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC3964() { - ip := na.IP[2:6] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC4380() { - // teredo tunnels have the last 4 bytes as the v4 address XOR - // 0xff. - ip := net.IP(make([]byte, 4)) - for i, byte := range na.IP[12:16] { - ip[i] = byte ^ 0xff - } - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.OnionCatTor() { - // group is keyed off the first 4 bits of the actual onion key. - return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1)) - } - - // OK, so now we know ourselves to be a IPv6 address. - // bitcoind uses /32 for everything, except for Hurricane Electric's - // (he.net) IP range, which it uses /36 for. - bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)} - if heNet.Contains(na.IP) { - bits = 36 - } - ipv6Mask := net.CIDRMask(bits, 128) - return na.IP.Mask(ipv6Mask).String() -} - -func (a *addrBook) hash(b []byte) ([]byte, error) { - a.hasher.Reset() - a.hasher.Write(b) - return a.hasher.Sum(nil), nil -} diff --git a/internal/p2p/pex/addrbook_test.go b/internal/p2p/pex/addrbook_test.go deleted file mode 100644 index 3d21314ad..000000000 --- a/internal/p2p/pex/addrbook_test.go +++ /dev/null @@ -1,777 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "math" - mrand "math/rand" - "net" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -// FIXME These tests should not rely on .(*addrBook) assertions - -func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - addr := book.PickAddress(50) - assert.Nil(t, addr, "expected no address") - - randAddrs := randNetAddressPairs(t, 1) - addrSrc := randAddrs[0] - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - - // pick an address when we only have new address - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(100) - assert.NotNil(t, addr, "expected an address") - - // pick an address when we only have old address - book.MarkGood(addrSrc.addr.ID) - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - - // in this case, nNew==0 but we biased 100% to new, so we return nil - addr = book.PickAddress(100) - assert.Nil(t, addr, "did not expected an address") -} - -func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - 
book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err := book.Start() - require.NoError(t, err) - - assert.True(t, book.Empty()) - - // 100 addresses - randAddrs := randNetAddressPairs(t, 100) - - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.Start() - require.NoError(t, err) - - assert.Equal(t, 100, book.Size()) -} - -func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - addr := addrSrc.addr - src := addrSrc.src - err := book.AddAddress(addr, src) - require.NoError(t, err) - - ka := book.HasAddress(addr) - assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) - } -} - -func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // Attempt all addresses. - for _, addrSrc := range randAddrs { - book.MarkAttempt(addrSrc.addr) - } - - // Promote half of them - for i, addrSrc := range randAddrs { - if i%2 == 0 { - book.MarkGood(addrSrc.addr.ID) - } - } - - // TODO: do more testing :) - - selection := book.GetSelection() - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection could not be bigger than the book") - } - - selection = book.GetSelectionWithBias(30) - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection with bias could not be bigger than the book") - } - - assert.Equal(t, book.Size(), 100, "expecting book size to be 100") -} - -func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - - book.SetLogger(log.TestingLogger()) - - randAddrs := randNetAddressPairs(t, 100) - - differentSrc := randIPv4Address(t) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, differentSrc) // different src - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) -} - -type netAddressPair struct { - addr *p2p.NetAddress - src *p2p.NetAddress -} - -func randNetAddressPairs(t *testing.T, n int) []netAddressPair { - randAddrs := make([]netAddressPair, n) - for i := 0; i < n; i++ { - randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} - } - return randAddrs -} - -func randIPv4Address(t *testing.T) *p2p.NetAddress { - for { - ip := fmt.Sprintf("%v.%v.%v.%v", - mrand.Intn(254)+1, - mrand.Intn(255), - mrand.Intn(255), - mrand.Intn(255), - ) - port := mrand.Intn(65535-1) + 1 - id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength))) - idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port)) - addr, err := types.NewNetAddressString(idAddr) - assert.Nil(t, err, "error generating rand network address") - 
if addr.Routable() { - return addr - } - } -} - -func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - assert.Equal(t, 1, book.Size()) - - book.RemoveAddress(addr) - assert.Equal(t, 0, book.Size()) - - nonExistingAddr := randIPv4Address(t) - book.RemoveAddress(nonExistingAddr) - assert.Equal(t, 0, book.Size()) -} - -func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) { - // create a book with 10 addresses, 1 good/old and 9 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book) -} - -func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) { - // create a book with 10 addresses, 9 good/old and 1 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book) -} - -func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) { - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.Nil(t, addrs) -} - -func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - assert.Empty(t, book.GetSelection()) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.Equal(t, 1, len(book.GetSelection())) - assert.Equal(t, addr, book.GetSelection()[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection := book.GetSelection() - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Errorf("selection %v could not be bigger than the book", selection) - } -} - -func TestAddrBookGetSelectionWithBias(t *testing.T) { - const biasTowardsNewAddrs = 30 - - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - selection := book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Empty(t, selection) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Equal(t, 1, len(selection)) - assert.Equal(t, addr, selection[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - 
addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Fatalf("selection %v could not be bigger than the book", selection) - } - - // 4) mark 80% of the addresses as good - randAddrsLen := len(randAddrs) - for i, addrSrc := range randAddrs { - if int((float64(i)/float64(randAddrsLen))*100) >= 20 { - book.MarkGood(addrSrc.addr.ID) - } - } - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - - // check that ~70% of addresses returned are good - good := 0 - for _, addr := range selection { - if book.IsGood(addr) { - good++ - } - } - - got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs - - // compute some slack to protect against small differences due to rounding: - slack := int(math.Round(float64(100) / float64(len(selection)))) - if got > expected+slack { - t.Fatalf( - "got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } - if got < expected-slack { - t.Fatalf( - "got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } -} - -func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.True(t, book.HasAddress(addr)) - - book.RemoveAddress(addr) - - assert.False(t, book.HasAddress(addr)) -} - -func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) { - t.Helper() - addrs := make([]*p2p.NetAddress, numAddrs) - for i := 0; i < numAddrs; i++ { - addrs[i] = randIPv4Address(t) - } - - private := make([]string, numAddrs) - for i, addr := range addrs { - private[i] = string(addr.ID) - } - return addrs, private -} - -func TestBanBadPeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - _ = book.AddAddress(addr, addr) - - book.MarkBad(addr, 1*time.Second) - // addr should not reachable - assert.False(t, book.HasAddress(addr)) - assert.True(t, book.IsBanned(addr)) - - err := book.AddAddress(addr, addr) - // book should not add address from the blacklist - assert.Error(t, err) - - time.Sleep(1 * time.Second) - book.ReinstateBadPeers() - // address should be reinstated in the new bucket - assert.EqualValues(t, 1, book.Size()) - assert.True(t, book.HasAddress(addr)) - assert.False(t, book.IsGood(addr)) -} - -func TestAddrBookEmpty(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - // Check that empty book is empty - require.True(t, book.Empty()) - // Check that book with our address is empty - book.AddOurAddress(randIPv4Address(t)) - require.True(t, book.Empty()) - // Check that book with private addrs is empty - _, privateIds := testCreatePrivateAddrs(t, 5) - book.AddPrivateIDs(privateIds) - require.True(t, book.Empty()) - - // Check that book with address is not empty - err := book.AddAddress(randIPv4Address(t), randIPv4Address(t)) - require.NoError(t, err) - require.False(t, book.Empty()) -} - -func TestPrivatePeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addrs, private := testCreatePrivateAddrs(t, 10) - 
book.AddPrivateIDs(private) - - // private addrs must not be added - for _, addr := range addrs { - err := book.AddAddress(addr, addr) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivate) - assert.True(t, ok) - } - } - - // addrs coming from private peers must not be added - err := book.AddAddress(randIPv4Address(t), addrs[0]) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivateSrc) - assert.True(t, ok) - } -} - -func testAddrBookAddressSelection(t *testing.T, bookSize int) { - // generate all combinations of old (m) and new addresses - for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { - nBookNew := bookSize - nBookOld - dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) - - // create book and get selection - book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) - nAddrs := len(addrs) - assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr) - - // check there's no nil addresses - for _, addr := range addrs { - if addr == nil { - t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs) - } - } - - // XXX: shadowing - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - - // Given: - // n - num new addrs, m - num old addrs - // k - num new addrs expected in the beginning (based on bias %) - // i=min(n, max(k,r-m)), aka expNew - // j=min(m, r-i), aka expOld - // - // We expect this layout: - // indices: 0...i-1 i...i+j-1 - // addresses: N0..Ni-1 O0..Oj-1 - // - // There is at least one partition and at most three. - var ( - k = percentageOfNum(biasToSelectNewPeers, nAddrs) - expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld)) - expOld = tmmath.MinInt(nOld, nAddrs-expNew) - ) - - // Verify that the number of old and new addresses are as expected - if nNew != expNew { - t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew) - } - if nOld != expOld { - t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld) - } - - // Verify that the order of addresses is as expected - // Get the sequence types and lengths of the selection - seqLens, seqTypes, err := analyseSelectionLayout(book, addrs) - assert.NoError(t, err, "%s", dbgStr) - - // Build a list with the expected lengths of partitions and another with the expected types, e.g.: - // expSeqLens = [10, 22], expSeqTypes = [1, 2] - // means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses. 
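The expectations above reduce to simple arithmetic: for a selection of r addresses with a bias of k% toward new peers, expNew = min(n, max(k% of r, r - m)) and expOld = min(m, r - expNew), which yields at most two partitions (new addresses first, then old). A small sketch with made-up book sizes, mirroring those formulas:

```go
package main

import "fmt"

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	const (
		bias = 30 // biasToSelectNewPeers: % of the selection expected to be new
		nNew = 9  // new addresses in the book (n)
		nOld = 41 // old addresses in the book (m)
		r    = 25 // size of the returned selection
	)

	k := bias * r / 100                       // new addresses expected at the front
	expNew := minInt(nNew, maxInt(k, r-nOld)) // i = min(n, max(k, r-m))
	expOld := minInt(nOld, r-expNew)          // j = min(m, r-i)

	// Layout: indices 0..expNew-1 are new, expNew..expNew+expOld-1 are old.
	fmt.Printf("expect %d new followed by %d old addresses\n", expNew, expOld)
}
```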
- var expSeqLens []int - var expSeqTypes []int - - switch { - case expOld == 0: // all new addresses - expSeqLens = []int{nAddrs} - expSeqTypes = []int{1} - case expNew == 0: // all old addresses - expSeqLens = []int{nAddrs} - expSeqTypes = []int{2} - case nAddrs-expNew-expOld == 0: // new addresses, old addresses - expSeqLens = []int{expNew, expOld} - expSeqTypes = []int{1, 2} - } - - assert.Equal(t, expSeqLens, seqLens, - "%s - expected sequence lengths of old/new %v, got %v", - dbgStr, expSeqLens, seqLens) - assert.Equal(t, expSeqTypes, seqTypes, - "%s - expected sequence types (1-new, 2-old) was %v, got %v", - dbgStr, expSeqTypes, seqTypes) - } -} - -func TestMultipleAddrBookAddressSelection(t *testing.T) { - // test books with smaller size, < N - const N = 32 - for bookSize := 1; bookSize < N; bookSize++ { - testAddrBookAddressSelection(t, bookSize) - } - - // Test for two books with sizes from following ranges - ranges := [...][]int{{33, 100}, {100, 175}} - bookSizes := make([]int, 0, len(ranges)) - for _, r := range ranges { - bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0]) - } - t.Logf("Testing address selection for the following book sizes %v\n", bookSizes) - for _, bookSize := range bookSizes { - testAddrBookAddressSelection(t, bookSize) - } -} - -func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // This test creates adds a peer to the address book and marks it good - // It then attempts to override the peer's IP, by adding a peer with the same ID - // but different IP. We distinguish the IP's by "RealIP" and "OverrideAttemptIP" - peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5" - peerRealIP := "1.1.1.1:26656" - peerOverrideAttemptIP := "2.2.2.2:26656" - SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656" - - // There is a chance that AddAddress will ignore the new peer its given. - // So we repeat trying to override the peer several times, - // to ensure we aren't in a case that got probabilistically ignored - numOverrideAttempts := 10 - - peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP) - require.Nil(t, err) - - peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) - require.Nil(t, err) - - src, err := types.NewNetAddressString(SrcAddr) - require.Nil(t, err) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) - book.MarkAttempt(peerRealAddr) - book.MarkGood(peerRealAddr.ID) - - // Double check that adding a peer again doesn't error - err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) - - // Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2) - // This should just be ignored, and not error. - for i := 0; i < numOverrideAttempts; i++ { - err = book.AddAddress(peerOverrideAttemptAddr, src) - require.Nil(t, err) - } - // Now check that the IP was not overridden. - // This is done by sampling several peers from addr book - // and ensuring they all have the correct IP. - // In the expected functionality, this test should only have 1 Peer, hence will pass. - for i := 0; i < numOverrideAttempts; i++ { - selection := book.GetSelection() - for _, addr := range selection { - require.Equal(t, addr.IP, peerRealAddr.IP) - } - } -} - -func TestAddrBookGroupKey(t *testing.T) { - // non-strict routability - testCases := []struct { - name string - ip string - expKey string - }{ - // IPv4 normal. 
- {"ipv4 normal class a", "12.1.2.3", "12.1.0.0"}, - {"ipv4 normal class b", "173.1.2.3", "173.1.0.0"}, - {"ipv4 normal class c", "196.1.2.3", "196.1.0.0"}, - - // IPv6/IPv4 translations. - {"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"}, - {"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"}, - {"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"}, - {"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"}, - - // Tor. - {"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"}, - {"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"}, - {"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"}, - - // IPv6 normal. - {"ipv6 normal", "2602:100::1", "2602:100::"}, - {"ipv6 normal 2", "2602:0100::1234", "2602:100::"}, - {"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"}, - {"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false) - assert.Equal(t, tc.expKey, key, "#%d", i) - } - - // strict routability - testCases = []struct { - name string - ip string - expKey string - }{ - // Local addresses. - {"ipv4 localhost", "127.0.0.1", "local"}, - {"ipv6 localhost", "::1", "local"}, - {"ipv4 zero", "0.0.0.0", "local"}, - {"ipv4 first octet zero", "0.1.2.3", "local"}, - - // Unroutable addresses. - {"ipv4 invalid bcast", "255.255.255.255", "unroutable"}, - {"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"}, - {"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"}, - {"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"}, - {"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"}, - {"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"}, - {"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"}, - {"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"}, - {"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true) - assert.Equal(t, tc.expKey, key, "#%d", i) - } -} - -func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - assert.Equal(t, m, nOld, "old addresses") - assert.Equal(t, n, nNew, "new addresses") -} - -func createTempFileName(t *testing.T, prefix string) string { - t.Helper() - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - - fname := f.Name() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = os.Remove(fname) }) - - return fname -} - -func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - t.Helper() - fname = createTempFileName(t, "addrbook_test") - - book = NewAddrBook(fname, true).(*addrBook) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - randAddrs := randNetAddressPairs(t, nOld) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - book.MarkGood(addr.addr.ID) - } - - randAddrs = randNetAddressPairs(t, nNew) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - } - - return -} - -func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) { - for _, addr := range addrs { - if book.IsGood(addr) { - nOld++ - } else { - nNew++ - 
} - } - return -} - -// Analyze the layout of the selection specified by 'addrs' -// Returns: -// - seqLens - the lengths of the sequences of addresses of same type -// - seqTypes - the types of sequences in selection -func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) { - // address types are: 0 - nil, 1 - new, 2 - old - var ( - prevType = 0 - currentSeqLen = 0 - ) - - for _, addr := range addrs { - addrType := 0 - if book.IsGood(addr) { - addrType = 2 - } else { - addrType = 1 - } - if addrType != prevType && prevType != 0 { - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - currentSeqLen = 0 - } - currentSeqLen++ - prevType = addrType - } - - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - - return -} diff --git a/internal/p2p/pex/bench_test.go b/internal/p2p/pex/bench_test.go deleted file mode 100644 index 37019f60a..000000000 --- a/internal/p2p/pex/bench_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package pex - -import ( - "testing" - - "github.com/tendermint/tendermint/types" -) - -func BenchmarkAddrBook_hash(b *testing.B) { - book := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: "", - routabilityStrict: true, - } - book.init() - msg := []byte(`foobar`) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = book.hash(msg) - } -} diff --git a/internal/p2p/pex/errors.go b/internal/p2p/pex/errors.go index 275e71bf9..4d41cce07 100644 --- a/internal/p2p/pex/errors.go +++ b/internal/p2p/pex/errors.go @@ -15,17 +15,6 @@ func (err ErrAddrBookNonRoutable) Error() string { return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) } -type errAddrBookOldAddressNewBucket struct { - Addr *p2p.NetAddress - BucketID int -} - -func (err errAddrBookOldAddressNewBucket) Error() string { - return fmt.Sprintf("failed consistency check!"+ - " Cannot add pre-existing address %v into new bucket %v", - err.Addr, err.BucketID) -} - type ErrAddrBookSelf struct { Addr *p2p.NetAddress } diff --git a/internal/p2p/pex/file.go b/internal/p2p/pex/file.go deleted file mode 100644 index ce65f7d4d..000000000 --- a/internal/p2p/pex/file.go +++ /dev/null @@ -1,83 +0,0 @@ -package pex - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/tendermint/tendermint/internal/libs/tempfile" -) - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string `json:"key"` - Addrs []*knownAddress `json:"addrs"` -} - -func (a *addrBook) saveToFile(filePath string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Saving AddrBook to file", "size", a.size()) - - addrs := make([]*knownAddress, 0, len(a.addrLookup)) - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "err", err) - return - } - err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644) - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) - } -} - -// Returns false if file does not exist. -// cmn.Panics if file is corrupt. -func (a *addrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. 
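saveToFile above writes the whole book as one indented JSON document: the book's random key plus a flat list of known addresses, each carrying its bucket references and counters so loadFromFile can rebuild the new/old buckets without rehashing anything. A rough sketch of that shape, using trimmed mirror structs and invented values (the real knownAddress also records last_attempt, last_success and last_ban_time, and its addr/src fields are full NetAddress objects rather than strings):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirrors of addrBookJSON / knownAddress, just enough to show the
// shape of the file produced by saveToFile.
type addrStub struct {
	Addr       string `json:"addr"` // illustrative; real entries are NetAddress objects
	Src        string `json:"src"`
	Buckets    []int  `json:"buckets"`
	Attempts   int32  `json:"attempts"`
	BucketType byte   `json:"bucket_type"`
}

type bookFile struct {
	Key   string     `json:"key"`
	Addrs []addrStub `json:"addrs"`
}

func main() {
	doc := bookFile{
		Key: "badc0ffee", // random key used to salt the bucket hashes
		Addrs: []addrStub{
			{
				Addr:       "nodeid@1.2.3.4:26656",
				Src:        "nodeid@5.6.7.8:26656",
				Buckets:    []int{17},
				Attempts:   0,
				BucketType: 1, // stand-in for bucketTypeNew
			},
		},
	}

	out, err := json.MarshalIndent(doc, "", "\t")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```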
- _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) - } - defer r.Close() - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - panic(fmt.Sprintf("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .bucketsNew & .bucketsOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.ID()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} diff --git a/internal/p2p/pex/known_address.go b/internal/p2p/pex/known_address.go deleted file mode 100644 index 2a2ebe038..000000000 --- a/internal/p2p/pex/known_address.go +++ /dev/null @@ -1,141 +0,0 @@ -package pex - -import ( - "time" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// knownAddress tracks information about a known network address -// that is used to determine how viable an address is. -type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Buckets []int `json:"buckets"` - Attempts int32 `json:"attempts"` - BucketType byte `json:"bucket_type"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - LastBanTime time.Time `json:"last_ban_time"` -} - -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) ID() types.NodeID { - return ka.Addr.ID -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts++ -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) ban(banTime time.Duration) { - if ka.LastBanTime.Before(time.Now().Add(banTime)) { - ka.LastBanTime = time.Now().Add(banTime) - } -} - -func (ka *knownAddress) isBanned() bool { - return ka.LastBanTime.After(time.Now()) -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - // TODO refactor to return error? - // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - // TODO refactor to return error? 
- // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. - -*/ -func (ka *knownAddress) isBad() bool { - // Is Old --> good - if ka.BucketType == bucketTypeOld { - return false - } - - // Has been attempted in the last minute --> good - if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // TODO: From the future? - - // Too old? - // TODO: should be a timestamp of last seen, not just last attempt - if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/internal/p2p/pex/params.go b/internal/p2p/pex/params.go deleted file mode 100644 index 29b4d45ab..000000000 --- a/internal/p2p/pex/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package pex - -import "time" - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. - dumpAddressInterval = time.Minute * 2 - - // max addresses in each old address bucket. - oldBucketSize = 64 - - // buckets we split old addresses over. - oldBucketCount = 64 - - // max addresses in each new address bucket. - newBucketSize = 64 - - // buckets that we spread new addresses over. - newBucketCount = 256 - - // old buckets over which an address group will be spread. - oldBucketsPerGroup = 4 - - // new buckets over which a source address group will be spread. - newBucketsPerGroup = 32 - - // buckets a frequently seen new address may end up in. - maxNewBucketsPerAddress = 4 - - // days before which we assume an address has vanished - // if we have not seen it announced in that long. - numMissingDays = 7 - - // tries without a single success before we assume an address is bad. - numRetries = 3 - - // max failures we will accept without a success before considering an address bad. - maxFailures = 10 // ? - - // days since the last success before we will consider evicting an address. - minBadDays = 7 - - // % of total addresses known returned by GetSelection. - getSelectionPercent = 23 - - // min addresses that must be returned by GetSelection. Useful for bootstrapping. 
- minGetSelection = 32 - - // max addresses returned by GetSelection - // NOTE: this must match "maxMsgSize" - maxGetSelection = 250 -) diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go deleted file mode 100644 index 9eb58c054..000000000 --- a/internal/p2p/pex/pex_reactor.go +++ /dev/null @@ -1,862 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -type Peer = p2p.Peer - -const ( - // PexChannel is a channel for PEX messages - PexChannel = byte(0x00) - - // over-estimate of max NetAddress size - // hexID (40) + IP (16) + Port (2) + Name (100) ... - // NOTE: dont use massive DNS name .. - maxAddressSize = 256 - - // NOTE: amplificaiton factor! - // small request results in up to maxMsgSize response - maxMsgSize = maxAddressSize * maxGetSelection - - // ensure we have enough peers - defaultEnsurePeersPeriod = 30 * time.Second - - // Seed/Crawler constants - - // minTimeBetweenCrawls is a minimum time between attempts to crawl a peer. - minTimeBetweenCrawls = 2 * time.Minute - - // check some peers every this - crawlPeerPeriod = 30 * time.Second - - maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h) - - // if node connects to seed, it does not have any trusted peers. - // Especially in the beginning, node should have more trusted peers than - // untrusted. - biasToSelectNewPeers = 30 // 70 to select good peers - - // if a peer is marked bad, it will be banned for at least this time period - defaultBanTime = 24 * time.Hour -) - -type errMaxAttemptsToDial struct { -} - -func (e errMaxAttemptsToDial) Error() string { - return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) -} - -type errTooEarlyToDial struct { - backoffDuration time.Duration - lastDialed time.Time -} - -func (e errTooEarlyToDial) Error() string { - return fmt.Sprintf( - "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)", - e.backoffDuration, e.lastDialed, time.Since(e.lastDialed)) -} - -// Reactor handles PEX (peer exchange) and ensures that an -// adequate number of peers are connected to the switch. -// -// It uses `AddrBook` (address book) to store `NetAddress`es of the peers. -// -// ## Preventing abuse -// -// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg too. -// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod. 
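Before this removal, the reactor described above was attached to the legacy Switch in roughly the following way. This is a sketch against the pre-removal tree only (the helper name, paths, and values are invented); it matches the construction used by the tests further down in this diff:

```go
package node // hypothetical location, for illustration only

import (
	"time"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/pex"
	"github.com/tendermint/tendermint/libs/log"
)

// wireLegacyPEX shows how the removed reactor was wired: build an address
// book, hand it to NewReactor together with a ReactorConfig, register the
// reactor on the Switch.
func wireLegacyPEX(sw *p2p.Switch, logger log.Logger, addrBookPath string, seeds []string) *pex.Reactor {
	book := pex.NewAddrBook(addrBookPath, true /* routabilityStrict */)
	book.SetLogger(logger.With("module", "pex"))
	sw.SetAddrBook(book) // the Switch consults the book when dialing

	r := pex.NewReactor(book, &pex.ReactorConfig{
		SeedMode: false,
		Seeds:    seeds, // dialed when the book is empty or peers run low
	})
	r.SetLogger(logger.With("module", "pex"))
	r.SetEnsurePeersPeriod(30 * time.Second) // defaultEnsurePeersPeriod

	sw.AddReactor("pex", r) // exposes PexChannel (0x00)
	return r
}
```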
-type Reactor struct { - p2p.BaseReactor - - book AddrBook - config *ReactorConfig - ensurePeersPeriod time.Duration // TODO: should go in the config - - // maps to prevent abuse - requestsSent *cmap.CMap // ID->struct{}: unanswered send requests - lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us - - seedAddrs []*p2p.NetAddress - - attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} - - // seed/crawled mode fields - crawlPeerInfos map[types.NodeID]crawlPeerInfo -} - -func (r *Reactor) minReceiveRequestInterval() time.Duration { - // NOTE: must be less than ensurePeersPeriod, otherwise we'll request - // peers too quickly from others and they'll think we're bad! - return r.ensurePeersPeriod / 3 -} - -// ReactorConfig holds reactor specific configuration data. -type ReactorConfig struct { - // Seed/Crawler mode - SeedMode bool - - // We want seeds to only advertise good peers. Therefore they should wait at - // least as long as we expect it to take for a peer to become good before - // disconnecting. - SeedDisconnectWaitPeriod time.Duration - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration - - // Seeds is a list of addresses reactor may use - // if it can't connect to peers in the addrbook. - Seeds []string -} - -type _attemptsToDial struct { - number int - lastDialed time.Time -} - -// NewReactor creates new PEX reactor. -func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { - r := &Reactor{ - book: b, - config: config, - ensurePeersPeriod: defaultEnsurePeersPeriod, - requestsSent: cmap.NewCMap(), - lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo), - } - r.BaseReactor = *p2p.NewBaseReactor("PEX", r) - return r -} - -// OnStart implements BaseService -func (r *Reactor) OnStart() error { - err := r.book.Start() - if err != nil && err != service.ErrAlreadyStarted { - return err - } - - numOnline, seedAddrs, err := r.checkSeeds() - if err != nil { - return err - } else if numOnline == 0 && r.book.Empty() { - return errors.New("address book is empty and couldn't resolve any seed nodes") - } - - r.seedAddrs = seedAddrs - - // Check if this node should run - // in seed/crawler mode - if r.config.SeedMode { - go r.crawlPeersRoutine() - } else { - go r.ensurePeersRoutine() - } - return nil -} - -// OnStop implements BaseService -func (r *Reactor) OnStop() { - if err := r.book.Stop(); err != nil { - r.Logger.Error("Error stopping address book", "err", err) - } -} - -// GetChannels implements Reactor -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { - return []*conn.ChannelDescriptor{ - { - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, - RecvMessageCapacity: maxMsgSize, - - MaxSendBytes: 200, - }, - } -} - -// AddPeer implements Reactor by adding peer to the address book (if inbound) -// or by requesting more addresses (if outbound). -func (r *Reactor) AddPeer(p Peer) { - if p.IsOutbound() { - // For outbound peers, the address is already in the books - - // either via DialPeersAsync or r.Receive. - // Ask it for more peers if we need. - if r.book.NeedMoreAddrs() { - r.RequestAddrs(p) - } - } else { - // inbound peer is its own source - addr, err := p.NodeInfo().NetAddress() - if err != nil { - r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p) - return - } - - // Make it explicit that addr and src are the same for an inbound peer. 
- src := addr - - // add to book. dont RequestAddrs right away because - // we don't trust inbound as much - let ensurePeersRoutine handle it. - err = r.book.AddAddress(addr, src) - r.logErrAddrBook(err) - } -} - -// RemovePeer implements Reactor by resetting peer's requests info. -func (r *Reactor) RemovePeer(p Peer, reason interface{}) { - id := string(p.ID()) - r.requestsSent.Delete(id) - r.lastReceivedRequests.Delete(id) -} - -func (r *Reactor) logErrAddrBook(err error) { - if err != nil { - switch err.(type) { - case ErrAddrBookNilAddr: - r.Logger.Error("Failed to add new address", "err", err) - default: - // non-routable, self, full book, private, etc. - r.Logger.Debug("Failed to add new address", "err", err) - } - } -} - -// Receive implements Reactor by handling incoming PEX messages. -// XXX: do not call any methods that can block or incur heavy processing. -// https://github.com/tendermint/tendermint/issues/2888 -func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) - - switch msg := msg.(type) { - case *tmp2p.PexRequest: - - // NOTE: this is a prime candidate for amplification attacks, - // so it's important we - // 1) restrict how frequently peers can request - // 2) limit the output size - - // If we're a seed and this is an inbound peer, - // respond once and disconnect. - if r.config.SeedMode && !src.IsOutbound() { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v != nil { - // FlushStop/StopPeer are already - // running in a go-routine. - return - } - r.lastReceivedRequests.Set(id, time.Now()) - - // Send addrs and disconnect - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) - go func() { - // In a go-routine so it doesn't block .Receive. - src.FlushStop() - r.Switch.StopPeerGracefully(src) - }() - - } else { - // Check we're not receiving requests too frequently. - if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - r.SendAddrs(src, r.book.GetSelection()) - } - - case *tmp2p.PexResponse: - // If we asked for addresses, add them to the book - addrs, err := NetAddressesFromProto(msg.Addresses) - if err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - err = r.ReceiveAddrs(addrs, src) - if err != nil { - r.Switch.StopPeerForError(src, err) - if err == ErrUnsolicitedList { - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - } - return - } - - default: - r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg)) - } -} - -// enforces a minimum amount of time between requests -func (r *Reactor) receiveRequest(src Peer) error { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v == nil { - // initialize with empty time - lastReceived := time.Time{} - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - lastReceived := v.(time.Time) - if lastReceived.Equal(time.Time{}) { - // first time gets a free pass. 
then we start tracking the time - lastReceived = time.Now() - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - now := time.Now() - minInterval := r.minReceiveRequestInterval() - if now.Sub(lastReceived) < minInterval { - return fmt.Errorf( - "peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", - src.ID(), - lastReceived, - now, - minInterval, - ) - } - r.lastReceivedRequests.Set(id, now) - return nil -} - -// RequestAddrs asks peer for more addresses if we do not already have a -// request out for this peer. -func (r *Reactor) RequestAddrs(p Peer) { - id := string(p.ID()) - if _, exists := r.requestsSent.GetOrSet(id, struct{}{}); exists { - return - } - r.Logger.Debug("Request addrs", "from", p) - p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) -} - -// ReceiveAddrs adds the given addrs to the addrbook if theres an open -// request for this peer and deletes the open request. -// If there's no open request for the src peer, it returns an error. -func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { - id := string(src.ID()) - if !r.requestsSent.Has(id) { - return ErrUnsolicitedList - } - r.requestsSent.Delete(id) - - srcAddr, err := src.NodeInfo().NetAddress() - if err != nil { - return err - } - - srcIsSeed := false - for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - srcIsSeed = true - break - } - } - - for _, netAddr := range addrs { - // NOTE: we check netAddr validity and routability in book#AddAddress. - err = r.book.AddAddress(netAddr, srcAddr) - if err != nil { - r.logErrAddrBook(err) - // XXX: should we be strict about incoming data and disconnect from a - // peer here too? - continue - } - - // If this address came from a seed node, try to connect to it without - // waiting (#2093) - if srcIsSeed { - r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr) - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(netAddr) - } - } - - return nil -} - -// SendAddrs sends addrs to the peer. -func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)})) -} - -// SetEnsurePeersPeriod sets period to ensure peers connected. -func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) { - r.ensurePeersPeriod = d -} - -// Ensures that sufficient peers are connected. (continuous) -func (r *Reactor) ensurePeersRoutine() { - var ( - seed = tmrand.NewRand() - jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) - ) - - // Randomize first round of communication to avoid thundering herd. - // If no peers are present directly start connecting so we guarantee swift - // setup with the help of configured seeds. - if r.nodeHasSomePeersOrDialingAny() { - time.Sleep(time.Duration(jitter)) - } - - // fire once immediately. - // ensures we dial the seeds right away if the book is empty - r.ensurePeers() - - // fire periodically - ticker := time.NewTicker(r.ensurePeersPeriod) - for { - select { - case <-ticker.C: - r.ensurePeers() - case <-r.Quit(): - ticker.Stop() - return - } - } -} - -// ensurePeers ensures that sufficient peers are connected. 
(once) -// -// heuristic that we haven't perfected yet, or, perhaps is manually edited by -// the node operator. It should not be used to compute what addresses are -// already connected or not. -func (r *Reactor) ensurePeers() { - var ( - out, in, dial = r.Switch.NumPeers() - numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial) - ) - r.Logger.Info( - "Ensure peers", - "numOutPeers", out, - "numInPeers", in, - "numDialing", dial, - "numToDial", numToDial, - ) - - if numToDial <= 0 { - return - } - - // bias to prefer more vetted peers when we have fewer connections. - // not perfect, but somewhate ensures that we prioritize connecting to more-vetted - // NOTE: range here is [10, 90]. Too high ? - newBias := tmmath.MinInt(out, 8)*10 + 10 - - toDial := make(map[types.NodeID]*p2p.NetAddress) - // Try maxAttempts times to pick numToDial addresses to dial - maxAttempts := numToDial * 3 - - for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ { - try := r.book.PickAddress(newBias) - if try == nil { - continue - } - if _, selected := toDial[try.ID]; selected { - continue - } - if r.Switch.IsDialingOrExistingAddress(try) { - continue - } - // TODO: consider moving some checks from toDial into here - // so we don't even consider dialing peers that we want to wait - // before dialing again, or have dialed too many times already - r.Logger.Info("Will dial address", "addr", try) - toDial[try.ID] = try - } - - // Dial picked addresses - for _, addr := range toDial { - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(addr) - } - - if r.book.NeedMoreAddrs() { - // Check if banned nodes can be reinstated - r.book.ReinstateBadPeers() - } - - if r.book.NeedMoreAddrs() { - - // 1) Pick a random peer and ask for more. - peers := r.Switch.Peers().List() - peersCount := len(peers) - if peersCount > 0 { - rand := tmrand.NewRand() - peer := peers[rand.Int()%peersCount] - r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) - r.RequestAddrs(peer) - } - - // 2) Dial seeds if we are not dialing anyone. - // This is done in addition to asking a peer for addresses to work-around - // peers not participating in PEX. - if len(toDial) == 0 { - r.Logger.Info("No addresses to dial. 
Falling back to seeds") - r.dialSeeds() - } - } -} - -func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { - _attempts, ok := r.attemptsToDial.Load(addr.DialString()) - if !ok { - return - } - atd := _attempts.(_attemptsToDial) - return atd.number, atd.lastDialed -} - -func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { - attempts, lastDialed := r.dialAttemptsInfo(addr) - if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - r.book.MarkBad(addr, defaultBanTime) - return errMaxAttemptsToDial{} - } - - // exponential backoff if it's not our first attempt to dial given address - if attempts > 0 { - rand := tmrand.NewRand() - jitter := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitter + ((1 << uint(attempts)) * time.Second) - backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) - sinceLastDialed := time.Since(lastDialed) - if sinceLastDialed < backoffDuration { - return errTooEarlyToDial{backoffDuration, lastDialed} - } - } - - err := r.Switch.DialPeerWithAddress(addr) - if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return err - } - - markAddrInBookBasedOnErr(addr, r.book, err) - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr - r.attemptsToDial.Delete(addr.DialString()) - default: - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } - return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) - } - - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) - return nil -} - -// maxBackoffDurationForPeer caps the backoff duration for persistent peers. -func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration { - if r.config.PersistentPeersMaxDialPeriod > 0 && - planned > r.config.PersistentPeersMaxDialPeriod && - r.Switch.IsPeerPersistent(addr) { - return r.config.PersistentPeersMaxDialPeriod - } - return planned -} - -// checkSeeds checks that addresses are well formed. -// Returns number of seeds we can connect to, along with all seeds addrs. -// return err if user provided any badly formatted seed addresses. -// Doesn't error if the seed node can't be reached. -// numOnline returns -1 if no seed nodes were in the initial configuration. 
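dialPeer above spaces out redials with exponential backoff: roughly a random jitter of up to one second plus 2^attempts seconds, capped at PersistentPeersMaxDialPeriod for persistent peers, with the address marked bad once it exceeds maxAttemptsToDial (16). A standalone sketch of that schedule (the cap value here is only illustrative):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

const maxDialPeriod = 10 * time.Minute // stand-in for PersistentPeersMaxDialPeriod

// backoff mirrors the removed dialPeer logic: jitter + 2^attempts seconds,
// capped for persistent peers when a maximum dial period is configured.
func backoff(attempts uint, persistent bool) time.Duration {
	jitter := time.Duration(rand.Float64() * float64(time.Second))
	d := jitter + (1<<attempts)*time.Second
	if persistent && maxDialPeriod > 0 && d > maxDialPeriod {
		d = maxDialPeriod
	}
	return d
}

func main() {
	for attempts := uint(1); attempts <= 10; attempts++ {
		fmt.Printf("attempt %2d: wait ~%v (persistent: %v)\n",
			attempts, backoff(attempts, false), backoff(attempts, true))
	}
}
```

Without the cap, a persistent peer that stays down for a while would quickly reach multi-hour waits, which is why the per-peer maximum only applies to persistent peers.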
-func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return -1, nil, nil - } - netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds) - numOnline = lSeeds - len(errs) - for _, err := range errs { - switch e := err.(type) { - case types.ErrNetAddressLookup: - r.Logger.Error("Connecting to seed failed", "err", e) - default: - return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) - } - } - return numOnline, netAddrs, nil -} - -// randomly dial seeds until we connect to one or exhaust them -func (r *Reactor) dialSeeds() { - rand := tmrand.NewRand() - perm := rand.Perm(len(r.seedAddrs)) - // perm := r.Switch.rng.Perm(lSeeds) - for _, i := range perm { - // dial a random seed - seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr) - - switch err.(type) { - case nil, p2p.ErrCurrentlyDialingOrExistingAddress: - return - } - r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) - } - // do not write error message if there were no seeds specified in config - if len(r.seedAddrs) > 0 { - r.Switch.Logger.Error("Couldn't connect to any seeds") - } -} - -// AttemptsToDial returns the number of attempts to dial specific address. It -// returns 0 if never attempted or successfully connected. -func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { - lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) - if attempted { - return lAttempts.(_attemptsToDial).number - } - return 0 -} - -//---------------------------------------------------------- - -// Explores the network searching for more peers. (continuous) -// Seed/Crawler Mode causes this node to quickly disconnect -// from peers, except other seed nodes. -func (r *Reactor) crawlPeersRoutine() { - // If we have any seed nodes, consult them first - if len(r.seedAddrs) > 0 { - r.dialSeeds() - } else { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) - } - - // Fire periodically - ticker := time.NewTicker(crawlPeerPeriod) - - for { - select { - case <-ticker.C: - r.attemptDisconnects() - r.crawlPeers(r.book.GetSelection()) - r.cleanupCrawlPeerInfos() - case <-r.Quit(): - return - } - } -} - -// nodeHasSomePeersOrDialingAny returns true if the node is connected to some -// peers or dialing them currently. -func (r *Reactor) nodeHasSomePeersOrDialingAny() bool { - out, in, dial := r.Switch.NumPeers() - return out+in+dial > 0 -} - -// crawlPeerInfo handles temporary data needed for the network crawling -// performed during seed/crawler mode. -type crawlPeerInfo struct { - Addr *p2p.NetAddress `json:"addr"` - // The last time we crawled the peer or attempted to do so. - LastCrawled time.Time `json:"last_crawled"` -} - -// crawlPeers will crawl the network looking for new peer addresses. -func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { - now := time.Now() - - for _, addr := range addrs { - peerInfo, ok := r.crawlPeerInfos[addr.ID] - - // Do not attempt to connect with peers we recently crawled. - if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls { - continue - } - - // Record crawling attempt. 
- r.crawlPeerInfos[addr.ID] = crawlPeerInfo{ - Addr: addr, - LastCrawled: now, - } - - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - continue - } - - peer := r.Switch.Peers().Get(addr.ID) - if peer != nil { - r.RequestAddrs(peer) - } - } -} - -func (r *Reactor) cleanupCrawlPeerInfos() { - for id, info := range r.crawlPeerInfos { - // If we did not crawl a peer for 24 hours, it means the peer was removed - // from the addrbook => remove - // - // 10000 addresses / maxGetSelection = 40 cycles to get all addresses in - // the ideal case, - // 40 * crawlPeerPeriod ~ 20 minutes - if time.Since(info.LastCrawled) > 24*time.Hour { - delete(r.crawlPeerInfos, id) - } - } -} - -// attemptDisconnects checks if we've been with each peer long enough to disconnect -func (r *Reactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { - continue - } - if peer.IsPersistent() { - continue - } - r.Switch.StopPeerGracefully(peer) - } -} - -func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { - // TODO: detect more "bad peer" scenarios - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr, defaultBanTime) - default: - book.MarkAttempt(addr) - } -} - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.PexMessage{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb} - case *tmp2p.PexResponse: - msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.PexMessage{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.PexMessage_PexRequest: - return msg.PexRequest, nil - case *tmp2p.PexMessage_PexResponse: - return msg.PexResponse, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} - -//----------------------------------------------------------------------------- -// address converters - -// NetAddressFromProto converts a Protobuf PexAddress into a native struct. -func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { - ip := net.ParseIP(pb.IP) - if ip == nil { - return nil, fmt.Errorf("invalid IP address %v", pb.IP) - } - if pb.Port >= 1<<16 { - return nil, fmt.Errorf("invalid port number %v", pb.Port) - } - return &types.NetAddress{ - ID: types.NodeID(pb.ID), - IP: ip, - Port: uint16(pb.Port), - }, nil -} - -// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) { - nas := make([]*types.NetAddress, 0, len(pbs)) - for _, pb := range pbs { - na, err := NetAddressFromProto(pb) - if err != nil { - return nil, err - } - nas = append(nas, na) - } - return nas, nil -} - -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. 
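mustEncode and decodeMsg above wrap every request and response in the tmp2p.PexMessage oneof, so a peer always unmarshals a single envelope and switches on Sum. A minimal round-trip sketch against those proto types as they exist in this version of the tree (address values are made up):

```go
package main

import (
	"fmt"

	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

func main() {
	// Wrap a PexResponse in the PexMessage envelope, as SendAddrs did via
	// mustEncode.
	env := tmp2p.PexMessage{
		Sum: &tmp2p.PexMessage_PexResponse{
			PexResponse: &tmp2p.PexResponse{
				Addresses: []tmp2p.PexAddress{
					{ID: "deadbeef", IP: "10.0.0.1", Port: 26656},
				},
			},
		},
	}
	bz, err := env.Marshal()
	if err != nil {
		panic(err)
	}

	// The receiving side unmarshals one envelope and switches on Sum,
	// mirroring the removed decodeMsg.
	var got tmp2p.PexMessage
	if err := got.Unmarshal(bz); err != nil {
		panic(err)
	}
	switch m := got.Sum.(type) {
	case *tmp2p.PexMessage_PexRequest:
		fmt.Println("pex request:", m.PexRequest)
	case *tmp2p.PexMessage_PexResponse:
		fmt.Println("pex response with", len(m.PexResponse.Addresses), "addresses")
	default:
		fmt.Printf("unknown message: %T\n", m)
	}
}
```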
-func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress { - pbs := make([]tmp2p.PexAddress, 0, len(nas)) - for _, na := range nas { - if na != nil { - pbs = append(pbs, tmp2p.PexAddress{ - ID: string(na.ID), - IP: na.IP.String(), - Port: uint32(na.Port), - }) - } - } - return pbs -} diff --git a/internal/p2p/pex/pex_reactor_test.go b/internal/p2p/pex/pex_reactor_test.go deleted file mode 100644 index 56f24457f..000000000 --- a/internal/p2p/pex/pex_reactor_test.go +++ /dev/null @@ -1,680 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/mock" - "github.com/tendermint/tendermint/libs/log" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -func TestPEXReactorBasic(t *testing.T) { - r, _ := createReactor(t, &ReactorConfig{}) - - assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) -} - -func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - - size := book.Size() - peer := p2p.CreateRandomPeer(false) - - r.AddPeer(peer) - assert.Equal(t, size+1, book.Size()) - - r.RemovePeer(peer, "peer not available") - - outboundPeer := p2p.CreateRandomPeer(true) - - r.AddPeer(outboundPeer) - assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") - - r.RemovePeer(outboundPeer, "peer not available") -} - -// --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) -// -// EXPLANATION: peers are getting rejected because in switch#addPeer we check -// if any peer (who we already connected to) has the same IP. Even though local -// peers have different IP addresses, they all have the same underlying remote -// IP: 127.0.0.1. 
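That failure mode is also why the test configuration in this file enables AllowDuplicateIP: every local switch appears to its peers as 127.0.0.1, so duplicate-IP filtering would otherwise reject all but the first connection. A tiny sketch of those test-only settings, mirroring the init() above:

```go
import "github.com/tendermint/tendermint/config"

// newTestP2PConfig mirrors the init() in this test file: local switches all
// share 127.0.0.1, so duplicate-IP filtering must be turned off for them to
// connect to each other.
func newTestP2PConfig() *config.P2PConfig {
	cfg := config.DefaultP2PConfig()
	cfg.PexReactor = true
	cfg.AllowDuplicateIP = true
	return cfg
}
```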
-// -func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - r := NewReactor(books[i], &ReactorConfig{}) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NetAddress() - err := books[switchIndex].AddAddress(addr, addr) - require.NoError(t, err) - } - - addOtherNodeAddrToAddrBook(0, 1) - addOtherNodeAddrToAddrBook(1, 0) - addOtherNodeAddrToAddrBook(2, 1) - - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) - - // stop them - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - peer := p2p.CreateRandomPeer(false) - - // we have to send a request to receive responses - r.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - r.Receive(PexChannel, peer, msg) - assert.Equal(t, size+1, book.Size()) - - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
-} - -func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - peerAddr := peer.SocketAddr() - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - err := book.AddAddress(peerAddr, peerAddr) - require.NoError(t, err) - require.True(t, book.HasAddress(peerAddr)) - - id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) - - // first time creates the entry - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // next time sets the last time value - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) - assert.False(t, r.lastReceivedRequests.Has(id)) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peerAddr)) -} - -func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - - // request addrs from the peer - r.RequestAddrs(peer) - assert.True(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})}) - - // receive some addrs. should clear the request - r.Receive(PexChannel, peer, msg) - assert.False(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peer.SocketAddr())) -} - -func TestCheckSeeds(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. test creating peer with no seeds works - peerSwitch := testCreateDefaultPeer(dir, 0) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 2. create seed - seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - - // 3. test create peer with online seed works - peerSwitch = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 4. test create peer with all seeds having unresolvable DNS fails - badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 5. 
test create peer with one good seed address succeeds - badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests -} - -func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create seed - seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 2. create usual peer with only seed configured. - peer := testCreatePeerWithSeed(dir, 1, seed) - require.Nil(t, peer.Start()) - t.Cleanup(func() { _ = peer.Stop() }) - - // 3. check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) -} - -func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - t.Cleanup(func() { _ = secondPeer.Stop() }) - - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) - - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) -} - -func TestPEXReactorSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreateDefaultPeer(dir, 1) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // 2. attemptDisconnects should not disconnect because of wait period - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 3. 
attemptDisconnects should disconnect after wait period - pexR.attemptDisconnects() - assert.Equal(t, 0, sw.Peers().Size()) -} - -func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreatePeerWithConfig(dir, 1, pexRConfig) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 2. attemptDisconnects should not disconnect because the peer is persistent - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { - // directory to store address books - pexR, book := createReactor(t, &ReactorConfig{SeedMode: true}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - // No need to start sw since crawlPeers is called manually here. - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - require.NoError(t, book.AddAddress(addr, addr)) - - assert.True(t, book.HasAddress(addr)) - - // imitate maxAttemptsToDial reached - pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) - pexR.crawlPeers([]*p2p.NetAddress{addr}) - - assert.False(t, book.HasAddress(addr)) -} - -// connect a peer to a seed, wait a bit, then stop it. -// this should give it time to request addrs and for the seed -// to call FlushStop, and allows us to test calling Stop concurrently -// with FlushStop. Before a fix, this non-deterministically reproduced -// https://github.com/tendermint/tendermint/issues/3231. 
-func TestPEXReactorSeedModeFlushStop(t *testing.T) { - t.Skip("flaky test, will be replaced by new P2P stack") - N := 2 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - config := &ReactorConfig{} - if i == 0 { - // first one is a seed node - config = &ReactorConfig{SeedMode: true} - } - r := NewReactor(books[i], config) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - reactor := switches[0].Reactors()["pex"].(*Reactor) - peerID := switches[1].NodeInfo().ID() - - assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress())) - - // sleep up to a second while waiting for the peer to send us a message. - // this isn't perfect since it's possible the peer sends us a msg and we FlushStop - // before this loop catches it. but non-deterministically it works pretty well. - for i := 0; i < 1000; i++ { - v := reactor.lastReceivedRequests.Get(string(peerID)) - if v != nil { - break - } - time.Sleep(time.Millisecond) - } - - // by now the FlushStop should have happened. Try stopping the peer. - // it should be safe to do this. - peers := switches[0].Peers().List() - for _, peer := range peers { - err := peer.Stop() - require.NoError(t, err) - } - - // stop the switches - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { - peer := p2p.CreateRandomPeer(false) - - pexR, book := createReactor(t, &ReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) - - // we have to send a request to receive responses - pexR.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - pexR.Receive(PexChannel, peer, msg) - assert.Equal(t, size, book.Size()) - - pexR.AddPeer(peer) - assert.Equal(t, size, book.Size()) -} - -func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - assert.Equal(t, 0, pexR.AttemptsToDial(addr)) - - // 1st unsuccessful attempt - err := pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - // 2nd unsuccessful attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - // must be skipped because it is too early - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - if !testing.Short() { - time.Sleep(3 * time.Second) - - // 3rd attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 2, pexR.AttemptsToDial(addr)) - } -} - -func assertPeersWithTimeout( - t *testing.T, - switches []*p2p.Switch, - checkPeriod, timeout time.Duration, - nPeers int, -) { - var ( - ticker = time.NewTicker(checkPeriod) - remaining = timeout 
- ) - - for { - select { - case <-ticker.C: - // check peers are connected - allGood := true - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound < nPeers { - allGood = false - break - } - } - remaining -= checkPeriod - if remaining < 0 { - remaining = 0 - } - if allGood { - return - } - case <-time.After(remaining): - numPeersStr := "" - for i, s := range switches { - outbound, inbound, _ := s.NumPeers() - numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) - } - t.Errorf( - "expected all switches to be connected to at least %d peer(s) (switches: %s)", - nPeers, numPeersStr, - ) - return - } - } -} - -// Creates a peer with the provided config -func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch { - peer := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor( - book, - config, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return peer -} - -// Creates a peer with the default config -func testCreateDefaultPeer(dir string, id int) *p2p.Switch { - return testCreatePeerWithConfig(dir, id, &ReactorConfig{}) -} - -// Creates a seed which knows about the provided addresses / source address pairs. -// Starting and stopping the seed is left to the caller -func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch { - seed := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) - book.SetLogger(log.TestingLogger()) - for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests - book.MarkGood(knownAddrs[j].ID) - } - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor(book, &ReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return seed -} - -// Creates a peer which knows about the provided seed. 
-// Starting and stopping the peer is left to the caller -func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { - conf := &ReactorConfig{ - Seeds: []string{seed.NetAddress().String()}, - } - return testCreatePeerWithConfig(dir, id, conf) -} - -func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) { - // directory to store address book - book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true) - book.SetLogger(log.TestingLogger()) - - r = NewReactor(book, conf) - r.SetLogger(log.TestingLogger()) - return -} - -func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - for _, r := range reactors { - sw.AddReactor(r.String(), r) - } - return sw - }, log.TestingLogger()) - return sw -} - -func TestPexVectors(t *testing.T) { - addr := tmp2p.PexAddress{ - ID: "1", - IP: "127.0.0.1", - Port: 9090, - } - - testCases := []struct { - testName string - msg proto.Message - expBytes string - }{ - {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, - {"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, - } - - for _, tc := range testCases { - tc := tc - - bz := mustEncode(tc.msg) - - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) - } -} - -// FIXME: This function is used in place of testing.TB.TempDir() -// as the latter seems to cause test cases to fail when it is -// unable to remove the temporary directory once the test case -// execution terminates. This seems to happen often with pex -// reactor test cases. -// -// References: -// https://github.com/tendermint/tendermint/pull/5733 -// https://github.com/tendermint/tendermint/issues/5732 -func tempDir(t *testing.T) string { - t.Helper() - dir, err := ioutil.TempDir("", "") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) - return dir -} diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 8cff2f95b..300a6022d 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -24,6 +24,22 @@ var ( // TODO: Consolidate with params file. // See https://github.com/tendermint/tendermint/issues/6371 const ( + // PexChannel is a channel for PEX messages + PexChannel = byte(0x00) + + // over-estimate of max NetAddress size + // hexID (40) + IP (16) + Port (2) + Name (100) ... + // NOTE: dont use massive DNS name .. + maxAddressSize = 256 + + // max addresses returned by GetSelection + // NOTE: this must match "maxMsgSize" + maxGetSelection = 250 + + // NOTE: amplification factor! + // small request results in up to maxMsgSize response + maxMsgSize = maxAddressSize * maxGetSelection + // the minimum time one peer can send another request to the same peer minReceiveRequestInterval = 100 * time.Millisecond diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 1171566d1..d68f16c4f 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -21,6 +21,8 @@ import ( const queueBufferDefault = 32 +const dialRandomizerIntervalMillisecond = 3000 + // ChannelID is an arbitrary channel ID. 
type ChannelID uint16 @@ -544,7 +546,7 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { func (r *Router) dialSleep(ctx context.Context) { if r.options.DialSleep == nil { // nolint:gosec // G404: Use of weak random number generator - timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond) + timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond) defer timer.Stop() select { diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go index 07d1ad156..c9cdc2756 100644 --- a/internal/p2p/shim.go +++ b/internal/p2p/shim.go @@ -1,58 +1,42 @@ package p2p import ( - "errors" "sort" "github.com/gogo/protobuf/proto" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" ) -// ============================================================================ -// TODO: Types and business logic below are temporary and will be removed once -// the legacy p2p stack is removed in favor of the new model. -// -// ref: https://github.com/tendermint/tendermint/issues/5670 -// ============================================================================ - -var _ Reactor = (*ReactorShim)(nil) - -type ( - messageValidator interface { - Validate() error - } - - // ReactorShim defines a generic shim wrapper around a BaseReactor. It is - // responsible for wiring up legacy p2p behavior to the new p2p semantics - // (e.g. proxying Envelope messages to legacy peers). - ReactorShim struct { - BaseReactor - - Name string - PeerUpdates *PeerUpdates - Channels map[ChannelID]*ChannelShim - } +// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel +// and the proto.Message the new p2p Channel is responsible for handling. +// A ChannelDescriptorShim is not contained in ReactorShim, but is rather +// used to construct a ReactorShim. +type ChannelDescriptorShim struct { + MsgType proto.Message + Descriptor *ChannelDescriptor +} - // ChannelShim defines a generic shim wrapper around a legacy p2p channel - // and the new p2p Channel. It also includes the raw bi-directional Go channels - // so we can proxy message delivery. - ChannelShim struct { - Descriptor *ChannelDescriptor - Channel *Channel - inCh chan<- Envelope - outCh <-chan Envelope - errCh <-chan PeerError - } +// ChannelShim defines a generic shim wrapper around a legacy p2p channel +// and the new p2p Channel. It also includes the raw bi-directional Go channels +// so we can proxy message delivery. +type ChannelShim struct { + Descriptor *ChannelDescriptor + Channel *Channel + inCh chan<- Envelope + outCh <-chan Envelope + errCh <-chan PeerError +} - // ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel - // and the proto.Message the new p2p Channel is responsible for handling. - // A ChannelDescriptorShim is not contained in ReactorShim, but is rather - // used to construct a ReactorShim. - ChannelDescriptorShim struct { - MsgType proto.Message - Descriptor *ChannelDescriptor - } -) +// ReactorShim defines a generic shim wrapper around a BaseReactor. It is +// responsible for wiring up legacy p2p behavior to the new p2p semantics +// (e.g. proxying Envelope messages to legacy peers). 
+type ReactorShim struct { + Name string + PeerUpdates *PeerUpdates + Channels map[ChannelID]*ChannelShim +} func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim { channels := make(map[ChannelID]*ChannelShim) @@ -68,9 +52,6 @@ func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*C Channels: channels, } - rs.BaseReactor = *NewBaseReactor(name, rs) - rs.SetLogger(logger) - return rs } @@ -93,121 +74,15 @@ func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim { } } -// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate -// go-routine where we listen for outbound envelopes sent during Receive -// executions (or anything else that may send on the Channel) and proxy them to -// the corresponding Peer using the To field from the envelope. -func (rs *ReactorShim) proxyPeerEnvelopes() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for e := range cs.outCh { - msg := proto.Clone(cs.Channel.messageType) - msg.Reset() - - wrapper, ok := msg.(Wrapper) - if ok { - if err := wrapper.Wrap(e.Message); err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to wrap message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - } else { - msg = e.Message - } - - bz, err := proto.Marshal(msg) - if err != nil { - rs.Logger.Error( - "failed to proxy envelope; failed to encode message", - "ch_id", cs.Descriptor.ID, - "err", err, - ) - continue - } - - switch { - case e.Broadcast: - rs.Switch.Broadcast(cs.Descriptor.ID, bz) - - case e.To != "": - src := rs.Switch.peers.Get(e.To) - if src == nil { - rs.Logger.Debug( - "failed to proxy envelope; failed to find peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - continue - } - - if !src.Send(cs.Descriptor.ID, bz) { - // This usually happens when we try to send across a channel - // that the peer doesn't have open. To avoid bloating the - // logs we set this to be Debug - rs.Logger.Debug( - "failed to proxy message to peer", - "ch_id", cs.Descriptor.ID, - "peer", e.To, - ) - } - - default: - rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID) - } - } - }(cs) - } -} - -// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine -// where we listen for peer errors. For each peer error, we find the peer from -// the legacy p2p Switch and execute a StopPeerForError call with the corresponding -// peer error. -func (rs *ReactorShim) handlePeerErrors() { - for _, cs := range rs.Channels { - go func(cs *ChannelShim) { - for pErr := range cs.errCh { - if pErr.NodeID != "" { - peer := rs.Switch.peers.Get(pErr.NodeID) - if peer == nil { - rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID) - continue - } - - rs.Switch.StopPeerForError(peer, pErr.Err) - } - } - }(cs) - } -} - -// OnStart executes the reactor shim's OnStart hook where we start all the -// necessary go-routines in order to proxy peer envelopes and errors per p2p -// Channel. -func (rs *ReactorShim) OnStart() error { - if rs.Switch == nil { - return errors.New("proxyPeerEnvelopes: reactor shim switch is nil") - } - - // start envelope proxying and peer error handling in separate go routines - rs.proxyPeerEnvelopes() - rs.handlePeerErrors() - - return nil -} - -// GetChannel returns a p2p Channel reference for a given ChannelID. If no -// Channel exists, nil is returned. 
-func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { - channelShim, ok := rs.Channels[cID] - if ok { - return channelShim.Channel - } - - return nil +// MConnConfig returns an MConnConfig based on the defaults, with fields updated +// from the P2PConfig. +func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { + mConfig := conn.DefaultMConnConfig() + mConfig.FlushThrottle = cfg.FlushThrottleTimeout + mConfig.SendRate = cfg.SendRate + mConfig.RecvRate = cfg.RecvRate + mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize + return mConfig } // GetChannels implements the legacy Reactor interface for getting a slice of all @@ -228,107 +103,13 @@ func (rs *ReactorShim) GetChannels() []*ChannelDescriptor { return descriptors } -// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle adding a peer. -func (rs *ReactorShim) AddPeer(peer Peer) { - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusUp}: - rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh. -// The embedding reactor must be sure to listen for messages on this channel to -// handle removing a peer. -func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) { - select { - case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown}: - rs.Logger.Debug( - "sent peer update", - "reactor", rs.Name, - "peer", peer.ID(), - "reason", reason, - "status", PeerStatusDown, - ) - - case <-rs.PeerUpdates.Done(): - // NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the updateCh go channel and when the reactor stops - // we do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the updateCh channel when closing or - // stopping. - } -} - -// Receive implements a generic wrapper around implementing the Receive method -// on the legacy Reactor p2p interface. If the reactor is running, Receive will -// find the corresponding new p2p Channel, create and decode the appropriate -// proto.Message from the msgBytes, execute any validation and finally construct -// and send a p2p Envelope on the appropriate p2p Channel. -func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) { - if !rs.IsRunning() { - return - } - - cID := ChannelID(chID) +// GetChannel returns a p2p Channel reference for a given ChannelID. If no +// Channel exists, nil is returned. 
+func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel { channelShim, ok := rs.Channels[cID] - if !ok { - rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID) - return - } - - msg := proto.Clone(channelShim.Channel.messageType) - msg.Reset() - - if err := proto.Unmarshal(msgBytes, msg); err != nil { - rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - - validator, ok := msg.(messageValidator) if ok { - if err := validator.Validate(); err != nil { - rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err) - rs.Switch.StopPeerForError(src, err) - return - } - } - - wrapper, ok := msg.(Wrapper) - if ok { - var err error - - msg, err = wrapper.Unwrap() - if err != nil { - rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err) - return - } + return channelShim.Channel } - select { - case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}: - rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID()) - - case <-channelShim.Channel.Done(): - // NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel. - // This is because there may be numerous spawned goroutines that are - // attempting to send on the inbound channel and when the reactor stops we - // do not want to preemptively close the channel as that could result in - // panics sending on a closed channel. This also means that reactors MUST - // be certain there are NO listeners on the inbound channel when closing or - // stopping. - } + return nil } diff --git a/internal/p2p/shim_test.go b/internal/p2p/shim_test.go deleted file mode 100644 index d8b9e30c3..000000000 --- a/internal/p2p/shim_test.go +++ /dev/null @@ -1,207 +0,0 @@ -package p2p_test - -import ( - "sync" - "testing" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks" - "github.com/tendermint/tendermint/libs/log" - ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/types" -) - -var ( - channelID1 = byte(0x01) - channelID2 = byte(0x02) - - p2pCfg = config.DefaultP2PConfig() - - testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - p2p.ChannelID(channelID1): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID1, - Priority: 3, - SendQueueCapacity: 10, - RecvMessageCapacity: int(4e6), - }, - }, - p2p.ChannelID(channelID2): { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: channelID2, - Priority: 1, - SendQueueCapacity: 4, - RecvMessageCapacity: int(16e6), - }, - }, - } -) - -type reactorShimTestSuite struct { - shim *p2p.ReactorShim - sw *p2p.Switch -} - -func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite { - t.Helper() - - rts := &reactorShimTestSuite{ - shim: p2p.NewReactorShim(log.TestingLogger(), "TestShim", testChannelShims), - } - - rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", func(_ int, sw *p2p.Switch) *p2p.Switch { - for _, peer := range peers { - p2p.AddPeerToSwitchPeerSet(sw, peer) - } - - sw.AddReactor(rts.shim.Name, rts.shim) - return sw - }, log.TestingLogger()) - - // start the reactor shim - require.NoError(t, rts.shim.Start()) - - t.Cleanup(func() { - require.NoError(t, rts.shim.Stop()) - - for _, 
chs := range rts.shim.Channels { - chs.Channel.Close() - } - }) - - return rts -} - -func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) { - t.Helper() - - peerID := types.NodeID(id) - peer := &p2pmocks.Peer{} - peer.On("ID").Return(peerID) - - return peer, peerID -} - -func TestReactorShim_GetChannel(t *testing.T) { - rts := setup(t, nil) - - p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1)) - require.NotNil(t, p2pCh) - require.Equal(t, p2pCh.ID, p2p.ChannelID(channelID1)) - - p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03))) - require.Nil(t, p2pCh) -} - -func TestReactorShim_GetChannels(t *testing.T) { - rts := setup(t, nil) - - p2pChs := rts.shim.GetChannels() - require.Len(t, p2pChs, 2) - require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1)) - require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2)) -} - -func TestReactorShim_AddPeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.AddPeer(peerA) - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status) -} - -func TestReactorShim_RemovePeer(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - var wg sync.WaitGroup - wg.Add(1) - - var peerUpdate p2p.PeerUpdate - go func() { - peerUpdate = <-rts.shim.PeerUpdates.Updates() - wg.Done() - }() - - rts.shim.RemovePeer(peerA, "test reason") - wg.Wait() - - require.Equal(t, peerIDA, peerUpdate.NodeID) - require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status) -} - -func TestReactorShim_Receive(t *testing.T) { - peerA, peerIDA := simplePeer(t, "aa") - rts := setup(t, []p2p.Peer{peerA}) - - msg := &ssproto.Message{ - Sum: &ssproto.Message_ChunkRequest{ - ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1}, - }, - } - - bz, err := proto.Marshal(msg) - require.NoError(t, err) - - var wg sync.WaitGroup - - var response *ssproto.Message - peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) { - m := &ssproto.Message{} - require.NoError(t, proto.Unmarshal(args[1].([]byte), m)) - - response = m - wg.Done() - }).Return(true) - - p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)] - - wg.Add(2) - - // Simulate receiving the envelope in some real reactor and replying back with - // the same envelope and then closing the Channel. - go func() { - e := <-p2pCh.Channel.In - require.Equal(t, peerIDA, e.From) - require.NotNil(t, e.Message) - - p2pCh.Channel.Out <- p2p.Envelope{To: e.From, Message: e.Message} - p2pCh.Channel.Close() - wg.Done() - }() - - rts.shim.Receive(channelID1, peerA, bz) - - // wait until the mock peer called Send and we (fake) proxied the envelope - wg.Wait() - require.NotNil(t, response) - - m, err := response.Unwrap() - require.NoError(t, err) - require.Equal(t, msg.GetChunkRequest(), m) - - // Since p2pCh was closed in the simulated reactor above, calling Receive - // should not block. 
- rts.shim.Receive(channelID1, peerA, bz) - require.Empty(t, p2pCh.Channel.In) - - peerA.AssertExpectations(t) -} diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go deleted file mode 100644 index ea1272354..000000000 --- a/internal/p2p/switch.go +++ /dev/null @@ -1,1064 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - "io" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - // wait a random amount of time from this interval - // before dialing peers or reconnecting to help prevent DoS - dialRandomizerIntervalMilliseconds = 3000 - - // repeatedly try to reconnect for a few minutes - // ie. 5 * 20 = 100s - reconnectAttempts = 20 - reconnectInterval = 5 * time.Second - - // then move into exponential backoff mode for ~1day - // ie. 3**10 = 16hrs - reconnectBackOffAttempts = 10 - reconnectBackOffBaseSeconds = 3 - - defaultFilterTimeout = 5 * time.Second -) - -// MConnConfig returns an MConnConfig with fields updated -// from the P2PConfig. -func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig { - mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = cfg.FlushThrottleTimeout - mConfig.SendRate = cfg.SendRate - mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize - return mConfig -} - -//----------------------------------------------------------------------------- - -// An AddrBook represents an address book from the pex package, which is used -// to store peer addresses. -type AddrBook interface { - AddAddress(addr *NetAddress, src *NetAddress) error - AddPrivateIDs([]string) - AddOurAddress(*NetAddress) - OurAddress(*NetAddress) bool - MarkGood(types.NodeID) - RemoveAddress(*NetAddress) - HasAddress(*NetAddress) bool - Save() -} - -// ConnFilterFunc is a callback for connection filtering. If it returns an -// error, the connection is rejected. The set of existing connections is passed -// along with the new connection and all resolved IPs. -type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error - -// PeerFilterFunc to be implemented by filter hooks after a new Peer has been -// fully setup. -type PeerFilterFunc func(IPeerSet, Peer) error - -// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection -// and refuses new ones if they come from a known ip. -var ConnDuplicateIPFilter ConnFilterFunc = func(cs ConnSet, c net.Conn, ips []net.IP) error { - for _, ip := range ips { - if cs.HasIP(ip) { - return ErrRejected{ - conn: c, - err: fmt.Errorf("ip<%v> already connected", ip), - isDuplicate: true, - } - } - } - return nil -} - -//----------------------------------------------------------------------------- - -// Switch handles peer connections and exposes an API to receive incoming messages -// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -// or more `Channels`. So while sending outgoing messages is typically performed on the peer, -// incoming messages are received on the reactor. 
-type Switch struct { - service.BaseService - - config *config.P2PConfig - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmap.CMap - reconnecting *cmap.CMap - nodeInfo types.NodeInfo // our node info - nodeKey types.NodeKey // our node privkey - addrBook AddrBook - // peers addresses with whom we'll maintain constant connection - persistentPeersAddrs []*NetAddress - unconditionalPeerIDs map[types.NodeID]struct{} - - transport Transport - - filterTimeout time.Duration - peerFilters []PeerFilterFunc - connFilters []ConnFilterFunc - conns ConnSet - - metrics *Metrics -} - -// NetAddress returns the first address the switch is listening on, -// or nil if no addresses are found. -func (sw *Switch) NetAddress() *NetAddress { - endpoints := sw.transport.Endpoints() - if len(endpoints) == 0 { - return nil - } - return &NetAddress{ - ID: sw.nodeInfo.NodeID, - IP: endpoints[0].IP, - Port: endpoints[0].Port, - } -} - -// SwitchOption sets an optional parameter on the Switch. -type SwitchOption func(*Switch) - -// NewSwitch creates a new Switch with the given config. -func NewSwitch( - cfg *config.P2PConfig, - transport Transport, - options ...SwitchOption, -) *Switch { - sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmap.NewCMap(), - reconnecting: cmap.NewCMap(), - metrics: NopMetrics(), - transport: transport, - persistentPeersAddrs: make([]*NetAddress, 0), - unconditionalPeerIDs: make(map[types.NodeID]struct{}), - filterTimeout: defaultFilterTimeout, - conns: NewConnSet(), - } - - // Ensure PRNG is reseeded. - tmrand.Reseed() - - sw.BaseService = *service.NewBaseService(nil, "P2P Switch", sw) - - for _, option := range options { - option(sw) - } - - return sw -} - -// SwitchFilterTimeout sets the timeout used for peer filters. -func SwitchFilterTimeout(timeout time.Duration) SwitchOption { - return func(sw *Switch) { sw.filterTimeout = timeout } -} - -// SwitchPeerFilters sets the filters for rejection of new peers. -func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption { - return func(sw *Switch) { sw.peerFilters = filters } -} - -// SwitchConnFilters sets the filters for rejection of connections. -func SwitchConnFilters(filters ...ConnFilterFunc) SwitchOption { - return func(sw *Switch) { sw.connFilters = filters } -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) SwitchOption { - return func(sw *Switch) { sw.metrics = metrics } -} - -//--------------------------------------------------------------------- -// Switch setup - -// AddReactor adds the given reactor to the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - for _, chDesc := range reactor.GetChannels() { - chID := chDesc.ID - // No two reactors can share the same channel. - if sw.reactorsByCh[chID] != nil { - panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) - } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - } - sw.reactors[name] = reactor - reactor.SetSwitch(sw) - return reactor -} - -// RemoveReactor removes the given Reactor from the Switch. -// NOTE: Not goroutine safe. 
-func (sw *Switch) RemoveReactor(name string, reactor Reactor) { - for _, chDesc := range reactor.GetChannels() { - // remove channel description - for i := 0; i < len(sw.chDescs); i++ { - if chDesc.ID == sw.chDescs[i].ID { - sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...) - break - } - } - delete(sw.reactorsByCh, chDesc.ID) - } - delete(sw.reactors, name) - reactor.SetSwitch(nil) -} - -// Reactors returns a map of reactors registered on the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactors() map[string]Reactor { - return sw.reactors -} - -// Reactor returns the reactor with the given name. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactor(name string) Reactor { - return sw.reactors[name] -} - -// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo types.NodeInfo) { - sw.nodeInfo = nodeInfo -} - -// NodeInfo returns the switch's NodeInfo. -// NOTE: Not goroutine safe. -func (sw *Switch) NodeInfo() types.NodeInfo { - return sw.nodeInfo -} - -// SetNodeKey sets the switch's private key for authenticated encryption. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey types.NodeKey) { - sw.nodeKey = nodeKey -} - -//--------------------------------------------------------------------- -// Service start/stop - -// OnStart implements BaseService. It starts all the reactors and peers. -func (sw *Switch) OnStart() error { - - // FIXME: Temporary hack to pass channel descriptors to MConn transport, - // since they are not available when it is constructed. This will be - // fixed when we implement the new router abstraction. - if t, ok := sw.transport.(*MConnTransport); ok { - t.channelDescs = sw.chDescs - } - - // Start reactors - for _, reactor := range sw.reactors { - err := reactor.Start() - if err != nil { - return fmt.Errorf("failed to start %v: %w", reactor, err) - } - } - - // Start accepting Peers. - go sw.acceptRoutine() - - return nil -} - -// OnStop implements BaseService. It stops all peers and reactors. -func (sw *Switch) OnStop() { - // Stop peers - for _, p := range sw.peers.List() { - sw.stopAndRemovePeer(p, nil) - } - - // Stop reactors - sw.Logger.Debug("Switch: Stopping reactors") - for _, reactor := range sw.reactors { - if err := reactor.Stop(); err != nil { - sw.Logger.Error("error while stopping reactor", "reactor", reactor, "error", err) - } - } -} - -//--------------------------------------------------------------------- -// Peers - -// Broadcast runs a go routine for each attempted send, which will block trying -// to send for defaultSendTimeoutSeconds. Returns a channel which receives -// success values for each attempted send (false if times out). Channel will be -// closed once msg bytes are sent to all peers (or time out). -// -// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", msgBytes) - - peers := sw.peers.List() - var wg sync.WaitGroup - wg.Add(len(peers)) - successChan := make(chan bool, len(peers)) - - for _, peer := range peers { - go func(p Peer) { - defer wg.Done() - success := p.Send(chID, msgBytes) - successChan <- success - }(peer) - } - - go func() { - wg.Wait() - close(successChan) - }() - - return successChan -} - -// NumPeers returns the count of outbound/inbound and outbound-dialing peers. -// unconditional peers are not counted here. 
-func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.IsOutbound() { - if !sw.IsPeerUnconditional(peer.ID()) { - outbound++ - } - } else { - if !sw.IsPeerUnconditional(peer.ID()) { - inbound++ - } - } - } - dialing = sw.dialing.Size() - return -} - -func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool { - _, ok := sw.unconditionalPeerIDs[id] - return ok -} - -// MaxNumOutboundPeers returns a maximum number of outbound peers. -func (sw *Switch) MaxNumOutboundPeers() int { - return sw.config.MaxNumOutboundPeers -} - -// Peers returns the set of peers that are connected to the switch. -func (sw *Switch) Peers() IPeerSet { - return sw.peers -} - -// StopPeerForError disconnects from a peer due to external error. -// If the peer is persistent, it will attempt to reconnect. -// TODO: make record depending on reason. -func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { - if !peer.IsRunning() { - return - } - - sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) - sw.stopAndRemovePeer(peer, reason) - - if peer.IsPersistent() { - var addr *NetAddress - if peer.IsOutbound() { // socket address for outbound peers - addr = peer.SocketAddr() - } else { // self-reported address for inbound peers - var err error - addr, err = peer.NodeInfo().NetAddress() - if err != nil { - sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", - "peer", peer, "err", err) - return - } - } - go sw.reconnectToPeer(addr) - } -} - -// StopPeerGracefully disconnects from a peer gracefully. -// TODO: handle graceful disconnects. -func (sw *Switch) StopPeerGracefully(peer Peer) { - sw.Logger.Info("Stopping peer gracefully") - sw.stopAndRemovePeer(peer, nil) -} - -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if err := peer.Stop(); err != nil { - sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly - } - - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } - - // Removing a peer should go last to avoid a situation where a peer - // reconnect to our node and the switch calls InitPeer before - // RemovePeer is finished. - // https://github.com/tendermint/tendermint/issues/3338 - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } - - sw.conns.RemoveAddr(peer.RemoteAddr()) -} - -// reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. -// If no success after all that, it stops trying, and leaves it -// to the PEX/Addrbook to find the peer with the addr again -// NOTE: this will keep trying even if the handshake or auth fails. -// TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { - if _, exists := sw.reconnecting.GetOrSet(string(addr.ID), addr); exists { - return - } - defer sw.reconnecting.Delete(string(addr.ID)) - - start := time.Now() - sw.Logger.Info("Reconnecting to peer", "addr", addr) - for i := 0; i < reconnectAttempts; i++ { - if !sw.IsRunning() { - return - } - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - - sw.Logger.Info("Error reconnecting to peer. 
Trying again", "tries", i, "err", err, "addr", addr) - // sleep a set amount - sw.randomSleep(reconnectInterval) - continue - } - - sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff", - "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { - if !sw.IsRunning() { - return - } - - // sleep an exponentially increasing amount - sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) - sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - } - sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start)) -} - -// SetAddrBook allows to set address book on Switch. -func (sw *Switch) SetAddrBook(addrBook AddrBook) { - sw.addrBook = addrBook -} - -// MarkPeerAsGood marks the given peer as good when it did something useful -// like contributed to consensus. -func (sw *Switch) MarkPeerAsGood(peer Peer) { - if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.ID()) - } -} - -//--------------------------------------------------------------------- -// Dialing - -type privateAddr interface { - PrivateAddr() bool -} - -func isPrivateAddr(err error) bool { - te, ok := err.(privateAddr) - return ok && te.PrivateAddr() -} - -// DialPeersAsync dials a list of peers asynchronously in random order. -// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// It ignores ErrNetAddressLookup. However, if there are other errors, first -// encounter is returned. -// Nop if there are no peers. -func (sw *Switch) DialPeersAsync(peers []string) error { - netAddrs, errs := NewNetAddressStrings(peers) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.dialPeersAsync(netAddrs) - return nil -} - -func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { - ourAddr := sw.NetAddress() - - // TODO: this code feels like it's in the wrong place. - // The integration tests depend on the addrBook being saved - // right away but maybe we can change that. Recall that - // the addrBook is only written to disk every 2min - if sw.addrBook != nil { - // add peers to `addrBook` - for _, netAddr := range netAddrs { - // do not add our address or ID - if !netAddr.Same(ourAddr) { - if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { - if isPrivateAddr(err) { - sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) - } else { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) - } - } - } - } - // Persist some peers to disk right away. - // NOTE: integration tests depend on this - sw.addrBook.Save() - } - - // permute the list, dial them in random order. 
- perm := mrand.Perm(len(netAddrs)) - for i := 0; i < len(perm); i++ { - go func(i int) { - j := perm[i] - addr := netAddrs[j] - - if addr.Same(ourAddr) { - sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr) - return - } - - sw.randomSleep(0) - - err := sw.DialPeerWithAddress(addr) - if err != nil { - switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: - sw.Logger.Debug("Error dialing peer", "err", err) - default: - sw.Logger.Error("Error dialing peer", "err", err) - } - } - }(i) - } -} - -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects -// and authenticates successfully. -// If we're currently dialing this address or it belongs to an existing peer, -// ErrCurrentlyDialingOrExistingAddress is returned. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { - if sw.IsDialingOrExistingAddress(addr) { - return ErrCurrentlyDialingOrExistingAddress{addr.String()} - } - - sw.dialing.Set(string(addr.ID), addr) - defer sw.dialing.Delete(string(addr.ID)) - - return sw.addOutboundPeerWithConfig(addr, sw.config) -} - -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] -func (sw *Switch) randomSleep(interval time.Duration) { - // nolint:gosec // G404: Use of weak random number generator - r := time.Duration(mrand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond - time.Sleep(r + interval) -} - -// IsDialingOrExistingAddress returns true if switch has a peer with the given -// address or dialing it at the moment. -func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { - return sw.dialing.Has(string(addr.ID)) || - sw.peers.Has(addr.ID) || - (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) -} - -// AddPersistentPeers allows you to set persistent peers. It ignores -// ErrNetAddressLookup. However, if there are other errors, first encounter is -// returned. 
-func (sw *Switch) AddPersistentPeers(addrs []string) error { - sw.Logger.Info("Adding persistent peers", "addrs", addrs) - netAddrs, errs := NewNetAddressStrings(addrs) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.persistentPeersAddrs = netAddrs - return nil -} - -func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { - sw.Logger.Info("Adding unconditional peer ids", "ids", ids) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{} - } - return nil -} - -func (sw *Switch) AddPrivatePeerIDs(ids []string) error { - validIDs := make([]string, 0, len(ids)) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - validIDs = append(validIDs, id) - } - - sw.addrBook.AddPrivateIDs(validIDs) - - return nil -} - -func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { - for _, pa := range sw.persistentPeersAddrs { - if pa.Equals(na) { - return true - } - } - return false -} - -func (sw *Switch) acceptRoutine() { - for { - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Accept() - if err == nil { - // NOTE: The legacy MConn transport did handshaking in Accept(), - // which was asynchronous and avoided head-of-line-blocking. - // However, as handshakes are being migrated out from the transport, - // we just do it synchronously here for now. - peerNodeInfo, _, err = sw.handshakePeer(c, "") - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if err == io.EOF { - err = ErrTransportClosed{} - } - switch err := err.(type) { - case ErrRejected: - addr := err.Addr() - if err.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(&addr) - sw.addrBook.AddOurAddress(&addr) - } - if err.IsIncompatible() { - sw.addrBook.RemoveAddress(&addr) - } - - sw.Logger.Info( - "Inbound Peer rejected", - "err", err, - "numPeers", sw.peers.Size(), - ) - - continue - case ErrFilterTimeout: - sw.Logger.Error( - "Peer filter timed out", - "err", err, - ) - - continue - case ErrTransportClosed: - sw.Logger.Error( - "Stopped accept routine, as transport is closed", - "numPeers", sw.peers.Size(), - ) - default: - sw.Logger.Error( - "Accept on transport errored", - "err", err, - "numPeers", sw.peers.Size(), - ) - // We could instead have a retry loop around the acceptRoutine, - // but that would need to stop and let the node shutdown eventually. - // So might as well panic and let process managers restart the node. - // There's no point in letting the node run without the acceptRoutine, - // since it won't be able to accept new connections. - panic(fmt.Errorf("accept routine exited: %v", err)) - } - - break - } - - isPersistent := false - addr, err := peerNodeInfo.NetAddress() - if err == nil { - isPersistent = sw.IsPeerPersistent(addr) - } - - p := newPeer( - peerNodeInfo, - newPeerConn(false, isPersistent, c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if !sw.IsPeerUnconditional(p.NodeInfo().ID()) { - // Ignore connection if we already have enough peers. 
- _, in, _ := sw.NumPeers() - if in >= sw.config.MaxNumInboundPeers { - sw.Logger.Info( - "Ignoring inbound connection: already have enough inbound peers", - "address", p.SocketAddr(), - "have", in, - "max", sw.config.MaxNumInboundPeers, - ) - _ = p.CloseConn() - continue - } - - } - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - sw.Logger.Info( - "Ignoring inbound connection: error while adding peer", - "err", err, - "id", p.ID(), - ) - } - } -} - -// dial the peer; make secret connection; authenticate against the dialed ID; -// add the peer. -// if dialing fails, start the reconnect loop. If handshake fails, it's over. -// If peer is started successfully, reconnectLoop will start when -// StopPeerForError is called. -func (sw *Switch) addOutboundPeerWithConfig( - addr *NetAddress, - cfg *config.P2PConfig, -) error { - sw.Logger.Info("Dialing peer", "address", addr) - - // XXX(xla): Remove the leakage of test concerns in implementation. - if cfg.TestDialFail { - go sw.reconnectToPeer(addr) - return fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - // Hardcoded timeout moved from MConn transport during refactoring. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Dial(ctx, Endpoint{ - Protocol: MConnProtocol, - IP: addr.IP, - Port: addr.Port, - }) - if err == nil { - peerNodeInfo, _, err = sw.handshakePeer(c, addr.ID) - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if e, ok := err.(ErrRejected); ok { - if e.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. - sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - } - if e.IsIncompatible() { - sw.addrBook.RemoveAddress(addr) - } - - return err - } - - // retry persistent peers after - // any dial error besides IsSelf() - if sw.IsPeerPersistent(addr) { - go sw.reconnectToPeer(addr) - } - - return err - } - - p := newPeer( - peerNodeInfo, - newPeerConn(true, sw.IsPeerPersistent(addr), c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - return err - } - - return nil -} - -func (sw *Switch) handshakePeer( - c Connection, - expectPeerID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { - // Moved from transport and hardcoded until legacy P2P stack removal. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: fmt.Errorf("handshake failed: %v", err), - isAuthFailure: true, - } - } - - if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - isNodeInfoInvalid: true, - } - } - - // For outgoing conns, ensure connection key matches dialed key. 
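The check that follows derives the peer's node ID from the public key returned by the handshake and rejects the connection when it does not match the ID that was dialed. A small self-contained illustration of that authentication step; the ID derivation used here (first 20 bytes of a SHA-256 of the key) is only an approximation for the sketch, not necessarily the exact scheme:

package main

import (
	"crypto/ed25519"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// nodeIDFromPubKey mimics deriving a 20-byte node ID from a peer's public key.
func nodeIDFromPubKey(pub ed25519.PublicKey) string {
	sum := sha256.Sum256(pub)
	return hex.EncodeToString(sum[:20])
}

// verifyDialedID rejects a handshake whose key does not hash to the ID we dialed.
func verifyDialedID(dialedID string, peerPub ed25519.PublicKey) error {
	if got := nodeIDFromPubKey(peerPub); got != dialedID {
		return fmt.Errorf("conn ID (%s) and dialed ID (%s) mismatch", got, dialedID)
	}
	return nil
}

func main() {
	pub, _, _ := ed25519.GenerateKey(nil)
	fmt.Println(verifyDialedID(nodeIDFromPubKey(pub), pub)) // <nil>
	fmt.Println(verifyDialedID("deadbeef", pub))            // mismatch error
}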
- if expectPeerID != "" { - peerID := types.NodeIDFromPubKey(peerKey) - if expectPeerID != peerID { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - id: peerID, - err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) mismatch", - peerID, - expectPeerID, - ), - isAuthFailure: true, - } - } - } - - if sw.nodeInfo.ID() == peerInfo.ID() { - return peerInfo, peerKey, ErrRejected{ - addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), - conn: c.(*mConnConnection).conn, - id: peerInfo.ID(), - isSelf: true, - } - } - - if err = sw.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - id: peerInfo.ID(), - isIncompatible: true, - } - } - - return peerInfo, peerKey, nil -} - -func (sw *Switch) filterPeer(p Peer) error { - // Avoid duplicate - if sw.peers.Has(p.ID()) { - return ErrRejected{id: p.ID(), isDuplicate: true} - } - - errc := make(chan error, len(sw.peerFilters)) - - for _, f := range sw.peerFilters { - go func(f PeerFilterFunc, p Peer, errc chan<- error) { - errc <- f(sw.peers, p) - }(f, p, errc) - } - - for i := 0; i < cap(errc); i++ { - select { - case err := <-errc: - if err != nil { - return ErrRejected{id: p.ID(), err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - } - - return nil -} - -// filterConn filters a connection, rejecting it if this function errors. -// -// FIXME: This is only here for compatibility with the current Switch code. In -// the new P2P stack, peer/connection filtering should be moved into the Router -// or PeerManager and removed from here. -func (sw *Switch) filterConn(conn net.Conn) error { - if sw.conns.Has(conn) { - return ErrRejected{conn: conn, isDuplicate: true} - } - - host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) - if err != nil { - return err - } - ip := net.ParseIP(host) - if ip == nil { - return fmt.Errorf("connection address has invalid IP address %q", host) - } - - // Apply filter callbacks. - chErr := make(chan error, len(sw.connFilters)) - for _, connFilter := range sw.connFilters { - go func(connFilter ConnFilterFunc) { - chErr <- connFilter(sw.conns, conn, []net.IP{ip}) - }(connFilter) - } - - for i := 0; i < cap(chErr); i++ { - select { - case err := <-chErr: - if err != nil { - return ErrRejected{conn: conn, err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - - } - - // FIXME: Doesn't really make sense to set this here, but we preserve the - // behavior from the previous P2P transport implementation. - sw.conns.Set(conn, []net.IP{ip}) - return nil -} - -// addPeer starts up the Peer and adds it to the Switch. Error is returned if -// the peer is filtered out or failed to start or can't be added. -func (sw *Switch) addPeer(p Peer) error { - if err := sw.filterPeer(p); err != nil { - return err - } - - p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) - - // Handle the shut down case where the switch has stopped but we're - // concurrently trying to add a peer. - if !sw.IsRunning() { - // XXX should this return an error or just log and terminate? - sw.Logger.Error("Won't start a peer - switch is not running", "peer", p) - return nil - } - - // Add some data to the peer, which is required by reactors. - for _, reactor := range sw.reactors { - p = reactor.InitPeer(p) - } - - // Start the peer's send/recv routines. 
- // Must start it before adding it to the peer set - // to prevent Start and Stop from being called concurrently. - err := p.Start() - if err != nil { - // Should never happen - sw.Logger.Error("Error starting peer", "err", err, "peer", p) - return err - } - - // Add the peer to PeerSet. Do this before starting the reactors - // so that if Receive errors, we will find the peer and remove it. - // Add should not err since we already checked peers.Has(). - if err := sw.peers.Add(p); err != nil { - return err - } - sw.metrics.Peers.Add(float64(1)) - - // Start all the reactor protocols on the peer. - for _, reactor := range sw.reactors { - reactor.AddPeer(p) - } - - sw.Logger.Info("Added peer", "peer", p) - - return nil -} - -// NewNetAddressStrings returns an array of NetAddress'es build using -// the provided strings. -func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) - errs := make([]error, 0) - for _, addr := range addrs { - netAddr, err := types.NewNetAddressString(addr) - if err != nil { - errs = append(errs, err) - } else { - netAddrs = append(netAddrs, netAddr) - } - } - return netAddrs, errs -} diff --git a/internal/p2p/switch_test.go b/internal/p2p/switch_test.go deleted file mode 100644 index 8cb755c9f..000000000 --- a/internal/p2p/switch_test.go +++ /dev/null @@ -1,932 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "sync/atomic" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig - ctx = context.Background() -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -type PeerMessage struct { - PeerID types.NodeID - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx tmsync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor("TestReactor", tr) - tr.SetLogger(log.TestingLogger()) - return tr -} - -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer Peer) {} - -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} - -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { - tr.mtx.Lock() - defer tr.mtx.Unlock() - return tr.msgsReceived[chID] -} - -//----------------------------------------------------------------------------- - -// convenience method for creating two switches 
connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. - switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) - return switches[0], switches[1] -} - -func initSwitchFunc(i int, sw *Switch) *Switch { - sw.SetAddrBook(&AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) - - // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, true)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, true)) - - return sw -} - -func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) - t.Cleanup(func() { - if err := s1.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := s2.Stop(); err != nil { - t.Error(err) - } - }) - - if s1.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) - } - if s2.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size()) - } - - // Lets send some messages - ch0Msg := []byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - - assertMsgReceivedWithTimeout(t, - ch0Msg, - byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch1Msg, - byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch2Msg, - byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) -} - -func assertMsgReceivedWithTimeout( - t *testing.T, - msgBytes []byte, - channel byte, - reactor *TestReactor, - checkPeriod, - timeout time.Duration, -) { - ticker := time.NewTicker(checkPeriod) - for { - select { - case <-ticker.C: - msgs := reactor.getMsgs(channel) - if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. 
Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) - } - return - } - - case <-time.After(timeout): - t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) - } - } -} - -func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - - // simulate s1 having a public IP by creating a remote peer with the same ID - rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} - rp.Start() - - // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr()) - if assert.Error(t, err) { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { - t.Errorf("expected self to be rejected") - } - } else { - t.Errorf("expected ErrRejected") - } - } - - assert.True(t, s1.addrBook.OurAddress(rp.Addr())) - assert.False(t, s1.addrBook.HasAddress(rp.Addr())) - - rp.Stop() - - assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) -} - -func TestSwitchDialFailsOnIncompatiblePeer(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := s1.NodeInfo() - ni.Network = "network-a" - s1.SetNodeInfo(ni) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - err := s1.DialPeerWithAddress(rp.Addr()) - require.Error(t, err) - errRejected, ok := err.(ErrRejected) - require.True(t, ok, "expected error to be of type IsRejected") - require.True(t, errRejected.IsIncompatible(), "expected error to be IsIncompatible") - - // remote peer should not have been added to the addressbook - require.False(t, s1.addrBook.HasAddress(rp.Addr())) -} - -func TestSwitchPeerFilter(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { return nil }, - func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, - func(_ IPeerSet, _ Peer) error { return nil }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { - t.Errorf("expected peer to be filtered") - } - } else { - t.Errorf("expected ErrRejected") - } -} - -func TestSwitchPeerFilterTimeout(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { - time.Sleep(10 * time.Millisecond) - return nil - }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchFilterTimeout(5*time.Millisecond), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Log(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - 
} - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout") - } -} - -func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err := sw.addPeer(p); err != nil { - t.Fatal(err) - } - - err = sw.addPeer(p) - if errRej, ok := err.(ErrRejected); ok { - if !errRej.IsDuplicate() { - t.Errorf("expected peer to be duplicate. got %v", errRej) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { - time.Sleep(timeout) - if sw.Peers().Size() != 0 { - t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) - } -} - -func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - if err != nil { - t.Error(err) - } - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - require.Nil(err) - - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection - err = p.CloseConn() - require.NoError(err) - - assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) - assert.False(p.IsRunning()) -} - -func TestSwitchStopPeerForError(t *testing.T) { - s := httptest.NewServer(promhttp.Handler()) - defer s.Close() - - scrapeMetrics := func() string { - resp, err := http.Get(s.URL) - require.NoError(t, err) - defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - peersMetricValue := func() float64 { - matches := re.FindStringSubmatch(scrapeMetrics()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - p2pMetrics := PrometheusMetrics(namespace) - - // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { - // set metrics on sw1 - if i == 0 { - opt := WithMetrics(p2pMetrics) - opt(sw) - } - return initSwitchFunc(i, sw) - }) - - assert.Equal(t, 
len(sw1.Peers().List()), 1) - assert.EqualValues(t, 1, peersMetricValue()) - - // send messages to the peer from sw1 - p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) - - // stop sw2. this should cause the p to fail, - // which results in calling StopPeerForError internally - t.Cleanup(func() { - if err := sw2.Stop(); err != nil { - t.Error(err) - } - }) - - // now call StopPeerForError explicitly, eg. from a reactor - sw1.StopPeerForError(p, fmt.Errorf("some err")) - - assert.Equal(t, len(sw1.Peers().List()), 0) - assert.EqualValues(t, 0, peersMetricValue()) -} - -func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. simulate failure by closing connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - err = sw.DialPeerWithAddress(rp.Addr()) - require.Nil(t, err) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - p := sw.Peers().List()[0] - err = p.(*peer).CloseConn() - require.NoError(t, err) - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.False(t, p.IsRunning()) // old peer instance - assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - - // 2. simulate first time dial failure - rp = &remotePeer{ - PrivKey: ed25519.GenPrivKey(), - Config: cfg, - // Use different interface to prevent duplicate IP filter, this will break - // beyond two peers. - listenAddr: "127.0.0.1:0", - } - rp.Start() - defer rp.Stop() - - conf := config.DefaultP2PConfig() - conf.TestDialFail = true // will trigger a reconnect - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) - require.NotNil(t, err) - // DialPeerWithAddres - sw.peerConfig resets the dialer - waitUntilSwitchHasAtLeastNPeers(sw, 2) - assert.Equal(t, 2, sw.Peers().Size()) -} - -func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. 
simulate failure by closing the connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - conn, err := rp.Dial(sw.NetAddress()) - require.NoError(t, err) - time.Sleep(50 * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - conn.Close() - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestSwitchDialPeersAsync(t *testing.T) { - if testing.Short() { - return - } - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.DialPeersAsync([]string{rp.Addr().String()}) - require.NoError(t, err) - time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) -} - -func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - has := sw.Peers().Size() - if has >= n { - break - } - } -} - -func TestSwitchFullConnectivity(t *testing.T) { - switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) - defer func() { - for _, sw := range switches { - sw := sw - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - } - }() - - for i, sw := range switches { - if sw.Peers().Size() != 2 { - t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) - } - } -} - -func TestSwitchAcceptRoutine(t *testing.T) { - cfg.MaxNumInboundPeers = 5 - - // Create some unconditional peers. - const unconditionalPeersNum = 2 - var ( - unconditionalPeers = make([]*remotePeer, unconditionalPeersNum) - unconditionalPeerIDs = make([]string, unconditionalPeersNum) - ) - for i := 0; i < unconditionalPeersNum; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - unconditionalPeers[i] = peer - unconditionalPeerIDs[i] = string(peer.ID()) - } - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) - require.NoError(t, err) - err = sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - // 0. check there are no peers - assert.Equal(t, 0, sw.Peers().Size()) - - // 1. check we connect up to MaxNumInboundPeers - peers := make([]*remotePeer, 0) - for i := 0; i < cfg.MaxNumInboundPeers; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peers = append(peers, peer) - peer.Start() - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(100 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - - // 2. 
check we close new connections if we already have MaxNumInboundPeers peers - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - conn, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // check conn is closed - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - peer.Stop() - - // 3. check we connect to unconditional peers despite the limit. - for _, peer := range unconditionalPeers { - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(10 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size()) - - for _, peer := range peers { - peer.Stop() - } - for _, peer := range unconditionalPeers { - peer.Stop() - } -} - -func TestSwitchRejectsIncompatiblePeers(t *testing.T) { - sw := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := sw.NodeInfo() - ni.Network = "network-a" - sw.SetNodeInfo(ni) - - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - assert.Equal(t, 0, sw.Peers().Size()) - - conn, err := rp.Dial(sw.NetAddress()) - assert.Nil(t, err) - - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - - assert.Equal(t, 0, sw.Peers().Size()) -} - -type errorTransport struct { - acceptErr error -} - -func (et errorTransport) String() string { - return "error" -} - -func (et errorTransport) Protocols() []Protocol { - return []Protocol{"error"} -} - -func (et errorTransport) Accept() (Connection, error) { - return nil, et.acceptErr -} -func (errorTransport) Dial(context.Context, Endpoint) (Connection, error) { - panic("not implemented") -} -func (errorTransport) Close() error { panic("not implemented") } -func (errorTransport) FlushClose() error { panic("not implemented") } -func (errorTransport) Endpoints() []Endpoint { panic("not implemented") } - -func TestSwitchAcceptRoutineErrorCases(t *testing.T) { - sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - - sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - // TODO(melekes) check we remove our address from addrBook - - sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) -} - -// mockReactor checks that InitPeer never called before RemovePeer. If that's -// not true, InitCalledBeforeRemoveFinished will return true. 
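TestSwitchAcceptRoutine above exercises the inbound admission policy: connections are refused once MaxNumInboundPeers is reached, except for peers registered as unconditional. A compact sketch of that rule, with admitInbound and its parameters as illustrative stand-ins rather than the Switch API:

package main

import "fmt"

// admitInbound reports whether an inbound peer should be kept: unconditional
// peers always pass, everyone else only while the inbound count is below the
// configured maximum.
func admitInbound(peerID string, inbound, maxInbound int, unconditional map[string]struct{}) bool {
	if _, ok := unconditional[peerID]; ok {
		return true // unconditional peers bypass the limit
	}
	return inbound < maxInbound
}

func main() {
	uncond := map[string]struct{}{"peer-X": {}}
	fmt.Println(admitInbound("peer-A", 4, 5, uncond)) // true: below the cap
	fmt.Println(admitInbound("peer-B", 5, 5, uncond)) // false: cap reached
	fmt.Println(admitInbound("peer-X", 5, 5, uncond)) // true: unconditional
}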
-type mockReactor struct { - *BaseReactor - - // atomic - removePeerInProgress uint32 - initCalledBeforeRemoveFinished uint32 -} - -func (r *mockReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{{ID: testCh, Priority: 10}} -} - -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { - atomic.StoreUint32(&r.removePeerInProgress, 1) - defer atomic.StoreUint32(&r.removePeerInProgress, 0) - time.Sleep(100 * time.Millisecond) -} - -func (r *mockReactor) InitPeer(peer Peer) Peer { - if atomic.LoadUint32(&r.removePeerInProgress) == 1 { - atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) - } - - return peer -} - -func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { - return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 -} - -// see stopAndRemovePeer -func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { - // make reactor - reactor := &mockReactor{} - reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { - sw.AddReactor("mock", reactor) - return sw - }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // add peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - - // wait till the switch adds rp to the peer set, then stop the peer asynchronously - for { - time.Sleep(20 * time.Millisecond) - if peer := sw.Peers().Get(rp.ID()); peer != nil { - go sw.StopPeerForError(peer, "test") - break - } - } - - // simulate peer reconnecting to us - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - // wait till the switch adds rp to the peer set - time.Sleep(50 * time.Millisecond) - - // make sure reactor.RemovePeer is finished before InitPeer is called - assert.False(t, reactor.InitCalledBeforeRemoveFinished()) -} - -func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - - b.Cleanup(func() { - if err := s1.Stop(); err != nil { - b.Error(err) - } - }) - - b.Cleanup(func() { - if err := s2.Stop(); err != nil { - b.Error(err) - } - }) - - // Allow time for goroutines to boot up - time.Sleep(1 * time.Second) - - b.ResetTimer() - - numSuccess, numFailure := 0, 0 - - // Send random message from foo channel to another - for i := 0; i < b.N; i++ { - chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) - for s := range successChan { - if s { - numSuccess++ - } else { - numFailure++ - } - } - } - - b.Logf("success: %v, failure: %v", numSuccess, numFailure) -} - -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ - "127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) - assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) -} diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go index b2851646d..d29709a89 100644 --- a/internal/p2p/test_util.go +++ 
b/internal/p2p/test_util.go @@ -1,42 +1,15 @@ package p2p import ( - "context" "fmt" mrand "math/rand" - "net" - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p/conn" ) -const testCh = 0x01 - //------------------------------------------------ -func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { - sw.peers.Add(peer) //nolint:errcheck // ignore error -} - -func CreateRandomPeer(outbound bool) Peer { - addr, netAddr := CreateRoutableAddr() - p := &peer{ - peerConn: peerConn{outbound: outbound}, - nodeInfo: types.NodeInfo{ - NodeID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - metrics: NopMetrics(), - } - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p -} - // nolint:gosec // G404: Use of weak random number generator func CreateRoutableAddr() (addr string, netAddr *NetAddress) { for { @@ -57,232 +30,3 @@ func CreateRoutableAddr() (addr string, netAddr *NetAddress) { } return } - -//------------------------------------------------------------------ -// Connects switches via arbitrary net.Conn. Used for testing. - -const TestHost = "localhost" - -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. -func MakeConnectedSwitches(cfg *config.P2PConfig, - n int, - initSwitch func(int, *Switch) *Switch, - connect func([]*Switch, int, int), -) []*Switch { - switches := make([]*Switch, n) - for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch, log.TestingLogger()) - } - - if err := StartSwitches(switches); err != nil { - panic(err) - } - - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { - connect(switches, i, j) - } - } - - return switches -} - -// Connect2Switches will connect switches i and j via net.Pipe(). -// Blocks until a connection is established. -// NOTE: caller ensures i and j are within bounds. -func Connect2Switches(switches []*Switch, i, j int) { - switchI := switches[i] - switchJ := switches[j] - - c1, c2 := conn.NetPipe() - - doneCh := make(chan struct{}) - go func() { - err := switchI.addPeerWithConnection(c1) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - go func() { - err := switchJ.addPeerWithConnection(c2) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - <-doneCh - <-doneCh -} - -func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := testInboundPeerConn(sw.transport.(*MConnTransport), conn) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - - p := newPeer( - peerNodeInfo, - pc, - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err = sw.addPeer(p); err != nil { - pc.CloseConn() - return err - } - - return nil -} - -// StartSwitches calls sw.Start() for each given switch. -// It returns the first encountered error. 
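Connect2Switches above joins two test switches over an in-memory pipe, handing one end to each switch in its own goroutine and blocking until both sides finish. A generic, self-contained sketch of the same pattern; connectBothEnds is an illustrative helper, not part of the package:

package main

import (
	"fmt"
	"net"
)

// connectBothEnds hands the two halves of an in-memory pipe to two setup
// functions concurrently and waits for both to finish, returning the first
// error encountered.
func connectBothEnds(setupA, setupB func(net.Conn) error) error {
	c1, c2 := net.Pipe()
	errc := make(chan error, 2)
	go func() { errc <- setupA(c1) }()
	go func() { errc <- setupB(c2) }()
	for i := 0; i < 2; i++ {
		if err := <-errc; err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := connectBothEnds(
		func(c net.Conn) error {
			// Write in the background; net.Pipe is synchronous, so the write
			// completes only when the other side reads.
			go func() { c.Write([]byte("ping")) }()
			return nil
		},
		func(c net.Conn) error {
			buf := make([]byte, 4)
			if _, err := c.Read(buf); err != nil {
				return err
			}
			fmt.Printf("received %q\n", buf)
			return nil
		},
	)
	fmt.Println("err:", err)
}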
-func StartSwitches(switches []*Switch) error { - for _, s := range switches { - err := s.Start() // start switch and reactors - if err != nil { - return err - } - } - return nil -} - -func MakeSwitch( - cfg *config.P2PConfig, - i int, - network, version string, - initSwitch func(int, *Switch) *Switch, - logger log.Logger, - opts ...SwitchOption, -) *Switch { - - nodeKey := types.GenNodeKey() - nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i)) - addr, err := types.NewNetAddressString( - nodeKey.ID.AddressString(nodeInfo.ListenAddr), - ) - if err != nil { - panic(err) - } - - swLogger := logger.With("switch", i) - t := NewMConnTransport(swLogger, MConnConfig(cfg), - []*ChannelDescriptor{}, MConnTransportOptions{}) - - // TODO: let the config be passed in? - sw := initSwitch(i, NewSwitch(cfg, t, opts...)) - sw.SetLogger(swLogger) - sw.SetNodeKey(nodeKey) - - if err := t.Listen(NewEndpoint(addr)); err != nil { - panic(err) - } - - ni := nodeInfo - ni.Channels = []byte{} - for ch := range sw.reactorsByCh { - ni.Channels = append(ni.Channels, ch) - } - nodeInfo = ni - - // TODO: We need to setup reactors ahead of time so the NodeInfo is properly - // populated and we don't have to do those awkward overrides and setters. - sw.SetNodeInfo(nodeInfo) - - return sw -} - -func testInboundPeerConn( - transport *MConnTransport, - conn net.Conn, -) (peerConn, error) { - return testPeerConn(transport, conn, false, false) -} - -func testPeerConn( - transport *MConnTransport, - rawConn net.Conn, - outbound, persistent bool, -) (pc peerConn, err error) { - - conn := newMConnConnection(transport.logger, rawConn, transport.mConnConfig, transport.channelDescs) - - return newPeerConn(outbound, persistent, conn), nil -} - -//---------------------------------------------------------------- -// rand node info - -func testNodeInfo(id types.NodeID, name string) types.NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") -} - -func testNodeInfoWithNetwork(id types.NodeID, name, network string) types.NodeInfo { - return types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - Network: network, - Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, - Moniker: name, - Other: types.NodeInfoOther{ - TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - }, - } -} - -func getFreePort() int { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return port -} - -type AddrBookMock struct { - Addrs map[string]struct{} - OurAddrs map[string]struct{} - PrivateAddrs map[string]struct{} -} - -var _ AddrBook = (*AddrBookMock)(nil) - -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { - book.Addrs[addr.String()] = struct{}{} - return nil -} -func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } -func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { - _, ok := book.OurAddrs[addr.String()] - return ok -} -func (book *AddrBookMock) MarkGood(types.NodeID) {} -func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.Addrs[addr.String()] - return ok -} -func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.Addrs, addr.String()) -} -func (book *AddrBookMock) Save() {} -func (book *AddrBookMock) AddPrivateIDs(addrs []string) { - for _, addr := range addrs { - book.PrivateAddrs[addr] = struct{}{} - } -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go 
index a3245dfc8..0b2311fa3 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -9,7 +9,6 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) //go:generate ../../scripts/mockery_generate.sh Transport|Connection @@ -20,14 +19,6 @@ const ( defaultProtocol Protocol = MConnProtocol ) -// defaultProtocolVersion populates the Block and P2P versions using -// the global values, but not the App. -var defaultProtocolVersion = types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, -} - // Protocol identifies a transport protocol. type Protocol string diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go index ac49bbd31..d17796fff 100644 --- a/internal/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -1,13 +1,9 @@ package core import ( - "errors" - - "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. @@ -58,52 +54,28 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.Re // Get Peer consensus states. var peerStates []coretypes.PeerStateInfo - switch { - case env.P2PPeers != nil: - peers := env.P2PPeers.Peers().List() - peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) - for _, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*consensus.PeerState) - if !ok { // peer does not have a state yet - continue - } - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } + peers := env.PeerManager.Peers() + peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) + for _, pid := range peers { + peerState, ok := env.ConsensusReactor.GetPeerState(pid) + if !ok { + continue + } + + peerStateJSON, err := peerState.ToJSON() + if err != nil { + return nil, err + } + + addr := env.PeerManager.Addresses(pid) + if len(addr) != 0 { peerStates = append(peerStates, coretypes.PeerStateInfo{ // Peer basic info. - NodeAddress: peer.SocketAddr().String(), + NodeAddress: addr[0].String(), // Peer consensus state. PeerState: peerStateJSON, }) } - case env.PeerManager != nil: - peers := env.PeerManager.Peers() - peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) - for _, pid := range peers { - peerState, ok := env.ConsensusReactor.GetPeerState(pid) - if !ok { - continue - } - - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - - addr := env.PeerManager.Addresses(pid) - if len(addr) >= 1 { - peerStates = append(peerStates, coretypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: addr[0].String(), - // Peer consensus state. - PeerState: peerStateJSON, - }) - } - } - default: - return nil, errors.New("no peer system configured") } // Get self round state. 
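With this change DumpConsensusState (and NetInfo below) is driven entirely by the PeerManager: walk the known peer IDs, skip peers that have no consensus state or no recorded address, and report the first address otherwise. A self-contained sketch of that aggregation; peerInfo, stateFor and addrsFor are illustrative stand-ins for the real types:

package main

import "fmt"

// peerInfo pairs a peer's first known address with its serialized state.
type peerInfo struct {
	NodeAddress string
	State       string
}

// collectPeerInfo keeps only peers for which both a state and at least one
// address are available; everything else is silently skipped.
func collectPeerInfo(
	peerIDs []string,
	stateFor func(string) (string, bool),
	addrsFor func(string) []string,
) []peerInfo {
	out := make([]peerInfo, 0, len(peerIDs))
	for _, id := range peerIDs {
		state, ok := stateFor(id)
		if !ok {
			continue // peer has no consensus state yet
		}
		addrs := addrsFor(id)
		if len(addrs) == 0 {
			continue // no address recorded for this peer
		}
		out = append(out, peerInfo{NodeAddress: addrs[0], State: state})
	}
	return out
}

func main() {
	ids := []string{"a", "b", "c"}
	states := map[string]string{"a": `{"height":10}`, "c": `{"height":9}`}
	addrs := map[string][]string{"a": {"1.2.3.4:26656"}, "b": {"5.6.7.8:26656"}}
	infos := collectPeerInfo(ids,
		func(id string) (string, bool) { s, ok := states[id]; return s, ok },
		func(id string) []string { return addrs[id] })
	fmt.Println(infos) // only "a" has both a state and an address
}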
diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index 11b138eac..f05c34f14 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -51,14 +51,6 @@ type transport interface { NodeInfo() types.NodeInfo } -type peers interface { - AddPersistentPeers([]string) error - AddUnconditionalPeerIDs([]string) error - AddPrivatePeerIDs([]string) error - DialPeersAsync([]string) error - Peers() p2p.IPeerSet -} - type consensusReactor interface { WaitSync() bool GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) @@ -83,7 +75,6 @@ type Environment struct { EvidencePool sm.EvidencePool ConsensusState consensusState ConsensusReactor consensusReactor - P2PPeers peers // Legacy p2p stack P2PTransport transport diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go index 8bcc04dd0..fdf4be69b 100644 --- a/internal/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -3,9 +3,7 @@ package core import ( "errors" "fmt" - "strings" - "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -13,33 +11,19 @@ import ( // NetInfo returns network info. // More: https://docs.tendermint.com/master/rpc/#/Info/net_info func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { - var peers []coretypes.Peer + peerList := env.PeerManager.Peers() - switch { - case env.P2PPeers != nil: - peersList := env.P2PPeers.Peers().List() - peers = make([]coretypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - peers = append(peers, coretypes.Peer{ - ID: peer.ID(), - URL: peer.SocketAddr().String(), - }) + peers := make([]coretypes.Peer, 0, len(peerList)) + for _, peer := range peerList { + addrs := env.PeerManager.Addresses(peer) + if len(addrs) == 0 { + continue } - case env.PeerManager != nil: - peerList := env.PeerManager.Peers() - for _, peer := range peerList { - addrs := env.PeerManager.Addresses(peer) - if len(addrs) == 0 { - continue - } - peers = append(peers, coretypes.Peer{ - ID: peer, - URL: addrs[0].String(), - }) - } - default: - return nil, errors.New("peer management system does not support NetInfo responses") + peers = append(peers, coretypes.Peer{ + ID: peer, + URL: addrs[0].String(), + }) } return &coretypes.ResultNetInfo{ @@ -50,70 +34,6 @@ func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo }, nil } -// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). -func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(seeds) == 0 { - return &coretypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", coretypes.ErrInvalidRequest) - } - env.Logger.Info("DialSeeds", "seeds", seeds) - if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { - return &coretypes.ResultDialSeeds{}, err - } - return &coretypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil -} - -// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), -// optionally making them persistent. 
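The removed dial_seeds/dial_peers endpoints accept peers as "id@host:port" strings and extract the node ID by splitting on "@" (see the getIDs helper removed further down). A minimal sketch of that parsing, with slightly stricter validation than the original helper:

package main

import (
	"fmt"
	"strings"
)

// splitPeerString extracts the node ID and network address from a peer
// string of the form "id@host:port".
func splitPeerString(peer string) (id, addr string, err error) {
	parts := strings.SplitN(peer, "@", 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return "", "", fmt.Errorf("invalid peer string %q: want id@host:port", peer)
	}
	return parts[0], parts[1], nil
}

func main() {
	id, addr, err := splitPeerString("d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198")
	fmt.Println(id, addr, err)

	_, _, err = splitPeerString("127.0.0.1:41198") // missing the ID part
	fmt.Println(err)
}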
-func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, - peers []string, - persistent, unconditional, private bool) (*coretypes.ResultDialPeers, error) { - - if env.P2PPeers == nil { - return nil, errors.New("peer management system does not support this operation") - } - - if len(peers) == 0 { - return &coretypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", coretypes.ErrInvalidRequest) - } - - ids, err := getIDs(peers) - if err != nil { - return &coretypes.ResultDialPeers{}, err - } - - env.Logger.Info("DialPeers", "peers", peers, "persistent", - persistent, "unconditional", unconditional, "private", private) - - if persistent { - if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if private { - if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if unconditional { - if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { - return &coretypes.ResultDialPeers{}, err - } - } - - if err := env.P2PPeers.DialPeersAsync(peers); err != nil { - return &coretypes.ResultDialPeers{}, err - } - - return &coretypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil -} - // Genesis returns genesis file. // More: https://docs.tendermint.com/master/rpc/#/Info/genesis func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { @@ -145,18 +65,3 @@ func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*core Data: env.genChunks[id], }, nil } - -func getIDs(peers []string) ([]string, error) { - ids := make([]string, 0, len(peers)) - - for _, peer := range peers { - - spl := strings.Split(peer, "@") - if len(spl) != 2 { - return nil, p2p.ErrNetAddressNoID{Addr: peer} - } - ids = append(ids, spl[0]) - - } - return ids, nil -} diff --git a/internal/rpc/core/net_test.go b/internal/rpc/core/net_test.go deleted file mode 100644 index 7c12a69d6..000000000 --- a/internal/rpc/core/net_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package core - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - seeds []string - isErr bool - }{ - {[]string{}, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, - {[]string{"127.0.0.1:41198"}, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} - -func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(config.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - sw.SetAddrBook(&p2p.AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{}), - PrivateAddrs: 
make(map[string]struct{}), - }) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - peers []string - persistence, unconditional, private bool - isErr bool - }{ - {[]string{}, false, false, false, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false}, - {[]string{"127.0.0.1:41198"}, true, true, false, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} diff --git a/internal/rpc/core/routes.go b/internal/rpc/core/routes.go index 1eb50fe4e..73eaaf14c 100644 --- a/internal/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -55,7 +55,5 @@ func (env *Environment) GetRoutes() RoutesMap { // AddUnsafeRoutes adds unsafe routes. func (env *Environment) AddUnsafe(routes RoutesMap) { // control API - routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds", false) - routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private", false) routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false) } diff --git a/internal/rpc/core/status.go b/internal/rpc/core/status.go index bdd7ee1fa..b883c6dc2 100644 --- a/internal/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -58,6 +58,7 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, VotingPower: votingPower, } } + result := &coretypes.ResultStatus{ NodeInfo: env.P2PTransport.NodeInfo(), SyncInfo: coretypes.SyncInfo{ diff --git a/node/node.go b/node/node.go index 820e0630a..891c5fce6 100644 --- a/node/node.go +++ b/node/node.go @@ -54,10 +54,8 @@ type nodeImpl struct { // network transport *p2p.MConnTransport - sw *p2p.Switch // p2p connections peerManager *p2p.PeerManager router *p2p.Router - addrBook pex.AddrBook // known peers nodeInfo types.NodeInfo nodeKey types.NodeKey // our node privkey isListening bool @@ -292,14 +290,6 @@ func makeNode(cfg *config.Config, return nil, fmt.Errorf("could not create blockchain reactor: %w", err) } - // TODO: Remove this once the switch is removed. - var bcReactorForSwitch p2p.Reactor - if bcReactorShim != nil { - bcReactorForSwitch = bcReactorShim - } else { - bcReactorForSwitch = bcReactor.(p2p.Reactor) - } - // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. if stateSync { @@ -312,29 +302,15 @@ func makeNode(cfg *config.Config, // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. 
See: // https://github.com/tendermint/tendermint/issues/4644 - var ( - stateSyncReactor *statesync.Reactor - stateSyncReactorShim *p2p.ReactorShim - - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(stateSyncReactorShim) - peerUpdates = stateSyncReactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, statesync.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - - stateSyncReactor = statesync.NewReactor( + ssLogger := logger.With("module", "statesync") + ssReactorShim := p2p.NewReactorShim(ssLogger, "StateSyncShim", statesync.ChannelShims) + channels := makeChannelsFromShims(router, statesync.ChannelShims) + peerUpdates := peerManager.Subscribe() + stateSyncReactor := statesync.NewReactor( genDoc.ChainID, genDoc.InitialHeight, *cfg.StateSync, - stateSyncReactorShim.Logger, + ssLogger, proxyApp.Snapshot(), proxyApp.Query(), channels[statesync.SnapshotChannel], @@ -353,10 +329,10 @@ func makeNode(cfg *config.Config, // transports can either be agnostic to channel descriptors or can be // declared in the constructor. transport.AddChannelDescriptors(mpReactorShim.GetChannels()) - transport.AddChannelDescriptors(bcReactorForSwitch.GetChannels()) + transport.AddChannelDescriptors(bcReactorShim.GetChannels()) transport.AddChannelDescriptors(csReactorShim.GetChannels()) transport.AddChannelDescriptors(evReactorShim.GetChannels()) - transport.AddChannelDescriptors(stateSyncReactorShim.GetChannels()) + transport.AddChannelDescriptors(ssReactorShim.GetChannels()) // Optionally, start the pex reactor // @@ -371,44 +347,14 @@ func makeNode(cfg *config.Config, // If PEX is on, it should handle dialing the seeds. Otherwise the switch does it. 
// Note we currently use the addrBook regardless at least for AddOurAddress - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) + var pexReactor service.Service pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if cfg.P2P.UseLegacy { - // setup Transport and Switch - sw = createSwitch( - cfg, transport, nodeMetrics.p2p, mpReactorShim, bcReactorForSwitch, - stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger) - } else { - addrBook = nil - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) - if err != nil { - return nil, err - } + pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) + if err != nil { + return nil, err } if cfg.RPC.PprofListenAddress != "" { @@ -424,10 +370,8 @@ func makeNode(cfg *config.Config, privValidator: privValidator, transport: transport, - sw: sw, peerManager: peerManager, router: router, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, @@ -456,7 +400,6 @@ func makeNode(cfg *config.Config, ConsensusReactor: csReactor, BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor), - P2PPeers: sw, PeerManager: peerManager, GenDoc: genDoc, @@ -468,17 +411,6 @@ func makeNode(cfg *config.Config, }, } - // this is a terrible, because typed nil interfaces are not == - // nil, so this is just cleanup to avoid having a non-nil - // value in the RPC environment that has the semantic - // properties of nil. 
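The hack deleted here exists because of Go's typed-nil behaviour: storing a nil pointer in an interface-typed field yields an interface value that is not == nil. A tiny self-contained demonstration of the pitfall; the type names are illustrative:

package main

import "fmt"

type peerSet interface{ Size() int }

type switchImpl struct{}

func (*switchImpl) Size() int { return 0 }

func main() {
	var sw *switchImpl // typed nil pointer

	var ps peerSet = sw // interface now holds (type=*switchImpl, value=nil)

	fmt.Println(sw == nil) // true
	fmt.Println(ps == nil) // false: the interface itself is non-nil

	// Assigning a nil pointer into an interface-typed field therefore yields
	// a value with "the semantic properties of nil" that still fails a
	// == nil check, which is why the field had to be reset explicitly.
}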
- if sw == nil { - node.rpcEnv.P2PPeers = nil - } else if peerManager == nil { - node.rpcEnv.PeerManager = nil - } - // end hack - node.rpcEnv.P2PTransport = node node.BaseService = *service.NewBaseService(logger, "Node", node) @@ -525,11 +457,7 @@ func makeSeedNode(cfg *config.Config, return nil, fmt.Errorf("failed to create router: %w", err) } - var ( - pexReactor service.Service - sw *p2p.Switch - addrBook pex.AddrBook - ) + var pexReactor service.Service // add the pex reactor // FIXME: we add channel descriptors to both the router and the transport but only the router @@ -538,33 +466,9 @@ func makeSeedNode(cfg *config.Config, pexCh := pex.ChannelDescriptor() transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if cfg.P2P.UseLegacy { - sw = createSwitch( - cfg, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(cfg.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(cfg, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, cfg, sw, logger) - } else { - pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) - if err != nil { - return nil, err - } + pexReactor, err = createPEXReactorV2(cfg, logger, peerManager, router) + if err != nil { + return nil, err } if cfg.RPC.PprofListenAddress != "" { @@ -579,8 +483,6 @@ func makeSeedNode(cfg *config.Config, genesisDoc: genDoc, transport: transport, - sw: sw, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, peerManager: peerManager, @@ -627,15 +529,8 @@ func (n *nodeImpl) OnStart() error { } n.isListening = true - n.Logger.Info("p2p service", "legacy_enabled", n.config.P2P.UseLegacy) - if n.config.P2P.UseLegacy { - // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - if err = n.sw.Start(); err != nil { - return err - } - } else if err = n.router.Start(); err != nil { + if err = n.router.Start(); err != nil { return err } @@ -667,13 +562,7 @@ func (n *nodeImpl) OnStart() error { } } - if n.config.P2P.UseLegacy { - // Always connect to persistent peers - err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) - } - } else if err := n.pexReactor.Start(); err != nil { + if err := n.pexReactor.Start(); err != nil { return err } @@ -794,14 +683,8 @@ func (n *nodeImpl) OnStop() { n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) } - if n.config.P2P.UseLegacy { - if err := n.sw.Stop(); err != nil { - n.Logger.Error("failed to stop switch", "err", err) - } - } else { - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) - } + if err := n.router.Stop(); err != nil { + n.Logger.Error("failed to stop router", "err", err) } if err := n.transport.Close(); err != nil { @@ -1216,12 +1099,3 @@ func makeChannelsFromShims( return channels } - -func 
getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Channel { - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID := range reactorShim.Channels { - channels[chID] = reactorShim.GetChannel(chID) - } - - return channels -} diff --git a/node/setup.go b/node/setup.go index 16aa715c9..8889edc4e 100644 --- a/node/setup.go +++ b/node/setup.go @@ -2,16 +2,13 @@ package node import ( "bytes" - "context" "fmt" "math" - "net" "time" dbm "github.com/tendermint/tm-db" abciclient "github.com/tendermint/tendermint/abci/client" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" @@ -161,18 +158,8 @@ func createMempoolReactor( channelShims := mempoolv0.GetChannelShims(cfg.Mempool) reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims) - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, channelShims) - peerUpdates = peerManager.Subscribe() - } + channels := makeChannelsFromShims(router, channelShims) + peerUpdates := peerManager.Subscribe() switch cfg.Mempool.Version { case config.MempoolV0: @@ -255,23 +242,10 @@ func createEvidenceReactor( return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err) } - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, evidence.ChannelShims) - peerUpdates = peerManager.Subscribe() - } - evidenceReactor := evidence.NewReactor( logger, - channels[evidence.EvidenceChannel], - peerUpdates, + makeChannelsFromShims(router, evidence.ChannelShims)[evidence.EvidenceChannel], + peerManager.Subscribe(), evidencePool, ) @@ -294,19 +268,8 @@ func createBlockchainReactor( logger = logger.With("module", "blockchain") reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims) - - var ( - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, bcv0.ChannelShims) - peerUpdates = peerManager.Subscribe() - } + channels := makeChannelsFromShims(router, bcv0.ChannelShims) + peerUpdates := peerManager.Subscribe() reactor, err := bcv0.NewReactor( logger, state.Copy(), blockExec, blockStore, csReactor, @@ -357,13 +320,8 @@ func createConsensusReactor( peerUpdates *p2p.PeerUpdates ) - if cfg.P2P.UseLegacy { - channels = getChannelsFromShim(reactorShim) - peerUpdates = reactorShim.PeerUpdates - } else { - channels = makeChannelsFromShims(router, consensus.ChannelShims) - peerUpdates = peerManager.Subscribe() - } + channels = makeChannelsFromShims(router, consensus.ChannelShims) + peerUpdates = peerManager.Subscribe() reactor := consensus.NewReactor( logger, @@ -500,142 +458,6 @@ func createRouter( ) } -func createSwitch( - cfg *config.Config, - transport p2p.Transport, - p2pMetrics *p2p.Metrics, - mempoolReactor *p2p.ReactorShim, - bcReactor p2p.Reactor, - stateSyncReactor *p2p.ReactorShim, - consensusReactor *p2p.ReactorShim, - evidenceReactor *p2p.ReactorShim, - proxyApp proxy.AppConns, - nodeInfo 
types.NodeInfo, - nodeKey types.NodeKey, - p2pLogger log.Logger, -) *p2p.Switch { - - var ( - connFilters = []p2p.ConnFilterFunc{} - peerFilters = []p2p.PeerFilterFunc{} - ) - - if !cfg.P2P.AllowDuplicateIP { - connFilters = append(connFilters, p2p.ConnDuplicateIPFilter) - } - - // Filter peers by addr or pubkey with an ABCI query. - // If the query return code is OK, add peer. - if cfg.FilterPeers { - connFilters = append( - connFilters, - // ABCI query for address filtering. - func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - - peerFilters = append( - peerFilters, - // ABCI query for ID filtering. - func(_ p2p.IPeerSet, p p2p.Peer) error { - res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ - Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()), - }) - if err != nil { - return err - } - if res.IsErr() { - return fmt.Errorf("error querying abci app: %v", res) - } - - return nil - }, - ) - } - - sw := p2p.NewSwitch( - cfg.P2P, - transport, - p2p.WithMetrics(p2pMetrics), - p2p.SwitchPeerFilters(peerFilters...), - p2p.SwitchConnFilters(connFilters...), - ) - - sw.SetLogger(p2pLogger) - if cfg.Mode != config.ModeSeed { - sw.AddReactor("MEMPOOL", mempoolReactor) - sw.AddReactor("BLOCKCHAIN", bcReactor) - sw.AddReactor("CONSENSUS", consensusReactor) - sw.AddReactor("EVIDENCE", evidenceReactor) - sw.AddReactor("STATESYNC", stateSyncReactor) - } - - sw.SetNodeInfo(nodeInfo) - sw.SetNodeKey(nodeKey) - - p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", cfg.NodeKeyFile()) - return sw -} - -func createAddrBookAndSetOnSwitch(cfg *config.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { - - addrBook := pex.NewAddrBook(cfg.P2P.AddrBookFile(), cfg.P2P.AddrBookStrict) - addrBook.SetLogger(p2pLogger.With("book", cfg.P2P.AddrBookFile())) - - // Add ourselves to addrbook to prevent dialing ourselves - if cfg.P2P.ExternalAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ExternalAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - if cfg.P2P.ListenAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(cfg.P2P.ListenAddress)) - if err != nil { - return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) - } - addrBook.AddOurAddress(addr) - } - - sw.SetAddrBook(addrBook) - - return addrBook, nil -} - -func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, cfg *config.Config, - sw *p2p.Switch, logger log.Logger) *pex.Reactor { - - reactorConfig := &pex.ReactorConfig{ - Seeds: tmstrings.SplitAndTrimEmpty(cfg.P2P.Seeds, ",", " "), - SeedMode: cfg.Mode == config.ModeSeed, - // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000 - // blocks assuming 10s blocks ~ 28 hours. - // TODO (melekes): make it dynamic based on the actual block latencies - // from the live network. - // https://github.com/tendermint/tendermint/issues/3523 - SeedDisconnectWaitPeriod: 28 * time.Hour, - PersistentPeersMaxDialPeriod: cfg.P2P.PersistentPeersMaxDialPeriod, - } - // TODO persistent peers ? 
so we can have their DNS addrs saved - pexReactor := pex.NewReactor(addrBook, reactorConfig) - pexReactor.SetLogger(logger.With("module", "pex")) - sw.AddReactor("PEX", pexReactor) - return pexReactor -} - func createPEXReactorV2( cfg *config.Config, logger log.Logger, diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 85302691d..108510b02 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -136,20 +136,6 @@ func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(c.ctx) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(c.ctx, seeds) -} - -func (c *Local) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*coretypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) -} - func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 4b858d067..a1a42e28d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -131,20 +131,6 @@ func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*coretypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) -} - -func (c Client) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*coretypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) -} - func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index deffb533a..5c47faa23 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,7 +15,6 @@ var ( // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, - "p2p": {NewP2PMode, LegacyP2PMode, HybridP2PMode}, "queueType": {"priority"}, // "fifo", "wdrr" "initialHeight": {0, 1000}, "initialState": { @@ -71,19 +70,6 @@ var ( // Generate generates random testnets using the given RNG. func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} - switch opts.P2P { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - defer func() { - // avoid modifying the global state. 
- original := make([]interface{}, len(testnetCombinations["p2p"])) - copy(original, testnetCombinations["p2p"]) - testnetCombinations["p2p"] = original - }() - - testnetCombinations["p2p"] = []interface{}{opts.P2P} - case MixedP2PMode: - testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} - } for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) @@ -95,12 +81,6 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { continue } - if len(manifest.Nodes) == 1 { - if opt["p2p"] == HybridP2PMode { - continue - } - } - if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize { continue } @@ -116,20 +96,9 @@ type Options struct { MaxNetworkSize int NumGroups int Directory string - P2P P2PMode Reverse bool } -type P2PMode string - -const ( - NewP2PMode P2PMode = "new" - LegacyP2PMode P2PMode = "legacy" - HybridP2PMode P2PMode = "hybrid" - // mixed means that all combination are generated - MixedP2PMode P2PMode = "mixed" -) - // generateTestnet generates a single testnet with the given options. func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ @@ -145,13 +114,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er TxSize: int64(txSize.Choose(r).(int)), } - p2pMode := opt["p2p"].(P2PMode) - switch p2pMode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - default: - return manifest, fmt.Errorf("unknown p2p mode %s", p2pMode) - } - var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": @@ -168,27 +130,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er return manifest, fmt.Errorf("unknown topology %q", opt["topology"]) } - const legacyP2PFactor float64 = 0.5 - // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { node := generateNode(r, manifest, e2e.ModeSeed, 0, false) - - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() < legacyP2PFactor - } - manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node } - var ( - numSyncingNodes = 0 - hybridNumNew = 0 - hybridNumLegacy = 0 - ) + var numSyncingNodes = 0 // Next, we generate validators. We make sure a BFT quorum of validators start // at the initial height, and that we have two archive nodes. 
We also set up @@ -205,29 +153,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er name := fmt.Sprintf("validator%02d", i) node := generateNode(r, manifest, e2e.ModeValidator, startAt, i <= 2) - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() < legacyP2PFactor - if node.UseLegacyP2P { - hybridNumLegacy++ - if hybridNumNew == 0 { - hybridNumNew++ - hybridNumLegacy-- - node.UseLegacyP2P = false - } - } else { - hybridNumNew++ - if hybridNumLegacy == 0 { - hybridNumNew-- - hybridNumLegacy++ - node.UseLegacyP2P = true - - } - } - } - manifest.Nodes[name] = node if startAt == 0 { @@ -259,13 +184,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er } node := generateNode(r, manifest, e2e.ModeFull, startAt, false) - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() > legacyP2PFactor - } - manifest.Nodes[fmt.Sprintf("full%02d", i)] = node } @@ -336,13 +254,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er r, startAt+(5*int64(i)), lightProviders, ) - switch p2pMode { - case LegacyP2PMode: - node.UseLegacyP2P = true - case HybridP2PMode: - node.UseLegacyP2P = r.Float64() < legacyP2PFactor - } - manifest.Nodes[fmt.Sprintf("light%02d", i)] = node } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go index c38b6b20b..b7f790259 100644 --- a/test/e2e/generator/generate_test.go +++ b/test/e2e/generator/generate_test.go @@ -5,15 +5,14 @@ import ( "math/rand" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) func TestGenerator(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: MixedP2PMode}) + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{}) require.NoError(t, err) - require.True(t, len(manifests) >= 64, "insufficient combinations") + require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests)) // this just means that the numbers reported by the test // failures map to the test cases that you'd see locally. @@ -41,71 +40,4 @@ func TestGenerator(t *testing.T) { require.True(t, numStateSyncs <= 2) }) } - - t.Run("Hybrid", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: HybridP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. 
- e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - require.True(t, len(m.Nodes) > 1) - - var numLegacy, numNew int - for _, node := range m.Nodes { - if node.UseLegacyP2P { - numLegacy++ - } else { - numNew++ - } - } - - assert.True(t, numLegacy >= 1, "not enough legacy nodes [%d/%d]", - numLegacy, len(m.Nodes)) - assert.True(t, numNew >= 1, "not enough new nodes [%d/%d]", - numNew, len(m.Nodes)) - }) - } - }) - t.Run("UnmixedP2P", func(t *testing.T) { - t.Run("New", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: NewP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. - e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - for name, node := range m.Nodes { - t.Run(name, func(t *testing.T) { - require.False(t, node.UseLegacyP2P) - }) - } - }) - } - }) - t.Run("Legacy", func(t *testing.T) { - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{P2P: LegacyP2PMode}) - require.NoError(t, err) - require.True(t, len(manifests) >= 16, "insufficient combinations: %d", len(manifests)) - - // failures map to the test cases that you'd see locally. - e2e.SortManifests(manifests, false /* ascending */) - - for idx, m := range manifests { - t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { - for name, node := range m.Nodes { - t.Run(name, func(t *testing.T) { - require.True(t, node.UseLegacyP2P) - }) - } - }) - } - }) - }) } diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index 4668f6a8f..38f36d0da 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -38,20 +38,6 @@ func NewCLI() *CLI { SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - var err error - - p2pMode, err := cmd.Flags().GetString("p2p") - if err != nil { - return err - } - - switch mode := P2PMode(p2pMode); mode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - cli.opts.P2P = mode - default: - return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) - } - return cli.generate() }, } @@ -60,8 +46,6 @@ func NewCLI() *CLI { _ = cli.root.MarkPersistentFlagRequired("dir") cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order") cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups") - cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), - "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1, "Minimum network size (nodes)") cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 6eb32e56f..f73a18859 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -1,7 +1,6 @@ # This testnet is run by CI, and attempts to cover a broad range of # functionality with a single network. 
-disable_legacy_p2p = false evidence = 5 initial_height = 1000 initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 4492f4037..d5c9cb7f2 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -145,9 +145,6 @@ type ManifestNode struct { // This is helpful when debugging a specific problem. This overrides the network // level. LogLevel string `toml:"log_level"` - - // UseLegacyP2P enables use of the legacy p2p layer for this node. - UseLegacyP2P bool `toml:"use_legacy_p2p"` } // Stateless reports whether m is a node that does not own state, including light and seed nodes. diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index b54dd2bf0..3a75b169e 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -96,7 +96,6 @@ type Node struct { PersistentPeers []*Node Perturbations []Perturbation LogLevel string - UseLegacyP2P bool QueueType string HasStarted bool } @@ -182,7 +181,6 @@ func LoadTestnet(file string) (*Testnet, error) { Perturbations: []Perturbation{}, LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, - UseLegacyP2P: nodeManifest.UseLegacyP2P, } if node.StartAt == testnet.InitialHeight { diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 5a8407ab2..3b1184e9c 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -238,7 +238,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) cfg.P2P.AddrBookStrict = false - cfg.P2P.UseLegacy = node.UseLegacyP2P cfg.P2P.QueueType = node.QueueType cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second @@ -354,7 +353,6 @@ func MakeAppConfig(node *e2e.Node) ([]byte, error) { "snapshot_interval": node.SnapshotInterval, "retain_blocks": node.RetainBlocks, "key_type": node.PrivvalKey.Type(), - "use_legacy_p2p": node.UseLegacyP2P, } switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go deleted file mode 100644 index 6d5548fc7..000000000 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ /dev/null @@ -1,35 +0,0 @@ -// nolint: gosec -package addrbook - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" -) - -var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true) - -func Fuzz(data []byte) int { - addr := new(p2p.NetAddress) - if err := json.Unmarshal(data, addr); err != nil { - return -1 - } - - // Fuzz AddAddress. - err := addrBook.AddAddress(addr, addr) - if err != nil { - return 0 - } - - // Also, make sure PickAddress always returns a non-nil address. 
- bias := rand.Intn(100) - if p := addrBook.PickAddress(bias); p == nil { - panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", - bias, addrBook.Size())) - } - - return 1 -} diff --git a/test/fuzz/p2p/addrbook/fuzz_test.go b/test/fuzz/p2p/addrbook/fuzz_test.go deleted file mode 100644 index 4ec7aebd9..000000000 --- a/test/fuzz/p2p/addrbook/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package addrbook_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/addrbook" -) - -const testdataCasesDir = "testdata/cases" - -func TestAddrbookTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - addrbook.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go deleted file mode 100644 index 1166f9bd7..000000000 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ /dev/null @@ -1,59 +0,0 @@ -// nolint: gosec -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(baseDir string) { - log.SetFlags(0) - - // create "corpus" directory - corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - - // create corpus - privKey := ed25519.GenPrivKey() - addrs := []*p2p.NetAddress{ - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656}, - } - - for i, addr := range addrs { - filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i)) - - bz, err := json.Marshal(addr) - if err != nil { - log.Fatalf("can't marshal %v: %v", addr, err) - } - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %v to %q: %v", addr, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/addrbook/testdata/cases/empty b/test/fuzz/p2p/addrbook/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/fuzz/p2p/pex/fuzz_test.go b/test/fuzz/p2p/pex/fuzz_test.go deleted file mode 100644 index 8a194e730..000000000 --- a/test/fuzz/p2p/pex/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package pex_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/pex" -) - -const testdataCasesDir = "testdata/cases" - -func 
TestPexTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - pex.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go deleted file mode 100644 index e90216864..000000000 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ /dev/null @@ -1,84 +0,0 @@ -// nolint: gosec -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "math/rand" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(rootDir string) { - log.SetFlags(0) - - corpusDir := filepath.Join(rootDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - sizes := []int{0, 1, 2, 17, 5, 31} - - // Make the PRNG predictable - rand.Seed(10) - - for _, n := range sizes { - var addrs []*p2p.NetAddress - - // IPv4 addresses - for i := 0; i < n; i++ { - privKey := ed25519.GenPrivKey() - addr := fmt.Sprintf( - "%s@%v.%v.%v.%v:26656", - types.NodeIDFromPubKey(privKey.PubKey()), - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - ) - netAddr, _ := types.NewNetAddressString(addr) - addrs = append(addrs, netAddr) - } - - // IPv6 addresses - privKey := ed25519.GenPrivKey() - ipv6a, err := types.NewNetAddressString( - fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey()))) - if err != nil { - log.Fatalf("can't create a new netaddress: %v", err) - } - addrs = append(addrs, ipv6a) - - msg := tmp2p.PexMessage{ - Sum: &tmp2p.PexMessage_PexResponse{ - PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)}, - }, - } - bz, err := msg.Marshal() - if err != nil { - log.Fatalf("unable to marshal: %v", err) - } - - filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %X to %q: %v", bz, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go deleted file mode 100644 index 388361a4e..000000000 --- a/test/fuzz/p2p/pex/reactor_receive.go +++ /dev/null @@ -1,95 +0,0 @@ -package pex - -import ( - "net" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" -) - -var ( - pexR *pex.Reactor - peer p2p.Peer - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) - -func init() { - addrB := pex.NewAddrBook("./testdata/addrbook1", false) - pexR = 
pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false}) - pexR.SetLogger(logger) - peer = newFuzzPeer() - pexR.AddPeer(peer) - - cfg := config.DefaultP2PConfig() - cfg.PexReactor = true - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - return sw - }, logger) - pexR.SetSwitch(sw) -} - -func Fuzz(data []byte) int { - if len(data) == 0 { - return -1 - } - - pexR.Receive(pex.PexChannel, peer, data) - - if !peer.IsRunning() { - // do not increase priority for msgs which lead to peer being stopped - return 0 - } - - return 1 -} - -type fuzzPeer struct { - *service.BaseService - m map[string]interface{} -} - -var _ p2p.Peer = (*fuzzPeer)(nil) - -func newFuzzPeer() *fuzzPeer { - fp := &fuzzPeer{m: make(map[string]interface{})} - fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp) - return fp -} - -var privKey = ed25519.GenPrivKey() -var nodeID = types.NodeIDFromPubKey(privKey.PubKey()) -var defaultNodeInfo = types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, - }, - NodeID: nodeID, - ListenAddr: "127.0.0.1:0", - Moniker: "foo1", -} - -func (fp *fuzzPeer) FlushStop() {} -func (fp *fuzzPeer) ID() types.NodeID { return nodeID } -func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) } -func (fp *fuzzPeer) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""} -} -func (fp *fuzzPeer) IsOutbound() bool { return false } -func (fp *fuzzPeer) IsPersistent() bool { return false } -func (fp *fuzzPeer) CloseConn() error { return nil } -func (fp *fuzzPeer) NodeInfo() types.NodeInfo { return defaultNodeInfo } -func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } -func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { - return types.NewNetAddress(fp.ID(), fp.RemoteAddr()) -} -func (fp *fuzzPeer) Send(byte, []byte) bool { return true } -func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } -func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } -func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } diff --git a/test/fuzz/p2p/pex/testdata/addrbook1 b/test/fuzz/p2p/pex/testdata/addrbook1 deleted file mode 100644 index acf3e721d..000000000 --- a/test/fuzz/p2p/pex/testdata/addrbook1 +++ /dev/null @@ -1,1705 +0,0 @@ -{ - "Key": "badd73ebd4eeafbaefc01e0c", - "Addrs": [ - { - "Addr": { - "IP": "233.174.138.192", - "Port": 48186 - }, - "Src": { - "IP": "198.37.90.115", - "Port": 29492 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692278-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 55 - ] - }, - { - "Addr": { - "IP": "181.28.96.104", - "Port": 26776 - }, - "Src": { - "IP": "183.12.35.241", - "Port": 26794 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692289-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "141.85.194.118", - "Port": 39768 - }, - "Src": { - "IP": "120.130.90.63", - "Port": 61750 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692383-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 185 - ] - }, - { - "Addr": { - "IP": "167.72.9.155", - "Port": 9542 - }, - "Src": { - "IP": "95.158.40.108", - "Port": 14929 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692604-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": 
[ - 250 - ] - }, - { - "Addr": { - "IP": "124.118.94.27", - "Port": 50333 - }, - "Src": { - "IP": "208.169.57.96", - "Port": 19754 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692046-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 0 - ] - }, - { - "Addr": { - "IP": "158.197.4.226", - "Port": 25979 - }, - "Src": { - "IP": "3.129.219.107", - "Port": 50374 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692211-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "170.42.135.37", - "Port": 34524 - }, - "Src": { - "IP": "73.125.53.212", - "Port": 49691 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692241-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 14 - ] - }, - { - "Addr": { - "IP": "234.69.254.147", - "Port": 31885 - }, - "Src": { - "IP": "167.106.61.34", - "Port": 22187 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692609-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 213 - ] - }, - { - "Addr": { - "IP": "32.176.173.90", - "Port": 17250 - }, - "Src": { - "IP": "118.91.243.12", - "Port": 26781 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692273-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 35 - ] - }, - { - "Addr": { - "IP": "162.154.114.145", - "Port": 13875 - }, - "Src": { - "IP": "198.178.108.166", - "Port": 59623 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692373-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 216 - ] - }, - { - "Addr": { - "IP": "67.128.167.93", - "Port": 50513 - }, - "Src": { - "IP": "104.93.115.28", - "Port": 48298 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692399-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "132.175.221.206", - "Port": 61037 - }, - "Src": { - "IP": "112.49.189.65", - "Port": 56186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692422-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 31 - ] - }, - { - "Addr": { - "IP": "155.49.24.238", - "Port": 26261 - }, - "Src": { - "IP": "97.10.121.246", - "Port": 8694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692473-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 23 - ] - }, - { - "Addr": { - "IP": "22.215.7.233", - "Port": 32487 - }, - "Src": { - "IP": "214.236.105.23", - "Port": 26870 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692572-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 255 - ] - }, - { - "Addr": { - "IP": "253.170.228.231", - "Port": 5002 - }, - "Src": { - "IP": "225.49.137.209", - "Port": 16908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692619-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 161 - ] - }, - { - "Addr": { - "IP": "162.126.204.39", - "Port": 62618 - }, - "Src": { - "IP": "250.214.168.131", - "Port": 3237 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69203-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 7 - ] - }, - { - "Addr": { - "IP": "83.154.228.215", - "Port": 23508 - }, - "Src": { - "IP": "66.33.77.170", - "Port": 52207 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692153-07:00", 
- "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "132.49.63.65", - "Port": 53651 - }, - "Src": { - "IP": "250.164.163.212", - "Port": 8612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692253-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 98 - ] - }, - { - "Addr": { - "IP": "200.168.34.12", - "Port": 61901 - }, - "Src": { - "IP": "133.185.186.115", - "Port": 14186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692488-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 214 - ] - }, - { - "Addr": { - "IP": "31.93.45.219", - "Port": 61036 - }, - "Src": { - "IP": "176.191.214.170", - "Port": 33402 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692024-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 68 - ] - }, - { - "Addr": { - "IP": "250.189.27.93", - "Port": 51665 - }, - "Src": { - "IP": "93.161.116.107", - "Port": 53482 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692196-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 42 - ] - }, - { - "Addr": { - "IP": "50.7.17.126", - "Port": 64300 - }, - "Src": { - "IP": "233.234.64.214", - "Port": 61061 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692444-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 13 - ] - }, - { - "Addr": { - "IP": "88.85.81.64", - "Port": 34834 - }, - "Src": { - "IP": "4.240.150.250", - "Port": 63064 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692248-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 195 - ] - }, - { - "Addr": { - "IP": "242.117.244.198", - "Port": 4363 - }, - "Src": { - "IP": "149.29.34.42", - "Port": 62567 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692263-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "245.155.175.114", - "Port": 37262 - }, - "Src": { - "IP": "75.85.36.49", - "Port": 7101 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692313-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "224.184.241.26", - "Port": 55870 - }, - "Src": { - "IP": "52.15.194.216", - "Port": 4733 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692327-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "43.178.26.188", - "Port": 55914 - }, - "Src": { - "IP": "103.250.250.35", - "Port": 1566 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692577-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "102.117.172.117", - "Port": 35855 - }, - "Src": { - "IP": "114.152.204.187", - "Port": 21156 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692158-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "39.33.41.199", - "Port": 51600 - }, - "Src": { - "IP": "119.65.88.38", - "Port": 41239 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692188-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 24 - ] - }, - { - "Addr": { - "IP": "63.164.56.227", - "Port": 1660 - }, - "Src": { - "IP": "169.54.47.92", - "Port": 2818 - }, - 
"Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692227-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 10 - ] - }, - { - "Addr": { - "IP": "50.183.223.115", - "Port": 26910 - }, - "Src": { - "IP": "115.98.199.4", - "Port": 8767 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692201-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "132.94.203.167", - "Port": 53156 - }, - "Src": { - "IP": "17.195.234.168", - "Port": 29405 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692294-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "135.194.230.212", - "Port": 14340 - }, - "Src": { - "IP": "160.2.241.10", - "Port": 36553 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692363-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 63 - ] - }, - { - "Addr": { - "IP": "116.53.200.25", - "Port": 27092 - }, - "Src": { - "IP": "219.104.163.247", - "Port": 50476 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692543-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "125.77.44.185", - "Port": 55291 - }, - "Src": { - "IP": "77.15.232.117", - "Port": 6934 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692589-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "27.221.35.172", - "Port": 26418 - }, - "Src": { - "IP": "252.18.49.70", - "Port": 9835 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692068-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "133.225.167.135", - "Port": 59468 - }, - "Src": { - "IP": "110.223.163.74", - "Port": 22576 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69213-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 164 - ] - }, - { - "Addr": { - "IP": "155.131.178.240", - "Port": 60476 - }, - "Src": { - "IP": "143.82.157.1", - "Port": 43821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692173-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - }, - { - "Addr": { - "IP": "207.13.48.52", - "Port": 28549 - }, - "Src": { - "IP": "238.224.177.29", - "Port": 44100 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692594-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 113 - ] - }, - { - "Addr": { - "IP": "91.137.2.184", - "Port": 44887 - }, - "Src": { - "IP": "72.131.70.84", - "Port": 29960 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692627-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "169.59.252.76", - "Port": 57711 - }, - "Src": { - "IP": "194.132.91.119", - "Port": 18037 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692478-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 124 - ] - }, - { - "Addr": { - "IP": "25.174.143.229", - "Port": 41540 - }, - "Src": { - "IP": "58.215.132.148", - "Port": 64950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692534-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 88 - ] - }, - { - "Addr": { - "IP": "71.239.78.239", - "Port": 
46938 - }, - "Src": { - "IP": "156.98.186.169", - "Port": 32046 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692116-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 154 - ] - }, - { - "Addr": { - "IP": "94.137.107.61", - "Port": 20756 - }, - "Src": { - "IP": "101.201.138.179", - "Port": 22877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692414-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 233 - ] - }, - { - "Addr": { - "IP": "216.62.174.112", - "Port": 60162 - }, - "Src": { - "IP": "225.114.119.144", - "Port": 1575 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692464-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 132 - ] - }, - { - "Addr": { - "IP": "65.183.81.125", - "Port": 17511 - }, - "Src": { - "IP": "12.96.14.61", - "Port": 42308 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692308-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 153 - ] - }, - { - "Addr": { - "IP": "142.26.87.52", - "Port": 41967 - }, - "Src": { - "IP": "60.124.157.139", - "Port": 20727 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692321-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 29 - ] - }, - { - "Addr": { - "IP": "13.77.198.44", - "Port": 54508 - }, - "Src": { - "IP": "142.73.70.174", - "Port": 19525 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692553-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 170 - ] - }, - { - "Addr": { - "IP": "63.192.219.12", - "Port": 46603 - }, - "Src": { - "IP": "26.136.66.29", - "Port": 38924 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692558-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 203 - ] - }, - { - "Addr": { - "IP": "120.82.251.151", - "Port": 43723 - }, - "Src": { - "IP": "136.104.122.219", - "Port": 47452 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692599-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 103 - ] - }, - { - "Addr": { - "IP": "74.79.96.159", - "Port": 46646 - }, - "Src": { - "IP": "218.60.242.116", - "Port": 5361 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692145-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "194.65.211.174", - "Port": 43464 - }, - "Src": { - "IP": "87.5.112.153", - "Port": 56348 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692163-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "237.158.179.80", - "Port": 32231 - }, - "Src": { - "IP": "210.240.52.244", - "Port": 29142 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692183-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 27 - ] - }, - { - "Addr": { - "IP": "81.157.122.4", - "Port": 9917 - }, - "Src": { - "IP": "213.226.144.152", - "Port": 29950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692614-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 33 - ] - }, - { - "Addr": { - "IP": "180.147.73.220", - "Port": 367 - }, - "Src": { - "IP": "32.229.253.215", - "Port": 62165 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692529-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - 
"Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "83.110.235.17", - "Port": 33231 - }, - "Src": { - "IP": "230.54.162.85", - "Port": 51569 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692563-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 234 - ] - }, - { - "Addr": { - "IP": "100.252.20.2", - "Port": 1633 - }, - "Src": { - "IP": "52.136.47.198", - "Port": 31916 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692644-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 254 - ] - }, - { - "Addr": { - "IP": "74.5.247.79", - "Port": 18703 - }, - "Src": { - "IP": "200.247.68.128", - "Port": 55844 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692378-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 160 - ] - }, - { - "Addr": { - "IP": "17.220.231.87", - "Port": 59015 - }, - "Src": { - "IP": "54.207.49.4", - "Port": 17877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692404-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "156.194.57.127", - "Port": 18944 - }, - "Src": { - "IP": "154.94.235.84", - "Port": 61610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692439-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 32 - ] - }, - { - "Addr": { - "IP": "137.57.172.158", - "Port": 32031 - }, - "Src": { - "IP": "144.160.225.126", - "Port": 43225 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692568-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 4 - ] - }, - { - "Addr": { - "IP": "101.220.101.200", - "Port": 26480 - }, - "Src": { - "IP": "130.225.42.1", - "Port": 2522 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692637-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 173 - ] - }, - { - "Addr": { - "IP": "136.233.185.164", - "Port": 34011 - }, - "Src": { - "IP": "112.127.216.43", - "Port": 55317 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692649-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "101.189.107.148", - "Port": 28671 - }, - "Src": { - "IP": "213.55.140.235", - "Port": 2547 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692178-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 72 - ] - }, - { - "Addr": { - "IP": "61.190.60.64", - "Port": 58467 - }, - "Src": { - "IP": "206.86.120.31", - "Port": 54422 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692358-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 191 - ] - }, - { - "Addr": { - "IP": "227.51.127.223", - "Port": 52754 - }, - "Src": { - "IP": "124.24.12.47", - "Port": 59878 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692393-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 122 - ] - }, - { - "Addr": { - "IP": "101.19.152.238", - "Port": 47491 - }, - "Src": { - "IP": "211.30.216.184", - "Port": 17610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692135-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "182.198.35.238", - "Port": 15065 - }, - "Src": { - "IP": "239.67.104.149", - "Port": 43039 - }, - "Attempts": 0, - "LastAttempt": 
"2017-11-07T01:11:34.692268-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 179 - ] - }, - { - "Addr": { - "IP": "233.12.68.51", - "Port": 47544 - }, - "Src": { - "IP": "203.224.119.48", - "Port": 23337 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692454-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 51 - ] - }, - { - "Addr": { - "IP": "181.30.35.80", - "Port": 500 - }, - "Src": { - "IP": "174.200.32.161", - "Port": 10174 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692503-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 69 - ] - }, - { - "Addr": { - "IP": "49.104.89.21", - "Port": 54774 - }, - "Src": { - "IP": "245.95.238.161", - "Port": 14339 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692654-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 120 - ] - }, - { - "Addr": { - "IP": "65.150.169.199", - "Port": 11589 - }, - "Src": { - "IP": "150.110.183.207", - "Port": 17694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692041-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 182 - ] - }, - { - "Addr": { - "IP": "84.203.198.48", - "Port": 47122 - }, - "Src": { - "IP": "141.209.147.221", - "Port": 26085 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692056-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "220.10.106.180", - "Port": 27439 - }, - "Src": { - "IP": "124.170.244.46", - "Port": 5249 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692125-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 166 - ] - }, - { - "Addr": { - "IP": "120.208.32.34", - "Port": 27224 - }, - "Src": { - "IP": "64.194.118.103", - "Port": 24388 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69251-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 149 - ] - }, - { - "Addr": { - "IP": "245.182.67.231", - "Port": 58067 - }, - "Src": { - "IP": "62.108.238.220", - "Port": 41851 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692522-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "50.81.160.105", - "Port": 8113 - }, - "Src": { - "IP": "129.187.68.121", - "Port": 58612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692284-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 94 - ] - }, - { - "Addr": { - "IP": "101.116.47.155", - "Port": 20287 - }, - "Src": { - "IP": "94.34.167.170", - "Port": 41821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692299-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 134 - ] - }, - { - "Addr": { - "IP": "159.253.213.86", - "Port": 5222 - }, - "Src": { - "IP": "124.47.162.125", - "Port": 45742 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692429-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 25 - ] - }, - { - "Addr": { - "IP": "124.72.81.213", - "Port": 35723 - }, - "Src": { - "IP": "201.65.186.55", - "Port": 26602 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692493-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "77.216.197.130", - "Port": 49129 - }, - "Src": { - 
"IP": "245.160.14.27", - "Port": 38908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692517-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 58 - ] - }, - { - "Addr": { - "IP": "175.46.154.0", - "Port": 15297 - }, - "Src": { - "IP": "6.10.7.13", - "Port": 9657 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692583-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 89 - ] - }, - { - "Addr": { - "IP": "176.71.131.235", - "Port": 14342 - }, - "Src": { - "IP": "1.36.215.198", - "Port": 21709 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692206-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "34.211.134.186", - "Port": 31608 - }, - "Src": { - "IP": "187.87.12.183", - "Port": 32977 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692221-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 71 - ] - }, - { - "Addr": { - "IP": "238.63.227.107", - "Port": 49502 - }, - "Src": { - "IP": "185.51.127.143", - "Port": 22728 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692483-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 61 - ] - }, - { - "Addr": { - "IP": "160.65.76.45", - "Port": 27307 - }, - "Src": { - "IP": "170.175.198.16", - "Port": 44759 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692051-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 36 - ] - }, - { - "Addr": { - "IP": "152.22.79.90", - "Port": 25861 - }, - "Src": { - "IP": "216.183.31.190", - "Port": 9185 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692409-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 163 - ] - }, - { - "Addr": { - "IP": "200.2.175.37", - "Port": 57270 - }, - "Src": { - "IP": "108.20.254.94", - "Port": 32812 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692434-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "111.16.237.10", - "Port": 45200 - }, - "Src": { - "IP": "215.82.246.115", - "Port": 42333 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692469-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "166.217.195.221", - "Port": 4579 - }, - "Src": { - "IP": "148.153.131.183", - "Port": 13848 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692498-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 78 - ] - }, - { - "Addr": { - "IP": "1.226.156.147", - "Port": 61660 - }, - "Src": { - "IP": "169.138.16.69", - "Port": 23455 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692548-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "108.209.27.58", - "Port": 59102 - }, - "Src": { - "IP": "140.27.139.90", - "Port": 52154 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692014-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 53 - ] - }, - { - "Addr": { - "IP": "221.244.202.95", - "Port": 5032 - }, - "Src": { - "IP": "230.152.141.80", - "Port": 19457 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692168-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - 
"Addr": { - "IP": "55.87.1.138", - "Port": 39686 - }, - "Src": { - "IP": "55.22.167.132", - "Port": 35663 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692258-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "209.53.148.74", - "Port": 18502 - }, - "Src": { - "IP": "195.108.121.25", - "Port": 16730 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692304-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 180 - ] - }, - { - "Addr": { - "IP": "21.66.206.236", - "Port": 10771 - }, - "Src": { - "IP": "236.195.50.16", - "Port": 30697 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692368-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 22 - ] - }, - { - "Addr": { - "IP": "190.87.236.91", - "Port": 58378 - }, - "Src": { - "IP": "72.224.218.34", - "Port": 44817 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692459-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 127 - ] - }, - { - "Addr": { - "IP": "197.172.79.170", - "Port": 24958 - }, - "Src": { - "IP": "71.22.4.12", - "Port": 28558 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692036-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "160.176.234.94", - "Port": 47013 - }, - "Src": { - "IP": "212.172.24.59", - "Port": 29594 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692062-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 99 - ] - }, - { - "Addr": { - "IP": "170.206.180.18", - "Port": 26212 - }, - "Src": { - "IP": "228.135.62.18", - "Port": 26164 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692234-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - } - ] -} diff --git a/test/fuzz/p2p/pex/testdata/cases/empty b/test/fuzz/p2p/pex/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000