Browse Source

p2p: rename pexV2 to pex (#7088)

pull/7093/head
Callum Waters 3 years ago
committed by GitHub
parent
commit
59404003ee
No known key found for this signature in database GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 38 additions and 41 deletions
  1. +1
    -0
      crypto/secp256k1/secp256k1_nocgo.go
  2. +1
    -0
      internal/consensus/wal_fuzz.go
  3. +1
    -0
      internal/libs/sync/deadlock.go
  4. +1
    -0
      internal/libs/sync/sync.go
  5. +1
    -0
      internal/p2p/conn/conn_go110.go
  6. +1
    -0
      internal/p2p/conn/conn_notgo110.go
  7. +2
    -7
      internal/p2p/pex/doc.go
  8. +19
    -24
      internal/p2p/pex/reactor.go
  9. +6
    -6
      internal/p2p/pex/reactor_test.go
  10. +2
    -2
      node/node.go
  11. +2
    -2
      node/setup.go
  12. +1
    -0
      rpc/jsonrpc/client/integration_test.go

+ 1
- 0
crypto/secp256k1/secp256k1_nocgo.go View File

@ -1,3 +1,4 @@
//go:build !libsecp256k1
// +build !libsecp256k1 // +build !libsecp256k1
package secp256k1 package secp256k1


+ 1
- 0
internal/consensus/wal_fuzz.go View File

@ -1,3 +1,4 @@
//go:build gofuzz
// +build gofuzz // +build gofuzz
package consensus package consensus


+ 1
- 0
internal/libs/sync/deadlock.go View File

@ -1,3 +1,4 @@
//go:build deadlock
// +build deadlock // +build deadlock
package sync package sync


+ 1
- 0
internal/libs/sync/sync.go View File

@ -1,3 +1,4 @@
//go:build !deadlock
// +build !deadlock // +build !deadlock
package sync package sync


+ 1
- 0
internal/p2p/conn/conn_go110.go View File

@ -1,3 +1,4 @@
//go:build go1.10
// +build go1.10 // +build go1.10
package conn package conn


+ 1
- 0
internal/p2p/conn/conn_notgo110.go View File

@ -1,3 +1,4 @@
//go:build !go1.10
// +build !go1.10 // +build !go1.10
package conn package conn


+ 2
- 7
internal/p2p/pex/doc.go View File

@ -7,19 +7,14 @@ The PEX reactor is a continuous service which periodically requests addresses
and serves addresses to other peers. There are two versions of this service and serves addresses to other peers. There are two versions of this service
aligning with the two p2p frameworks that Tendermint currently supports. aligning with the two p2p frameworks that Tendermint currently supports.
V1 is coupled with the Switch (which handles peer connections and routing of
messages) and, alongside exchanging peer information in the form of port/IP
pairs, also has the responsibility of dialing peers and ensuring that a
node has a sufficient amount of peers connected.
V2 is embedded with the new p2p stack and uses the peer manager to advertise
The reactor is embedded with the new p2p stack and uses the peer manager to advertise
peers as well as add new peers to the peer store. The V2 reactor passes a peers as well as add new peers to the peer store. The V2 reactor passes a
different set of proto messages which include a list of different set of proto messages which include a list of
[urls](https://golang.org/pkg/net/url/#URL). These can be used to save a set of [urls](https://golang.org/pkg/net/url/#URL). These can be used to save a set of
endpoints that each peer uses. The V2 reactor has backwards compatibility with endpoints that each peer uses. The V2 reactor has backwards compatibility with
V1. It can also handle V1 messages. V1. It can also handle V1 messages.
The V2 reactor is able to tweak the intensity of its search by decreasing or
The reactor is able to tweak the intensity of its search by decreasing or
increasing the interval between each request. It tracks connected peers via a increasing the interval between each request. It tracks connected peers via a
linked list, sending a request to the node at the front of the list and adding linked list, sending a request to the node at the front of the list and adding
it to the back of the list once a response is received. Using this method, a it to the back of the list once a response is received. Using this method, a


+ 19
- 24
internal/p2p/pex/reactor.go View File

@ -17,7 +17,7 @@ import (
) )
var ( var (
_ service.Service = (*ReactorV2)(nil)
_ service.Service = (*Reactor)(nil)
_ p2p.Wrapper = (*protop2p.PexMessage)(nil) _ p2p.Wrapper = (*protop2p.PexMessage)(nil)
) )
@ -73,11 +73,6 @@ func ChannelDescriptor() conn.ChannelDescriptor {
} }
} }
// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor
// is Reactor.
//
// FIXME: Rename this when Reactor is removed, and consider moving to p2p/.
//
// The peer exchange or PEX reactor supports the peer manager by sending // The peer exchange or PEX reactor supports the peer manager by sending
// requests to other peers for addresses that can be given to the peer manager // requests to other peers for addresses that can be given to the peer manager
// and at the same time advertises addresses to peers that need more. // and at the same time advertises addresses to peers that need more.
@ -86,7 +81,7 @@ func ChannelDescriptor() conn.ChannelDescriptor {
// increasing the interval between each request. It tracks connected peers via // increasing the interval between each request. It tracks connected peers via
// a linked list, sending a request to the node at the front of the list and // a linked list, sending a request to the node at the front of the list and
// adding it to the back of the list once a response is received. // adding it to the back of the list once a response is received.
type ReactorV2 struct {
type Reactor struct {
service.BaseService service.BaseService
peerManager *p2p.PeerManager peerManager *p2p.PeerManager
@ -125,14 +120,14 @@ type ReactorV2 struct {
} }
// NewReactor returns a reference to a new reactor. // NewReactor returns a reference to a new reactor.
func NewReactorV2(
func NewReactor(
logger log.Logger, logger log.Logger,
peerManager *p2p.PeerManager, peerManager *p2p.PeerManager,
pexCh *p2p.Channel, pexCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates, peerUpdates *p2p.PeerUpdates,
) *ReactorV2 {
) *Reactor {
r := &ReactorV2{
r := &Reactor{
peerManager: peerManager, peerManager: peerManager,
pexCh: pexCh, pexCh: pexCh,
peerUpdates: peerUpdates, peerUpdates: peerUpdates,
@ -150,7 +145,7 @@ func NewReactorV2(
// envelopes on each. In addition, it also listens for peer updates and handles // envelopes on each. In addition, it also listens for peer updates and handles
// messages on that p2p channel accordingly. The caller must be sure to execute // messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed. // OnStop to ensure the outbound p2p Channels are closed.
func (r *ReactorV2) OnStart() error {
func (r *Reactor) OnStart() error {
go r.processPexCh() go r.processPexCh()
go r.processPeerUpdates() go r.processPeerUpdates()
return nil return nil
@ -158,7 +153,7 @@ func (r *ReactorV2) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and // OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit. // blocking until they all exit.
func (r *ReactorV2) OnStop() {
func (r *Reactor) OnStop() {
// Close closeCh to signal to all spawned goroutines to gracefully exit. All // Close closeCh to signal to all spawned goroutines to gracefully exit. All
// p2p Channels should execute Close(). // p2p Channels should execute Close().
close(r.closeCh) close(r.closeCh)
@ -172,7 +167,7 @@ func (r *ReactorV2) OnStop() {
// processPexCh implements a blocking event loop where we listen for p2p // processPexCh implements a blocking event loop where we listen for p2p
// Envelope messages from the pexCh. // Envelope messages from the pexCh.
func (r *ReactorV2) processPexCh() {
func (r *Reactor) processPexCh() {
defer r.pexCh.Close() defer r.pexCh.Close()
for { for {
@ -202,7 +197,7 @@ func (r *ReactorV2) processPexCh() {
// processPeerUpdates initiates a blocking process where we listen for and handle // processPeerUpdates initiates a blocking process where we listen for and handle
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and // PeerUpdate messages. When the reactor is stopped, we will catch the signal and
// close the p2p PeerUpdatesCh gracefully. // close the p2p PeerUpdatesCh gracefully.
func (r *ReactorV2) processPeerUpdates() {
func (r *Reactor) processPeerUpdates() {
defer r.peerUpdates.Close() defer r.peerUpdates.Close()
for { for {
@ -218,7 +213,7 @@ func (r *ReactorV2) processPeerUpdates() {
} }
// handlePexMessage handles envelopes sent from peers on the PexChannel. // handlePexMessage handles envelopes sent from peers on the PexChannel.
func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error {
func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error {
logger := r.Logger.With("peer", envelope.From) logger := r.Logger.With("peer", envelope.From)
switch msg := envelope.Message.(type) { switch msg := envelope.Message.(type) {
@ -337,7 +332,7 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error {
// //
// FIXME: We may want to cache and parallelize this, but for now we'll just rely // FIXME: We may want to cache and parallelize this, but for now we'll just rely
// on the operating system to cache it for us. // on the operating system to cache it for us.
func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
limit := len(addresses) limit := len(addresses)
pexAddresses := make([]protop2p.PexAddress, 0, limit) pexAddresses := make([]protop2p.PexAddress, 0, limit)
@ -380,7 +375,7 @@ func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel. // handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
// It will handle errors and any possible panics gracefully. A caller can handle // It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel. // any error returned by sending a PeerError on the respective channel.
func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
defer func() { defer func() {
if e := recover(); e != nil { if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e) err = fmt.Errorf("panic in processing message: %v", e)
@ -407,7 +402,7 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er
// processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we
// send a request for addresses. // send a request for addresses.
func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
r.mtx.Lock() r.mtx.Lock()
@ -424,7 +419,7 @@ func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
} }
} }
func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time {
func (r *Reactor) waitUntilNextRequest() <-chan time.Time {
return time.After(time.Until(r.nextRequestTime)) return time.After(time.Until(r.nextRequestTime))
} }
@ -432,7 +427,7 @@ func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time {
// peer a request for more peer addresses. The function then moves the // peer a request for more peer addresses. The function then moves the
// peer into the requestsSent bucket and calculates when the next request // peer into the requestsSent bucket and calculates when the next request
// time should be // time should be
func (r *ReactorV2) sendRequestForPeers() {
func (r *Reactor) sendRequestForPeers() {
r.mtx.Lock() r.mtx.Lock()
defer r.mtx.Unlock() defer r.mtx.Unlock()
if len(r.availablePeers) == 0 { if len(r.availablePeers) == 0 {
@ -480,7 +475,7 @@ func (r *ReactorV2) sendRequestForPeers() {
// new nodes will plummet to a very small number, meaning the interval expands // new nodes will plummet to a very small number, meaning the interval expands
// to its upper bound. // to its upper bound.
// CONTRACT: Must use a write lock as nextRequestTime is updated // CONTRACT: Must use a write lock as nextRequestTime is updated
func (r *ReactorV2) calculateNextRequestTime() {
func (r *Reactor) calculateNextRequestTime() {
// check if the peer store is full. If so then there is no need // check if the peer store is full. If so then there is no need
// to send peer requests too often // to send peer requests too often
if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 { if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 {
@ -516,7 +511,7 @@ func (r *ReactorV2) calculateNextRequestTime() {
r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio))
} }
func (r *ReactorV2) markPeerRequest(peer types.NodeID) error {
func (r *Reactor) markPeerRequest(peer types.NodeID) error {
r.mtx.Lock() r.mtx.Lock()
defer r.mtx.Unlock() defer r.mtx.Unlock()
if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok { if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok {
@ -529,7 +524,7 @@ func (r *ReactorV2) markPeerRequest(peer types.NodeID) error {
return nil return nil
} }
func (r *ReactorV2) markPeerResponse(peer types.NodeID) error {
func (r *Reactor) markPeerResponse(peer types.NodeID) error {
r.mtx.Lock() r.mtx.Lock()
defer r.mtx.Unlock() defer r.mtx.Unlock()
// check if a request to this peer was sent // check if a request to this peer was sent
@ -546,7 +541,7 @@ func (r *ReactorV2) markPeerResponse(peer types.NodeID) error {
// all addresses must use a MCONN protocol for the peer to be considered part of the // all addresses must use a MCONN protocol for the peer to be considered part of the
// legacy p2p pex system // legacy p2p pex system
func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool {
func (r *Reactor) isLegacyPeer(peer types.NodeID) bool {
for _, addr := range r.peerManager.Addresses(peer) { for _, addr := range r.peerManager.Addresses(peer) {
if addr.Protocol != p2p.MConnProtocol { if addr.Protocol != p2p.MConnProtocol {
return false return false


+ 6
- 6
internal/p2p/pex/reactor_test.go View File

@ -272,7 +272,7 @@ func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) {
} }
type singleTestReactor struct { type singleTestReactor struct {
reactor *pex.ReactorV2
reactor *pex.Reactor
pexInCh chan p2p.Envelope pexInCh chan p2p.Envelope
pexOutCh chan p2p.Envelope pexOutCh chan p2p.Envelope
pexErrCh chan p2p.PeerError pexErrCh chan p2p.PeerError
@ -301,7 +301,7 @@ func setupSingle(t *testing.T) *singleTestReactor {
peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
require.NoError(t, err) require.NoError(t, err)
reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates)
reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
require.NoError(t, reactor.Start()) require.NoError(t, reactor.Start())
t.Cleanup(func() { t.Cleanup(func() {
err := reactor.Stop() err := reactor.Stop()
@ -327,7 +327,7 @@ type reactorTestSuite struct {
network *p2ptest.Network network *p2ptest.Network
logger log.Logger logger log.Logger
reactors map[types.NodeID]*pex.ReactorV2
reactors map[types.NodeID]*pex.Reactor
pexChannels map[types.NodeID]*p2p.Channel pexChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate peerChans map[types.NodeID]chan p2p.PeerUpdate
@ -370,7 +370,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
rts := &reactorTestSuite{ rts := &reactorTestSuite{
logger: log.TestingLogger().With("testCase", t.Name()), logger: log.TestingLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(t, networkOpts), network: p2ptest.MakeNetwork(t, networkOpts),
reactors: make(map[types.NodeID]*pex.ReactorV2, realNodes),
reactors: make(map[types.NodeID]*pex.Reactor, realNodes),
pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
@ -394,7 +394,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
if idx < opts.MockNodes { if idx < opts.MockNodes {
rts.mocks = append(rts.mocks, nodeID) rts.mocks = append(rts.mocks, nodeID)
} else { } else {
rts.reactors[nodeID] = pex.NewReactorV2(
rts.reactors[nodeID] = pex.NewReactor(
rts.logger.With("nodeID", nodeID), rts.logger.With("nodeID", nodeID),
rts.network.Nodes[nodeID].PeerManager, rts.network.Nodes[nodeID].PeerManager,
rts.pexChannels[nodeID], rts.pexChannels[nodeID],
@ -452,7 +452,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize) r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize) r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID]) r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID])
r.reactors[nodeID] = pex.NewReactorV2(
r.reactors[nodeID] = pex.NewReactor(
r.logger.With("nodeID", nodeID), r.logger.With("nodeID", nodeID),
r.network.Nodes[nodeID].PeerManager, r.network.Nodes[nodeID].PeerManager,
r.pexChannels[nodeID], r.pexChannels[nodeID],


+ 2
- 2
node/node.go View File

@ -352,7 +352,7 @@ func makeNode(cfg *config.Config,
pexCh := pex.ChannelDescriptor() pexCh := pex.ChannelDescriptor()
transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})
pexReactor, err = createPEXReactorV2(logger, peerManager, router)
pexReactor, err = createPEXReactor(logger, peerManager, router)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -466,7 +466,7 @@ func makeSeedNode(cfg *config.Config,
pexCh := pex.ChannelDescriptor() pexCh := pex.ChannelDescriptor()
transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh})
pexReactor, err = createPEXReactorV2(logger, peerManager, router)
pexReactor, err = createPEXReactor(logger, peerManager, router)
if err != nil { if err != nil {
return nil, err return nil, err
} }


+ 2
- 2
node/setup.go View File

@ -440,7 +440,7 @@ func createRouter(
) )
} }
func createPEXReactorV2(
func createPEXReactor(
logger log.Logger, logger log.Logger,
peerManager *p2p.PeerManager, peerManager *p2p.PeerManager,
router *p2p.Router, router *p2p.Router,
@ -452,7 +452,7 @@ func createPEXReactorV2(
} }
peerUpdates := peerManager.Subscribe() peerUpdates := peerManager.Subscribe()
return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil
return pex.NewReactor(logger, peerManager, channel, peerUpdates), nil
} }
func makeNodeInfo( func makeNodeInfo(


+ 1
- 0
rpc/jsonrpc/client/integration_test.go View File

@ -1,3 +1,4 @@
//go:build release
// +build release // +build release
// The code in here is comprehensive as an integration // The code in here is comprehensive as an integration


Loading…
Cancel
Save