Browse Source

p2p: reduce buffering on channels (#6609)

Using smaller buffers in each reactor/channel means there will be fewer stale messages.
pull/6622/head
Sam Kleinman 3 years ago
committed by GitHub
parent
commit
917180dfd2
No known key found for this signature in database. GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 36 additions and 43 deletions
  1. +2
    -3
      internal/blockchain/v0/reactor.go
  2. +1
    -1
      internal/blockchain/v2/reactor.go
  3. +16
    -19
      internal/consensus/reactor.go
  4. +2
    -2
      internal/evidence/reactor.go
  5. +2
    -2
      internal/mempool/v0/reactor.go
  6. +2
    -2
      internal/mempool/v1/reactor.go
  7. +3
    -2
      internal/p2p/pex/reactor.go
  8. +1
    -5
      internal/p2p/router.go
  9. +6
    -6
      internal/statesync/reactor.go
  10. +1
    -1
      node/setup.go

+ 2
- 3
internal/blockchain/v0/reactor.go View File

@@ -33,10 +33,9 @@ var (
ID: byte(BlockchainChannel), ID: byte(BlockchainChannel),
Priority: 5, Priority: 5,
SendQueueCapacity: 1000, SendQueueCapacity: 1000,
RecvBufferCapacity: 50 * 4096,
RecvBufferCapacity: 1024,
RecvMessageCapacity: bc.MaxMsgSize, RecvMessageCapacity: bc.MaxMsgSize,
MaxSendBytes: 100,
MaxSendBytes: 100,
}, },
}, },
} }


+ 1
- 1
internal/blockchain/v2/reactor.go View File

@@ -585,7 +585,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
ID: BlockchainChannel, ID: BlockchainChannel,
Priority: 5, Priority: 5,
SendQueueCapacity: 2000, SendQueueCapacity: 2000,
RecvBufferCapacity: 50 * 4096,
RecvBufferCapacity: 1024,
RecvMessageCapacity: bc.MaxMsgSize, RecvMessageCapacity: bc.MaxMsgSize,
}, },
} }


+ 16
- 19
internal/consensus/reactor.go View File

@@ -33,11 +33,11 @@ var (
MsgType: new(tmcons.Message), MsgType: new(tmcons.Message),
Descriptor: &p2p.ChannelDescriptor{ Descriptor: &p2p.ChannelDescriptor{
ID: byte(StateChannel), ID: byte(StateChannel),
Priority: 6,
SendQueueCapacity: 100,
Priority: 8,
SendQueueCapacity: 64,
RecvMessageCapacity: maxMsgSize, RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 12000,
RecvBufferCapacity: 128,
MaxSendBytes: 12000,
}, },
}, },
DataChannel: { DataChannel: {
@@ -47,36 +47,33 @@ var (
// stuff. Once we gossip the whole block there is nothing left to send // stuff. Once we gossip the whole block there is nothing left to send
// until next height or round. // until next height or round.
ID: byte(DataChannel), ID: byte(DataChannel),
Priority: 10,
SendQueueCapacity: 100,
RecvBufferCapacity: 50 * 4096,
Priority: 12,
SendQueueCapacity: 64,
RecvBufferCapacity: 512,
RecvMessageCapacity: maxMsgSize, RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 40000,
MaxSendBytes: 40000,
}, },
}, },
VoteChannel: { VoteChannel: {
MsgType: new(tmcons.Message), MsgType: new(tmcons.Message),
Descriptor: &p2p.ChannelDescriptor{ Descriptor: &p2p.ChannelDescriptor{
ID: byte(VoteChannel), ID: byte(VoteChannel),
Priority: 7,
SendQueueCapacity: 100,
RecvBufferCapacity: 100 * 100,
Priority: 10,
SendQueueCapacity: 64,
RecvBufferCapacity: 128,
RecvMessageCapacity: maxMsgSize, RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 150,
MaxSendBytes: 150,
}, },
}, },
VoteSetBitsChannel: { VoteSetBitsChannel: {
MsgType: new(tmcons.Message), MsgType: new(tmcons.Message),
Descriptor: &p2p.ChannelDescriptor{ Descriptor: &p2p.ChannelDescriptor{
ID: byte(VoteSetBitsChannel), ID: byte(VoteSetBitsChannel),
Priority: 1,
SendQueueCapacity: 2,
RecvBufferCapacity: 1024,
Priority: 5,
SendQueueCapacity: 8,
RecvBufferCapacity: 128,
RecvMessageCapacity: maxMsgSize, RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 50,
MaxSendBytes: 50,
}, },
}, },
} }


+ 2
- 2
internal/evidence/reactor.go View File

@@ -31,8 +31,8 @@ var (
ID: byte(EvidenceChannel), ID: byte(EvidenceChannel),
Priority: 6, Priority: 6,
RecvMessageCapacity: maxMsgSize, RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 400,
RecvBufferCapacity: 32,
MaxSendBytes: 400,
}, },
}, },
} }


+ 2
- 2
internal/mempool/v0/reactor.go View File

@@ -104,8 +104,8 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe
ID: byte(mempool.MempoolChannel), ID: byte(mempool.MempoolChannel),
Priority: 5, Priority: 5,
RecvMessageCapacity: batchMsg.Size(), RecvMessageCapacity: batchMsg.Size(),
MaxSendBytes: 5000,
RecvBufferCapacity: 128,
MaxSendBytes: 5000,
}, },
}, },
} }


+ 2
- 2
internal/mempool/v1/reactor.go View File

@@ -103,8 +103,8 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe
ID: byte(mempool.MempoolChannel), ID: byte(mempool.MempoolChannel),
Priority: 5, Priority: 5,
RecvMessageCapacity: batchMsg.Size(), RecvMessageCapacity: batchMsg.Size(),
MaxSendBytes: 5000,
RecvBufferCapacity: 128,
MaxSendBytes: 5000,
}, },
}, },
} }


+ 3
- 2
internal/p2p/pex/reactor.go View File

@@ -51,8 +51,8 @@ func ChannelDescriptor() conn.ChannelDescriptor {
Priority: 1, Priority: 1,
SendQueueCapacity: 10, SendQueueCapacity: 10,
RecvMessageCapacity: maxMsgSize, RecvMessageCapacity: maxMsgSize,
MaxSendBytes: 200,
RecvBufferCapacity: 32,
MaxSendBytes: 200,
} }
} }
@@ -417,6 +417,7 @@ func (r *ReactorV2) sendRequestForPeers() {
// no peers are available // no peers are available
r.Logger.Debug("no available peers to send request to, waiting...") r.Logger.Debug("no available peers to send request to, waiting...")
r.nextRequestTime = time.Now().Add(noAvailablePeersWaitPeriod) r.nextRequestTime = time.Now().Add(noAvailablePeersWaitPeriod)
return return
} }
var peerID types.NodeID var peerID types.NodeID


+ 1
- 5
internal/p2p/router.go View File

@@ -19,7 +19,7 @@ import (
"github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/types"
) )
const queueBufferDefault = 4096
const queueBufferDefault = 32
// ChannelID is an arbitrary channel ID. // ChannelID is an arbitrary channel ID.
type ChannelID uint16 type ChannelID uint16
@@ -365,10 +365,6 @@ func (r *Router) createQueueFactory() (func(int) queue, error) {
// wrapper message. The caller may provide a size to make the channel buffered, // wrapper message. The caller may provide a size to make the channel buffered,
// which internally makes the inbound, outbound, and error channel buffered. // which internally makes the inbound, outbound, and error channel buffered.
func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) { func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) {
if size == 0 {
size = queueBufferDefault
}
r.channelMtx.Lock() r.channelMtx.Lock()
defer r.channelMtx.Unlock() defer r.channelMtx.Unlock()


+ 6
- 6
internal/statesync/reactor.go View File

@@ -41,8 +41,8 @@ var (
Priority: 6, Priority: 6,
SendQueueCapacity: 10, SendQueueCapacity: 10,
RecvMessageCapacity: snapshotMsgSize, RecvMessageCapacity: snapshotMsgSize,
MaxSendBytes: 400,
RecvBufferCapacity: 128,
MaxSendBytes: 400,
}, },
}, },
ChunkChannel: { ChunkChannel: {
@@ -52,8 +52,8 @@ var (
Priority: 3, Priority: 3,
SendQueueCapacity: 4, SendQueueCapacity: 4,
RecvMessageCapacity: chunkMsgSize, RecvMessageCapacity: chunkMsgSize,
MaxSendBytes: 400,
RecvBufferCapacity: 128,
MaxSendBytes: 400,
}, },
}, },
LightBlockChannel: { LightBlockChannel: {
@@ -63,8 +63,8 @@ var (
Priority: 2, Priority: 2,
SendQueueCapacity: 10, SendQueueCapacity: 10,
RecvMessageCapacity: lightBlockMsgSize, RecvMessageCapacity: lightBlockMsgSize,
MaxSendBytes: 400,
RecvBufferCapacity: 128,
MaxSendBytes: 400,
}, },
}, },
} }


+ 1
- 1
node/setup.go View File

@@ -698,7 +698,7 @@ func createPEXReactorV2(
router *p2p.Router, router *p2p.Router,
) (*pex.ReactorV2, error) { ) (*pex.ReactorV2, error) {
channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 4096)
channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128)
if err != nil { if err != nil {
return nil, err return nil, err
} }


Loading…
Cancel
Save