
statesync: reactor and channel construction (#7529)

Sam Kleinman, 3 years ago · committed by GitHub · commit fc36c7782f · pull/7530/head
3 changed files with 56 additions and 31 deletions
  1. internal/statesync/reactor.go       +30  -9
  2. internal/statesync/reactor_test.go  +20  -5
  3. node/node.go                         +6  -17

internal/statesync/reactor.go (+30 -9)

@@ -71,9 +71,9 @@ const (
 	maxLightBlockRequestRetries = 20
 )
 
-func GetChannelDescriptors() []*p2p.ChannelDescriptor {
-	return []*p2p.ChannelDescriptor{
-		{
+func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor {
+	return map[p2p.ChannelID]*p2p.ChannelDescriptor{
+		SnapshotChannel: {
 			ID:          SnapshotChannel,
 			MessageType: new(ssproto.Message),
@@ -82,7 +82,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor {
 			RecvMessageCapacity: snapshotMsgSize,
 			RecvBufferCapacity:  128,
 		},
-		{
+		ChunkChannel: {
 			ID:          ChunkChannel,
 			Priority:    3,
 			MessageType: new(ssproto.Message),
@@ -90,7 +90,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor {
 			RecvMessageCapacity: chunkMsgSize,
 			RecvBufferCapacity:  128,
 		},
-		{
+		LightBlockChannel: {
 			ID:          LightBlockChannel,
 			MessageType: new(ssproto.Message),
 			Priority:    5,
@@ -98,7 +98,7 @@ func GetChannelDescriptors() []*p2p.ChannelDescriptor {
 			RecvMessageCapacity: lightBlockMsgSize,
 			RecvBufferCapacity:  128,
 		},
-		{
+		ParamsChannel: {
 			ID:          ParamsChannel,
 			MessageType: new(ssproto.Message),
 			Priority:    2,
@@ -166,19 +166,40 @@ type Reactor struct {
 // and querying, references to p2p Channels and a channel to listen for peer
 // updates on. Note, the reactor will close all p2p Channels when stopping.
 func NewReactor(
+	ctx context.Context,
 	chainID string,
 	initialHeight int64,
 	cfg config.StateSyncConfig,
 	logger log.Logger,
 	conn proxy.AppConnSnapshot,
 	connQuery proxy.AppConnQuery,
-	snapshotCh, chunkCh, blockCh, paramsCh *p2p.Channel,
+	channelCreator p2p.ChannelCreator,
 	peerUpdates *p2p.PeerUpdates,
 	stateStore sm.Store,
 	blockStore *store.BlockStore,
 	tempDir string,
 	ssMetrics *Metrics,
-) *Reactor {
+) (*Reactor, error) {
+	chDesc := getChannelDescriptors()
+
+	snapshotCh, err := channelCreator(ctx, chDesc[SnapshotChannel])
+	if err != nil {
+		return nil, err
+	}
+
+	chunkCh, err := channelCreator(ctx, chDesc[ChunkChannel])
+	if err != nil {
+		return nil, err
+	}
+
+	blockCh, err := channelCreator(ctx, chDesc[LightBlockChannel])
+	if err != nil {
+		return nil, err
+	}
+
+	paramsCh, err := channelCreator(ctx, chDesc[ParamsChannel])
+	if err != nil {
+		return nil, err
+	}
+
 	r := &Reactor{
 		logger:  logger,
 		chainID: chainID,
@@ -201,7 +222,7 @@ func NewReactor(
 	}
 
 	r.BaseService = *service.NewBaseService(logger, "StateSync", r)
 
-	return r
+	return r, nil
 }
 
 // OnStart starts separate go routines for each p2p Channel and listens for
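
The core of the change: NewReactor no longer receives four pre-built *p2p.Channel values. It takes a context plus a p2p.ChannelCreator callback, looks up each descriptor in the now-unexported, ID-keyed table, and opens the channels itself, surfacing an error if any creation fails. The test stub in the next file spells out the callback's signature; restated here as an illustrative fragment (the real definition lives in internal/p2p and may carry additional documentation):

	// Illustrative restatement, not the actual internal/p2p source:
	type ChannelCreator func(context.Context, *ChannelDescriptor) (*Channel, error)

Any function or method value with this shape can be passed as channelCreator, which is what lets tests substitute pre-built in-memory channels and lets node.go hand over the router's own method directly. Keying the descriptors by p2p.ChannelID instead of returning a slice also makes each lookup in NewReactor a direct map access, and getChannelDescriptors can drop its exported name since node.go no longer consumes it.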


internal/statesync/reactor_test.go (+20 -5)

@@ -146,23 +146,38 @@ func setup(
 	cfg := config.DefaultStateSyncConfig()
 
-	rts.reactor = NewReactor(
+	chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
+		switch desc.ID {
+		case SnapshotChannel:
+			return rts.snapshotChannel, nil
+		case ChunkChannel:
+			return rts.chunkChannel, nil
+		case LightBlockChannel:
+			return rts.blockChannel, nil
+		case ParamsChannel:
+			return rts.paramsChannel, nil
+		default:
+			return nil, fmt.Errorf("invalid channel; %v", desc.ID)
+		}
+	}
+
+	var err error
+	rts.reactor, err = NewReactor(
+		ctx,
 		factory.DefaultTestChainID,
 		1,
 		*cfg,
 		log.TestingLogger(),
 		conn,
 		connQuery,
-		rts.snapshotChannel,
-		rts.chunkChannel,
-		rts.blockChannel,
-		rts.paramsChannel,
+		chCreator,
 		rts.peerUpdates,
 		rts.stateStore,
 		rts.blockStore,
 		"",
 		m,
 	)
+	require.NoError(t, err)
 
 	rts.syncer = newSyncer(
 		*cfg,
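
Because the reactor now constructs its own channels, the harness can also cover the new failure path by handing NewReactor a creator that always errors. A hypothetical fragment under the same setup assumptions (conn, connQuery, cfg, rts, and m come from the surrounding harness; the standard errors package would need importing):

	// Hypothetical: force NewReactor down its new error-return path by
	// failing the very first channel it tries to open.
	failingCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
		return nil, errors.New("no channels available")
	}
	_, err := NewReactor(ctx, factory.DefaultTestChainID, 1, *cfg, log.TestingLogger(),
		conn, connQuery, failingCreator, rts.peerUpdates, rts.stateStore, rts.blockStore, "", m)
	require.Error(t, err)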


node/node.go (+6 -17)

@@ -349,35 +349,24 @@ func makeNode(
 	// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
 	// we should clean this whole thing up. See:
 	// https://github.com/tendermint/tendermint/issues/4644
-	ssChDesc := statesync.GetChannelDescriptors()
-	channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc))
-	for idx := range ssChDesc {
-		chd := ssChDesc[idx]
-		ch, err := router.OpenChannel(ctx, chd)
-		if err != nil {
-			return nil, err
-		}
-
-		channels[ch.ID] = ch
-	}
-
-	stateSyncReactor := statesync.NewReactor(
+	stateSyncReactor, err := statesync.NewReactor(
+		ctx,
 		genDoc.ChainID,
 		genDoc.InitialHeight,
 		*cfg.StateSync,
 		logger.With("module", "statesync"),
 		proxyApp.Snapshot(),
 		proxyApp.Query(),
-		channels[statesync.SnapshotChannel],
-		channels[statesync.ChunkChannel],
-		channels[statesync.LightBlockChannel],
-		channels[statesync.ParamsChannel],
+		router.OpenChannel,
 		peerManager.Subscribe(ctx),
 		stateStore,
 		blockStore,
 		cfg.StateSync.TempDir,
 		nodeMetrics.statesync,
 	)
+	if err != nil {
+		return nil, combineCloseError(err, makeCloser(closers))
+	}
 
 	var pexReactor service.Service
 	if cfg.P2P.PexReactor {
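
node/node.go sheds the manual channel-opening loop because router.OpenChannel already has the creator's shape, so the method value is passed straight through and the reactor opens its own channels. An illustrative fragment showing why that type-checks (assuming the surrounding makeNode scope, where router is the node's p2p router):

	// Illustrative compile-time check: the router's method value satisfies
	// the callback type NewReactor expects.
	var _ p2p.ChannelCreator = router.OpenChannel

Any channel-creation failure now surfaces through NewReactor's second return value, which makeNode wraps with combineCloseError so resources opened earlier in startup are still cleaned up.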

