|
|
@@ -151,7 +151,6 @@ func makeNode(cfg *config.Config,
 	state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
 	}

 	nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID)
@@ -160,7 +159,6 @@ func makeNode(cfg *config.Config,
 	proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
 	}

 	// EventBus and IndexerService must be started before the handshake because
@@ -170,7 +168,6 @@ func makeNode(cfg *config.Config,
 	eventBus, err := createAndStartEventBus(logger)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
 	}

 	indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID)
@@ -224,11 +221,9 @@ func makeNode(cfg *config.Config,
 	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
 	// and replays any blocks as necessary to sync tendermint with the app.
-	consensusLogger := logger.With("module", "consensus")
 	if !stateSync {
-		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil {
+		if err := doHandshake(stateStore, state, blockStore, genDoc, eventBus, proxyApp, logger); err != nil {
 			return nil, combineCloseError(err, makeCloser(closers))
 		}

 		// Reload the state. It will have the Version.Consensus.App set by the
@@ -246,7 +241,7 @@ func makeNode(cfg *config.Config,
 	// app may modify the validator set, specifying ourself as the only validator.
 	blockSync := !onlyValidatorIsUs(state, pubKey)

-	logNodeStartupInfo(state, pubKey, logger, consensusLogger, cfg.Mode)
+	logNodeStartupInfo(state, pubKey, logger, cfg.Mode)

 	// TODO: Fetch and provide real options and do proper p2p bootstrapping.
 	// TODO: Use a persistent peer database.
@ -277,7 +272,6 @@ func makeNode(cfg *config.Config, |
|
|
|
) |
|
|
|
if err != nil { |
|
|
|
return nil, combineCloseError(err, makeCloser(closers)) |
|
|
|
|
|
|
|
} |
|
|
|
|
|
|
|
evReactor, evPool, err := createEvidenceReactor( |
|
|
@@ -285,7 +279,6 @@ func makeNode(cfg *config.Config,
 	)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
 	}

 	// make block executor for consensus and blockchain reactors to execute blocks
@@ -302,7 +295,7 @@ func makeNode(cfg *config.Config,
 	csReactor, csState, err := createConsensusReactor(
 		cfg, state, blockExec, blockStore, mp, evPool,
 		privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus,
-		peerManager, router, consensusLogger,
+		peerManager, router, logger,
 	)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
@@ -332,7 +325,6 @@ func makeNode(cfg *config.Config,
 	// FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy,
 	// we should clean this whole thing up. See:
 	// https://github.com/tendermint/tendermint/issues/4644
-	ssLogger := logger.With("module", "statesync")
 	ssChDesc := statesync.GetChannelDescriptors()
 	channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc))
 	for idx := range ssChDesc {
@@ -345,50 +337,29 @@ func makeNode(cfg *config.Config,
 		channels[ch.ID] = ch
 	}

-	peerUpdates := peerManager.Subscribe()
 	stateSyncReactor := statesync.NewReactor(
 		genDoc.ChainID,
 		genDoc.InitialHeight,
 		*cfg.StateSync,
-		ssLogger,
+		logger.With("module", "statesync"),
 		proxyApp.Snapshot(),
 		proxyApp.Query(),
 		channels[statesync.SnapshotChannel],
 		channels[statesync.ChunkChannel],
 		channels[statesync.LightBlockChannel],
 		channels[statesync.ParamsChannel],
-		peerUpdates,
+		peerManager.Subscribe(),
 		stateStore,
 		blockStore,
 		cfg.StateSync.TempDir,
 		nodeMetrics.statesync,
 	)

-	// Optionally, start the pex reactor
-	//
-	// TODO:
-	//
-	// We need to set Seeds and PersistentPeers on the switch,
-	// since it needs to be able to use these (and their DNS names)
-	// even if the PEX is off. We can include the DNS name in the NetAddress,
-	// but it would still be nice to have a clear list of the current "PersistentPeers"
-	// somewhere that we can return with net_info.
-	//
-	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
-	// Note we currently use the addrBook regardless at least for AddOurAddress
 	pexReactor, err := createPEXReactor(logger, peerManager, router)
 	if err != nil {
 		return nil, combineCloseError(err, makeCloser(closers))
 	}

-	if cfg.RPC.PprofListenAddress != "" {
-		go func() {
-			logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress)
-			logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil))
-		}()
-	}

 	node := &nodeImpl{
 		config:     cfg,
 		genesisDoc: genDoc,
@@ -461,7 +432,6 @@ func makeSeedNode(cfg *config.Config,
 	state, err := sm.MakeGenesisState(genDoc)
 	if err != nil {
 		return nil, err
 	}

 	nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state)
@@ -487,19 +457,9 @@
 			closer)
 	}

-	var pexReactor service.Service
-
-	pexReactor, err = createPEXReactor(logger, peerManager, router)
+	pexReactor, err := createPEXReactor(logger, peerManager, router)
 	if err != nil {
 		return nil, combineCloseError(err, closer)
 	}

-	if cfg.RPC.PprofListenAddress != "" {
-		go func() {
-			logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress)
-			logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil))
-		}()
-	}

 	node := &nodeImpl{
@@ -522,6 +482,16 @@
 // OnStart starts the Node. It implements service.Service.
 func (n *nodeImpl) OnStart() error {
+	if n.config.RPC.PprofListenAddress != "" {
+		// this service is not cleaned up (I believe we'd need
+		// another thread and potentially a context to get this
+		// functionality.)
+		go func() {
+			n.Logger.Info("Starting pprof server", "laddr", n.config.RPC.PprofListenAddress)
+			n.Logger.Error("pprof server error", "err", http.ListenAndServe(n.config.RPC.PprofListenAddress, nil))
+		}()
+	}
+
 	now := tmtime.Now()
 	genTime := n.genesisDoc.GenesisTime
 	if genTime.After(now) {