You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

751 lines
20 KiB

  1. package node
  2. import (
  3. "bytes"
  4. "context"
  5. "fmt"
  6. "math"
  7. "net"
  8. _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
  9. "time"
  10. dbm "github.com/tendermint/tm-db"
  11. abci "github.com/tendermint/tendermint/abci/types"
  12. bcv0 "github.com/tendermint/tendermint/blockchain/v0"
  13. bcv2 "github.com/tendermint/tendermint/blockchain/v2"
  14. cfg "github.com/tendermint/tendermint/config"
  15. cs "github.com/tendermint/tendermint/consensus"
  16. "github.com/tendermint/tendermint/crypto"
  17. "github.com/tendermint/tendermint/evidence"
  18. "github.com/tendermint/tendermint/libs/log"
  19. "github.com/tendermint/tendermint/libs/service"
  20. "github.com/tendermint/tendermint/libs/strings"
  21. mempl "github.com/tendermint/tendermint/mempool"
  22. "github.com/tendermint/tendermint/p2p"
  23. "github.com/tendermint/tendermint/p2p/pex"
  24. protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
  25. "github.com/tendermint/tendermint/proxy"
  26. sm "github.com/tendermint/tendermint/state"
  27. "github.com/tendermint/tendermint/state/indexer"
  28. blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv"
  29. blockidxnull "github.com/tendermint/tendermint/state/indexer/block/null"
  30. "github.com/tendermint/tendermint/state/indexer/tx/kv"
  31. "github.com/tendermint/tendermint/state/indexer/tx/null"
  32. "github.com/tendermint/tendermint/statesync"
  33. "github.com/tendermint/tendermint/store"
  34. "github.com/tendermint/tendermint/types"
  35. "github.com/tendermint/tendermint/version"
  36. )
  37. func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
  38. var blockStoreDB dbm.DB
  39. blockStoreDB, err = dbProvider(&DBContext{"blockstore", config})
  40. if err != nil {
  41. return
  42. }
  43. blockStore = store.NewBlockStore(blockStoreDB)
  44. stateDB, err = dbProvider(&DBContext{"state", config})
  45. return
  46. }
  47. func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
  48. proxyApp := proxy.NewAppConns(clientCreator)
  49. proxyApp.SetLogger(logger.With("module", "proxy"))
  50. if err := proxyApp.Start(); err != nil {
  51. return nil, fmt.Errorf("error starting proxy app connections: %v", err)
  52. }
  53. return proxyApp, nil
  54. }
  55. func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
  56. eventBus := types.NewEventBus()
  57. eventBus.SetLogger(logger.With("module", "events"))
  58. if err := eventBus.Start(); err != nil {
  59. return nil, err
  60. }
  61. return eventBus, nil
  62. }
  63. func createAndStartIndexerService(
  64. config *cfg.Config,
  65. dbProvider DBProvider,
  66. eventBus *types.EventBus,
  67. logger log.Logger,
  68. ) (*indexer.Service, indexer.TxIndexer, indexer.BlockIndexer, error) {
  69. var (
  70. txIndexer indexer.TxIndexer
  71. blockIndexer indexer.BlockIndexer
  72. )
  73. switch config.TxIndex.Indexer {
  74. case "kv":
  75. store, err := dbProvider(&DBContext{"tx_index", config})
  76. if err != nil {
  77. return nil, nil, nil, err
  78. }
  79. txIndexer = kv.NewTxIndex(store)
  80. blockIndexer = blockidxkv.New(dbm.NewPrefixDB(store, []byte("block_events")))
  81. default:
  82. txIndexer = &null.TxIndex{}
  83. blockIndexer = &blockidxnull.BlockerIndexer{}
  84. }
  85. indexerService := indexer.NewIndexerService(txIndexer, blockIndexer, eventBus)
  86. indexerService.SetLogger(logger.With("module", "txindex"))
  87. if err := indexerService.Start(); err != nil {
  88. return nil, nil, nil, err
  89. }
  90. return indexerService, txIndexer, blockIndexer, nil
  91. }
  92. func doHandshake(
  93. stateStore sm.Store,
  94. state sm.State,
  95. blockStore sm.BlockStore,
  96. genDoc *types.GenesisDoc,
  97. eventBus types.BlockEventPublisher,
  98. proxyApp proxy.AppConns,
  99. consensusLogger log.Logger) error {
  100. handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
  101. handshaker.SetLogger(consensusLogger)
  102. handshaker.SetEventBus(eventBus)
  103. if err := handshaker.Handshake(proxyApp); err != nil {
  104. return fmt.Errorf("error during handshake: %v", err)
  105. }
  106. return nil
  107. }
  108. func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) {
  109. // Log the version info.
  110. logger.Info("Version info",
  111. "software", version.TMCoreSemVer,
  112. "block", version.BlockProtocol,
  113. "p2p", version.P2PProtocol,
  114. "mode", mode,
  115. )
  116. // If the state and software differ in block version, at least log it.
  117. if state.Version.Consensus.Block != version.BlockProtocol {
  118. logger.Info("Software and state have different block protocols",
  119. "software", version.BlockProtocol,
  120. "state", state.Version.Consensus.Block,
  121. )
  122. }
  123. switch {
  124. case mode == cfg.ModeFull:
  125. consensusLogger.Info("This node is a fullnode")
  126. case mode == cfg.ModeValidator:
  127. addr := pubKey.Address()
  128. // Log whether this node is a validator or an observer
  129. if state.Validators.HasAddress(addr) {
  130. consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes())
  131. } else {
  132. consensusLogger.Info("This node is a validator (NOT in the active validator set)",
  133. "addr", addr, "pubKey", pubKey.Bytes())
  134. }
  135. }
  136. }
  137. func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
  138. if state.Validators.Size() > 1 {
  139. return false
  140. }
  141. addr, _ := state.Validators.GetByIndex(0)
  142. return pubKey != nil && bytes.Equal(pubKey.Address(), addr)
  143. }
  144. func createMempoolReactor(
  145. config *cfg.Config,
  146. proxyApp proxy.AppConns,
  147. state sm.State,
  148. memplMetrics *mempl.Metrics,
  149. peerManager *p2p.PeerManager,
  150. router *p2p.Router,
  151. logger log.Logger,
  152. ) (*p2p.ReactorShim, *mempl.Reactor, *mempl.CListMempool) {
  153. logger = logger.With("module", "mempool")
  154. mempool := mempl.NewCListMempool(
  155. config.Mempool,
  156. proxyApp.Mempool(),
  157. state.LastBlockHeight,
  158. mempl.WithMetrics(memplMetrics),
  159. mempl.WithPreCheck(sm.TxPreCheck(state)),
  160. mempl.WithPostCheck(sm.TxPostCheck(state)),
  161. )
  162. mempool.SetLogger(logger)
  163. channelShims := mempl.GetChannelShims(config.Mempool)
  164. reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims)
  165. var (
  166. channels map[p2p.ChannelID]*p2p.Channel
  167. peerUpdates *p2p.PeerUpdates
  168. )
  169. if config.P2P.DisableLegacy {
  170. channels = makeChannelsFromShims(router, channelShims)
  171. peerUpdates = peerManager.Subscribe()
  172. } else {
  173. channels = getChannelsFromShim(reactorShim)
  174. peerUpdates = reactorShim.PeerUpdates
  175. }
  176. reactor := mempl.NewReactor(
  177. logger,
  178. config.Mempool,
  179. peerManager,
  180. mempool,
  181. channels[mempl.MempoolChannel],
  182. peerUpdates,
  183. )
  184. if config.Consensus.WaitForTxs() {
  185. mempool.EnableTxsAvailable()
  186. }
  187. return reactorShim, reactor, mempool
  188. }
  189. func createEvidenceReactor(
  190. config *cfg.Config,
  191. dbProvider DBProvider,
  192. stateDB dbm.DB,
  193. blockStore *store.BlockStore,
  194. peerManager *p2p.PeerManager,
  195. router *p2p.Router,
  196. logger log.Logger,
  197. ) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) {
  198. evidenceDB, err := dbProvider(&DBContext{"evidence", config})
  199. if err != nil {
  200. return nil, nil, nil, err
  201. }
  202. logger = logger.With("module", "evidence")
  203. reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims)
  204. evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore)
  205. if err != nil {
  206. return nil, nil, nil, err
  207. }
  208. var (
  209. channels map[p2p.ChannelID]*p2p.Channel
  210. peerUpdates *p2p.PeerUpdates
  211. )
  212. if config.P2P.DisableLegacy {
  213. channels = makeChannelsFromShims(router, evidence.ChannelShims)
  214. peerUpdates = peerManager.Subscribe()
  215. } else {
  216. channels = getChannelsFromShim(reactorShim)
  217. peerUpdates = reactorShim.PeerUpdates
  218. }
  219. evidenceReactor := evidence.NewReactor(
  220. logger,
  221. channels[evidence.EvidenceChannel],
  222. peerUpdates,
  223. evidencePool,
  224. )
  225. return reactorShim, evidenceReactor, evidencePool, nil
  226. }
// createBlockchainReactor builds the fast-sync reactor for the configured
// blockchain version. For v0 it returns both the legacy reactor shim and the
// reactor; for v2 no shim is created, so the first return value is nil.
func createBlockchainReactor(
	logger log.Logger,
	config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore *store.BlockStore,
	csReactor *cs.Reactor,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	fastSync bool,
) (*p2p.ReactorShim, service.Service, error) {
	logger = logger.With("module", "blockchain")

	switch config.FastSync.Version {
	case cfg.BlockchainV0:
		reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)

		// Wire either the new router (legacy disabled) or the legacy shim.
		var (
			channels    map[p2p.ChannelID]*p2p.Channel
			peerUpdates *p2p.PeerUpdates
		)
		if config.P2P.DisableLegacy {
			channels = makeChannelsFromShims(router, bcv0.ChannelShims)
			peerUpdates = peerManager.Subscribe()
		} else {
			channels = getChannelsFromShim(reactorShim)
			peerUpdates = reactorShim.PeerUpdates
		}

		// Note: the reactor gets a copy of state, not the live value.
		reactor, err := bcv0.NewReactor(
			logger, state.Copy(), blockExec, blockStore, csReactor,
			channels[bcv0.BlockchainChannel], peerUpdates, fastSync,
		)
		if err != nil {
			return nil, nil, err
		}
		return reactorShim, reactor, nil
	case cfg.BlockchainV2:
		// v2 does not use the shim/router wiring above; it is returned as a
		// plain service with a nil shim.
		reactor := bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
		reactor.SetLogger(logger)
		return nil, reactor, nil
	default:
		return nil, nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}
}
// createConsensusReactor builds the consensus state machine and its reactor.
// The private validator is installed only when running in validator mode, and
// the reactor is wired either to the new p2p router or to the legacy shim.
func createConsensusReactor(
	config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mempool *mempl.CListMempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *cs.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	logger log.Logger,
) (*p2p.ReactorShim, *cs.Reactor, *cs.State) {
	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(logger)

	// Only sign proposals/votes when this node is configured as a validator.
	if privValidator != nil && config.Mode == cfg.ModeValidator {
		consensusState.SetPrivValidator(privValidator)
	}

	reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims)

	// Wire either the new router (legacy disabled) or the legacy shim.
	var (
		channels    map[p2p.ChannelID]*p2p.Channel
		peerUpdates *p2p.PeerUpdates
	)
	if config.P2P.DisableLegacy {
		channels = makeChannelsFromShims(router, cs.ChannelShims)
		peerUpdates = peerManager.Subscribe()
	} else {
		channels = getChannelsFromShim(reactorShim)
		peerUpdates = reactorShim.PeerUpdates
	}

	reactor := cs.NewReactor(
		logger,
		consensusState,
		channels[cs.StateChannel],
		channels[cs.DataChannel],
		channels[cs.VoteChannel],
		channels[cs.VoteSetBitsChannel],
		peerUpdates,
		waitSync,
		cs.ReactorMetrics(csMetrics),
	)

	// Services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor.
	reactor.SetEventBus(eventBus)

	return reactorShim, reactor, consensusState
}
  325. func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport {
  326. return p2p.NewMConnTransport(
  327. logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{},
  328. p2p.MConnTransportOptions{
  329. MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers +
  330. len(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")),
  331. ),
  332. },
  333. )
  334. }
  335. func createPeerManager(
  336. config *cfg.Config,
  337. dbProvider DBProvider,
  338. p2pLogger log.Logger,
  339. nodeID p2p.NodeID,
  340. ) (*p2p.PeerManager, error) {
  341. var maxConns uint16
  342. switch {
  343. case config.P2P.MaxConnections > 0:
  344. maxConns = config.P2P.MaxConnections
  345. case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0:
  346. x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers
  347. if x > math.MaxUint16 {
  348. return nil, fmt.Errorf(
  349. "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)",
  350. config.P2P.MaxNumInboundPeers,
  351. config.P2P.MaxNumOutboundPeers,
  352. math.MaxUint16,
  353. )
  354. }
  355. maxConns = uint16(x)
  356. default:
  357. maxConns = 64
  358. }
  359. privatePeerIDs := make(map[p2p.NodeID]struct{})
  360. for _, id := range strings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") {
  361. privatePeerIDs[p2p.NodeID(id)] = struct{}{}
  362. }
  363. options := p2p.PeerManagerOptions{
  364. MaxConnected: maxConns,
  365. MaxConnectedUpgrade: 4,
  366. MaxPeers: 1000,
  367. MinRetryTime: 100 * time.Millisecond,
  368. MaxRetryTime: 8 * time.Hour,
  369. MaxRetryTimePersistent: 5 * time.Minute,
  370. RetryTimeJitter: 3 * time.Second,
  371. PrivatePeers: privatePeerIDs,
  372. }
  373. peers := []p2p.NodeAddress{}
  374. for _, p := range strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") {
  375. address, err := p2p.ParseNodeAddress(p)
  376. if err != nil {
  377. return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
  378. }
  379. peers = append(peers, address)
  380. options.PersistentPeers = append(options.PersistentPeers, address.NodeID)
  381. }
  382. for _, p := range strings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
  383. address, err := p2p.ParseNodeAddress(p)
  384. if err != nil {
  385. return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
  386. }
  387. peers = append(peers, address)
  388. }
  389. peerDB, err := dbProvider(&DBContext{"peerstore", config})
  390. if err != nil {
  391. return nil, err
  392. }
  393. peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options)
  394. if err != nil {
  395. return nil, fmt.Errorf("failed to create peer manager: %w", err)
  396. }
  397. for _, peer := range peers {
  398. if _, err := peerManager.Add(peer); err != nil {
  399. return nil, fmt.Errorf("failed to add peer %q: %w", peer, err)
  400. }
  401. }
  402. return peerManager, nil
  403. }
  404. func createRouter(
  405. p2pLogger log.Logger,
  406. p2pMetrics *p2p.Metrics,
  407. nodeInfo p2p.NodeInfo,
  408. privKey crypto.PrivKey,
  409. peerManager *p2p.PeerManager,
  410. transport p2p.Transport,
  411. options p2p.RouterOptions,
  412. ) (*p2p.Router, error) {
  413. return p2p.NewRouter(
  414. p2pLogger,
  415. p2pMetrics,
  416. nodeInfo,
  417. privKey,
  418. peerManager,
  419. []p2p.Transport{transport},
  420. options,
  421. )
  422. }
// createSwitch builds the legacy p2p switch: installs duplicate-IP and
// (optionally) ABCI-query-based connection/peer filters, registers the
// reactors unless running in seed mode, and attaches node info and key.
func createSwitch(
	config *cfg.Config,
	transport p2p.Transport,
	p2pMetrics *p2p.Metrics,
	mempoolReactor *p2p.ReactorShim,
	bcReactor p2p.Reactor,
	stateSyncReactor *p2p.ReactorShim,
	consensusReactor *p2p.ReactorShim,
	evidenceReactor *p2p.ReactorShim,
	proxyApp proxy.AppConns,
	nodeInfo p2p.NodeInfo,
	nodeKey p2p.NodeKey,
	p2pLogger log.Logger,
) *p2p.Switch {
	var (
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter)
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}
				return nil
			},
		)
		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}
				return nil
			},
		)
	}

	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
		p2p.SwitchConnFilters(connFilters...),
	)
	sw.SetLogger(p2pLogger)

	// Seed mode skips the application-level reactors entirely.
	if config.Mode != cfg.ModeSeed {
		sw.AddReactor("MEMPOOL", mempoolReactor)
		sw.AddReactor("BLOCKCHAIN", bcReactor)
		sw.AddReactor("CONSENSUS", consensusReactor)
		sw.AddReactor("EVIDENCE", evidenceReactor)
		sw.AddReactor("STATESYNC", stateSyncReactor)
	}

	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)
	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile())
	return sw
}
  500. func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
  501. p2pLogger log.Logger, nodeKey p2p.NodeKey) (pex.AddrBook, error) {
  502. addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
  503. addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
  504. // Add ourselves to addrbook to prevent dialing ourselves
  505. if config.P2P.ExternalAddress != "" {
  506. addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ExternalAddress))
  507. if err != nil {
  508. return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
  509. }
  510. addrBook.AddOurAddress(addr)
  511. }
  512. if config.P2P.ListenAddress != "" {
  513. addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ListenAddress))
  514. if err != nil {
  515. return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
  516. }
  517. addrBook.AddOurAddress(addr)
  518. }
  519. sw.SetAddrBook(addrBook)
  520. return addrBook, nil
  521. }
  522. func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
  523. sw *p2p.Switch, logger log.Logger) *pex.Reactor {
  524. reactorConfig := &pex.ReactorConfig{
  525. Seeds: strings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "),
  526. SeedMode: config.Mode == cfg.ModeSeed,
  527. // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
  528. // blocks assuming 10s blocks ~ 28 hours.
  529. // TODO (melekes): make it dynamic based on the actual block latencies
  530. // from the live network.
  531. // https://github.com/tendermint/tendermint/issues/3523
  532. SeedDisconnectWaitPeriod: 28 * time.Hour,
  533. PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
  534. }
  535. // TODO persistent peers ? so we can have their DNS addrs saved
  536. pexReactor := pex.NewReactor(addrBook, reactorConfig)
  537. pexReactor.SetLogger(logger.With("module", "pex"))
  538. sw.AddReactor("PEX", pexReactor)
  539. return pexReactor
  540. }
  541. func createPEXReactorV2(
  542. config *cfg.Config,
  543. logger log.Logger,
  544. peerManager *p2p.PeerManager,
  545. router *p2p.Router,
  546. ) (*pex.ReactorV2, error) {
  547. channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 4096)
  548. if err != nil {
  549. return nil, err
  550. }
  551. peerUpdates := peerManager.Subscribe()
  552. return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil
  553. }
// makeNodeInfo assembles the NodeInfo advertised to peers during the p2p
// handshake: protocol versions, chain ID, supported channels, moniker, and
// listen/RPC addresses. Returns an error if the resulting info is invalid.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey p2p.NodeKey,
	txIndexer indexer.TxIndexer,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.NodeInfo, error) {
	// Advertise tx indexing as "on" unless the null indexer is in use.
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}

	// The fast-sync channel ID depends on the blockchain reactor version.
	var bcChannel byte
	switch config.FastSync.Version {
	case cfg.BlockchainV0:
		bcChannel = byte(bcv0.BlockchainChannel)
	case cfg.BlockchainV2:
		bcChannel = bcv2.BlockchainChannel
	default:
		return p2p.NodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := p2p.NodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		NodeID:  nodeKey.ID,
		Network: genDoc.ChainID,
		Version: version.TMCoreSemVer,
		Channels: []byte{
			bcChannel,
			byte(cs.StateChannel),
			byte(cs.DataChannel),
			byte(cs.VoteChannel),
			byte(cs.VoteSetBitsChannel),
			byte(mempl.MempoolChannel),
			byte(evidence.EvidenceChannel),
			byte(statesync.SnapshotChannel),
			byte(statesync.ChunkChannel),
		},
		Moniker: config.Moniker,
		Other: p2p.NodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	// Only advertise the PEX channel when peer exchange is enabled.
	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the configured external address; fall back to the listen address.
	lAddr := config.P2P.ExternalAddress
	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}
	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
// makeSeedNodeInfo assembles the NodeInfo for a seed node. Seeds advertise no
// application channels (only PEX, when enabled) and report tx indexing off.
func makeSeedNodeInfo(
	config *cfg.Config,
	nodeKey p2p.NodeKey,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.NodeInfo, error) {
	nodeInfo := p2p.NodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		NodeID:  nodeKey.ID,
		Network: genDoc.ChainID,
		Version: version.TMCoreSemVer,
		// No reactor channels: a seed only exchanges peer addresses.
		Channels: []byte{},
		Moniker:  config.Moniker,
		Other: p2p.NodeInfoOther{
			TxIndex:    "off",
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	// Only advertise the PEX channel when peer exchange is enabled.
	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the configured external address; fall back to the listen address.
	lAddr := config.P2P.ExternalAddress
	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}
	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}