You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

823 lines
22 KiB

  1. package node
  2. import (
  3. "bytes"
  4. "context"
  5. "errors"
  6. "fmt"
  7. "math"
  8. "net"
  9. _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
  10. "strings"
  11. "time"
  12. dbm "github.com/tendermint/tm-db"
  13. abci "github.com/tendermint/tendermint/abci/types"
  14. cfg "github.com/tendermint/tendermint/config"
  15. "github.com/tendermint/tendermint/crypto"
  16. bcv0 "github.com/tendermint/tendermint/internal/blockchain/v0"
  17. bcv2 "github.com/tendermint/tendermint/internal/blockchain/v2"
  18. cs "github.com/tendermint/tendermint/internal/consensus"
  19. "github.com/tendermint/tendermint/internal/evidence"
  20. "github.com/tendermint/tendermint/internal/mempool"
  21. mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
  22. mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1"
  23. "github.com/tendermint/tendermint/internal/p2p"
  24. "github.com/tendermint/tendermint/internal/p2p/pex"
  25. "github.com/tendermint/tendermint/internal/statesync"
  26. "github.com/tendermint/tendermint/libs/log"
  27. "github.com/tendermint/tendermint/libs/service"
  28. tmstrings "github.com/tendermint/tendermint/libs/strings"
  29. protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
  30. "github.com/tendermint/tendermint/proxy"
  31. sm "github.com/tendermint/tendermint/state"
  32. "github.com/tendermint/tendermint/state/indexer"
  33. kv "github.com/tendermint/tendermint/state/indexer/sink/kv"
  34. null "github.com/tendermint/tendermint/state/indexer/sink/null"
  35. psql "github.com/tendermint/tendermint/state/indexer/sink/psql"
  36. "github.com/tendermint/tendermint/store"
  37. "github.com/tendermint/tendermint/types"
  38. "github.com/tendermint/tendermint/version"
  39. )
  40. func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
  41. var blockStoreDB dbm.DB
  42. blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
  43. if err != nil {
  44. return
  45. }
  46. blockStore = store.NewBlockStore(blockStoreDB)
  47. stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
  48. return
  49. }
  50. func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
  51. proxyApp := proxy.NewAppConns(clientCreator)
  52. proxyApp.SetLogger(logger.With("module", "proxy"))
  53. if err := proxyApp.Start(); err != nil {
  54. return nil, fmt.Errorf("error starting proxy app connections: %v", err)
  55. }
  56. return proxyApp, nil
  57. }
  58. func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
  59. eventBus := types.NewEventBus()
  60. eventBus.SetLogger(logger.With("module", "events"))
  61. if err := eventBus.Start(); err != nil {
  62. return nil, err
  63. }
  64. return eventBus, nil
  65. }
  66. func createAndStartIndexerService(
  67. config *cfg.Config,
  68. dbProvider cfg.DBProvider,
  69. eventBus *types.EventBus,
  70. logger log.Logger,
  71. chainID string,
  72. ) (*indexer.Service, []indexer.EventSink, error) {
  73. eventSinks := []indexer.EventSink{}
  74. // check for duplicated sinks
  75. sinks := map[string]bool{}
  76. for _, s := range config.TxIndex.Indexer {
  77. sl := strings.ToLower(s)
  78. if sinks[sl] {
  79. return nil, nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
  80. }
  81. sinks[sl] = true
  82. }
  83. loop:
  84. for k := range sinks {
  85. switch k {
  86. case string(indexer.NULL):
  87. // When we see null in the config, the eventsinks will be reset with the
  88. // nullEventSink.
  89. eventSinks = []indexer.EventSink{null.NewEventSink()}
  90. break loop
  91. case string(indexer.KV):
  92. store, err := dbProvider(&cfg.DBContext{ID: "tx_index", Config: config})
  93. if err != nil {
  94. return nil, nil, err
  95. }
  96. eventSinks = append(eventSinks, kv.NewEventSink(store))
  97. case string(indexer.PSQL):
  98. conn := config.TxIndex.PsqlConn
  99. if conn == "" {
  100. return nil, nil, errors.New("the psql connection settings cannot be empty")
  101. }
  102. es, _, err := psql.NewEventSink(conn, chainID)
  103. if err != nil {
  104. return nil, nil, err
  105. }
  106. eventSinks = append(eventSinks, es)
  107. default:
  108. return nil, nil, errors.New("unsupported event sink type")
  109. }
  110. }
  111. if len(eventSinks) == 0 {
  112. eventSinks = []indexer.EventSink{null.NewEventSink()}
  113. }
  114. indexerService := indexer.NewIndexerService(eventSinks, eventBus)
  115. indexerService.SetLogger(logger.With("module", "txindex"))
  116. if err := indexerService.Start(); err != nil {
  117. return nil, nil, err
  118. }
  119. return indexerService, eventSinks, nil
  120. }
  121. func doHandshake(
  122. stateStore sm.Store,
  123. state sm.State,
  124. blockStore sm.BlockStore,
  125. genDoc *types.GenesisDoc,
  126. eventBus types.BlockEventPublisher,
  127. proxyApp proxy.AppConns,
  128. consensusLogger log.Logger) error {
  129. handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
  130. handshaker.SetLogger(consensusLogger)
  131. handshaker.SetEventBus(eventBus)
  132. if err := handshaker.Handshake(proxyApp); err != nil {
  133. return fmt.Errorf("error during handshake: %v", err)
  134. }
  135. return nil
  136. }
  137. func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) {
  138. // Log the version info.
  139. logger.Info("Version info",
  140. "tmVersion", version.TMVersion,
  141. "block", version.BlockProtocol,
  142. "p2p", version.P2PProtocol,
  143. "mode", mode,
  144. )
  145. // If the state and software differ in block version, at least log it.
  146. if state.Version.Consensus.Block != version.BlockProtocol {
  147. logger.Info("Software and state have different block protocols",
  148. "software", version.BlockProtocol,
  149. "state", state.Version.Consensus.Block,
  150. )
  151. }
  152. switch {
  153. case mode == cfg.ModeFull:
  154. consensusLogger.Info("This node is a fullnode")
  155. case mode == cfg.ModeValidator:
  156. addr := pubKey.Address()
  157. // Log whether this node is a validator or an observer
  158. if state.Validators.HasAddress(addr) {
  159. consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes())
  160. } else {
  161. consensusLogger.Info("This node is a validator (NOT in the active validator set)",
  162. "addr", addr, "pubKey", pubKey.Bytes())
  163. }
  164. }
  165. }
  166. func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
  167. if state.Validators.Size() > 1 {
  168. return false
  169. }
  170. addr, _ := state.Validators.GetByIndex(0)
  171. return pubKey != nil && bytes.Equal(pubKey.Address(), addr)
  172. }
// createMempoolReactor constructs the configured mempool (v0 or v1) and its
// reactor, together with the legacy reactor shim used when the old p2p
// stack is still enabled. It returns the shim, the reactor service, and the
// mempool itself.
func createMempoolReactor(
	config *cfg.Config,
	proxyApp proxy.AppConns,
	state sm.State,
	memplMetrics *mempool.Metrics,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	logger log.Logger,
) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) {
	logger = logger.With("module", "mempool", "version", config.Mempool.Version)

	// Channel shims are shared by both mempool versions.
	channelShims := mempoolv0.GetChannelShims(config.Mempool)
	reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims)

	var (
		channels    map[p2p.ChannelID]*p2p.Channel
		peerUpdates *p2p.PeerUpdates
	)

	// With the legacy stack disabled, channels and peer updates come from
	// the new router / peer manager; otherwise they are backed by the shim.
	if config.P2P.DisableLegacy {
		channels = makeChannelsFromShims(router, channelShims)
		peerUpdates = peerManager.Subscribe()
	} else {
		channels = getChannelsFromShim(reactorShim)
		peerUpdates = reactorShim.PeerUpdates
	}

	switch config.Mempool.Version {
	case cfg.MempoolV0:
		mp := mempoolv0.NewCListMempool(
			config.Mempool,
			proxyApp.Mempool(),
			state.LastBlockHeight,
			mempoolv0.WithMetrics(memplMetrics),
			mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
			mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
		)

		mp.SetLogger(logger)

		reactor := mempoolv0.NewReactor(
			logger,
			config.Mempool,
			peerManager,
			mp,
			channels[mempool.MempoolChannel],
			peerUpdates,
		)

		// Let consensus be notified when txs arrive if it is configured to
		// wait for transactions before proposing a block.
		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}

		return reactorShim, reactor, mp, nil

	case cfg.MempoolV1:
		// NOTE(review): unlike v0, the v1 mempool receives its logger via
		// the constructor; there is no separate SetLogger call here.
		mp := mempoolv1.NewTxMempool(
			logger,
			config.Mempool,
			proxyApp.Mempool(),
			state.LastBlockHeight,
			mempoolv1.WithMetrics(memplMetrics),
			mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
			mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
		)

		reactor := mempoolv1.NewReactor(
			logger,
			config.Mempool,
			peerManager,
			mp,
			channels[mempool.MempoolChannel],
			peerUpdates,
		)

		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}

		return reactorShim, reactor, mp, nil

	default:
		return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", config.Mempool.Version)
	}
}
  245. func createEvidenceReactor(
  246. config *cfg.Config,
  247. dbProvider cfg.DBProvider,
  248. stateDB dbm.DB,
  249. blockStore *store.BlockStore,
  250. peerManager *p2p.PeerManager,
  251. router *p2p.Router,
  252. logger log.Logger,
  253. ) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) {
  254. evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
  255. if err != nil {
  256. return nil, nil, nil, err
  257. }
  258. logger = logger.With("module", "evidence")
  259. reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims)
  260. evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore)
  261. if err != nil {
  262. return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err)
  263. }
  264. var (
  265. channels map[p2p.ChannelID]*p2p.Channel
  266. peerUpdates *p2p.PeerUpdates
  267. )
  268. if config.P2P.DisableLegacy {
  269. channels = makeChannelsFromShims(router, evidence.ChannelShims)
  270. peerUpdates = peerManager.Subscribe()
  271. } else {
  272. channels = getChannelsFromShim(reactorShim)
  273. peerUpdates = reactorShim.PeerUpdates
  274. }
  275. evidenceReactor := evidence.NewReactor(
  276. logger,
  277. channels[evidence.EvidenceChannel],
  278. peerUpdates,
  279. evidencePool,
  280. )
  281. return reactorShim, evidenceReactor, evidencePool, nil
  282. }
// createBlockchainReactor constructs the block-sync ("fast sync") reactor
// for the configured version. For v0 it also returns the legacy reactor
// shim; for v2 the shim is nil and the reactor only gets a logger attached.
func createBlockchainReactor(
	logger log.Logger,
	config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore *store.BlockStore,
	csReactor *cs.Reactor,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	fastSync bool,
	metrics *cs.Metrics,
) (*p2p.ReactorShim, service.Service, error) {
	logger = logger.With("module", "blockchain")

	switch config.FastSync.Version {
	case cfg.BlockchainV0:
		reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)

		var (
			channels    map[p2p.ChannelID]*p2p.Channel
			peerUpdates *p2p.PeerUpdates
		)

		// New p2p stack: channels come from the router; legacy: from the shim.
		if config.P2P.DisableLegacy {
			channels = makeChannelsFromShims(router, bcv0.ChannelShims)
			peerUpdates = peerManager.Subscribe()
		} else {
			channels = getChannelsFromShim(reactorShim)
			peerUpdates = reactorShim.PeerUpdates
		}

		reactor, err := bcv0.NewReactor(
			logger, state.Copy(), blockExec, blockStore, csReactor,
			channels[bcv0.BlockchainChannel], peerUpdates, fastSync,
			metrics,
		)
		if err != nil {
			return nil, nil, err
		}

		return reactorShim, reactor, nil

	case cfg.BlockchainV2:
		// NOTE(review): v2 is constructed without any shim or router
		// wiring — presumably it runs on the legacy stack only; confirm
		// before combining with P2P.DisableLegacy.
		reactor := bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync, metrics)
		reactor.SetLogger(logger)

		return nil, reactor, nil

	default:
		return nil, nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}
}
// createConsensusReactor builds the consensus state machine and its reactor,
// along with the legacy reactor shim. The private validator is attached only
// when the node runs in validator mode with a key. waitSync starts the
// reactor in sync-wait mode (e.g. while fast sync / state sync completes).
func createConsensusReactor(
	config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mp mempool.Mempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *cs.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	logger log.Logger,
) (*p2p.ReactorShim, *cs.Reactor, *cs.State) {
	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mp,
		evidencePool,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(logger)

	// Only a validator-mode node with a key may sign proposals and votes.
	if privValidator != nil && config.Mode == cfg.ModeValidator {
		consensusState.SetPrivValidator(privValidator)
	}

	reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims)

	var (
		channels    map[p2p.ChannelID]*p2p.Channel
		peerUpdates *p2p.PeerUpdates
	)

	// New p2p stack: channels come from the router; legacy: from the shim.
	if config.P2P.DisableLegacy {
		channels = makeChannelsFromShims(router, cs.ChannelShims)
		peerUpdates = peerManager.Subscribe()
	} else {
		channels = getChannelsFromShim(reactorShim)
		peerUpdates = reactorShim.PeerUpdates
	}

	reactor := cs.NewReactor(
		logger,
		consensusState,
		channels[cs.StateChannel],
		channels[cs.DataChannel],
		channels[cs.VoteChannel],
		channels[cs.VoteSetBitsChannel],
		peerUpdates,
		waitSync,
		cs.ReactorMetrics(csMetrics),
	)

	// Services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor.
	reactor.SetEventBus(eventBus)

	return reactorShim, reactor, consensusState
}
  383. func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport {
  384. return p2p.NewMConnTransport(
  385. logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{},
  386. p2p.MConnTransportOptions{
  387. MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers +
  388. len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")),
  389. ),
  390. },
  391. )
  392. }
  393. func createPeerManager(
  394. config *cfg.Config,
  395. dbProvider cfg.DBProvider,
  396. p2pLogger log.Logger,
  397. nodeID types.NodeID,
  398. ) (*p2p.PeerManager, error) {
  399. var maxConns uint16
  400. switch {
  401. case config.P2P.MaxConnections > 0:
  402. maxConns = config.P2P.MaxConnections
  403. case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0:
  404. x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers
  405. if x > math.MaxUint16 {
  406. return nil, fmt.Errorf(
  407. "max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)",
  408. config.P2P.MaxNumInboundPeers,
  409. config.P2P.MaxNumOutboundPeers,
  410. math.MaxUint16,
  411. )
  412. }
  413. maxConns = uint16(x)
  414. default:
  415. maxConns = 64
  416. }
  417. privatePeerIDs := make(map[types.NodeID]struct{})
  418. for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") {
  419. privatePeerIDs[types.NodeID(id)] = struct{}{}
  420. }
  421. options := p2p.PeerManagerOptions{
  422. MaxConnected: maxConns,
  423. MaxConnectedUpgrade: 4,
  424. MaxPeers: 1000,
  425. MinRetryTime: 100 * time.Millisecond,
  426. MaxRetryTime: 8 * time.Hour,
  427. MaxRetryTimePersistent: 5 * time.Minute,
  428. RetryTimeJitter: 3 * time.Second,
  429. PrivatePeers: privatePeerIDs,
  430. }
  431. peers := []p2p.NodeAddress{}
  432. for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") {
  433. address, err := p2p.ParseNodeAddress(p)
  434. if err != nil {
  435. return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
  436. }
  437. peers = append(peers, address)
  438. options.PersistentPeers = append(options.PersistentPeers, address.NodeID)
  439. }
  440. for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
  441. address, err := p2p.ParseNodeAddress(p)
  442. if err != nil {
  443. return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
  444. }
  445. peers = append(peers, address)
  446. }
  447. peerDB, err := dbProvider(&cfg.DBContext{ID: "peerstore", Config: config})
  448. if err != nil {
  449. return nil, err
  450. }
  451. peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options)
  452. if err != nil {
  453. return nil, fmt.Errorf("failed to create peer manager: %w", err)
  454. }
  455. for _, peer := range peers {
  456. if _, err := peerManager.Add(peer); err != nil {
  457. return nil, fmt.Errorf("failed to add peer %q: %w", peer, err)
  458. }
  459. }
  460. return peerManager, nil
  461. }
  462. func createRouter(
  463. p2pLogger log.Logger,
  464. p2pMetrics *p2p.Metrics,
  465. nodeInfo types.NodeInfo,
  466. privKey crypto.PrivKey,
  467. peerManager *p2p.PeerManager,
  468. transport p2p.Transport,
  469. options p2p.RouterOptions,
  470. ) (*p2p.Router, error) {
  471. return p2p.NewRouter(
  472. p2pLogger,
  473. p2pMetrics,
  474. nodeInfo,
  475. privKey,
  476. peerManager,
  477. []p2p.Transport{transport},
  478. options,
  479. )
  480. }
// createSwitch assembles the legacy p2p switch: it installs connection and
// peer filters (including optional ABCI-query-backed filtering), registers
// all reactors unless the node runs as a seed (seeds only do PEX), and
// attaches the node's identity (NodeInfo/NodeKey).
func createSwitch(
	config *cfg.Config,
	transport p2p.Transport,
	p2pMetrics *p2p.Metrics,
	mempoolReactor *p2p.ReactorShim,
	bcReactor p2p.Reactor,
	stateSyncReactor *p2p.ReactorShim,
	consensusReactor *p2p.ReactorShim,
	evidenceReactor *p2p.ReactorShim,
	proxyApp proxy.AppConns,
	nodeInfo types.NodeInfo,
	nodeKey types.NodeKey,
	p2pLogger log.Logger,
) *p2p.Switch {
	var (
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	// Reject multiple connections from the same IP unless explicitly allowed.
	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter)
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				// NOTE(review): context.Background() means the query is not
				// bounded by any timeout — confirm this is intended.
				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
		p2p.SwitchConnFilters(connFilters...),
	)

	sw.SetLogger(p2pLogger)

	// Seed nodes run PEX only; every other mode gets the full reactor set.
	if config.Mode != cfg.ModeSeed {
		sw.AddReactor("MEMPOOL", mempoolReactor)
		sw.AddReactor("BLOCKCHAIN", bcReactor)
		sw.AddReactor("CONSENSUS", consensusReactor)
		sw.AddReactor("EVIDENCE", evidenceReactor)
		sw.AddReactor("STATESYNC", stateSyncReactor)
	}

	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile())
	return sw
}
  558. func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
  559. p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) {
  560. addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
  561. addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
  562. // Add ourselves to addrbook to prevent dialing ourselves
  563. if config.P2P.ExternalAddress != "" {
  564. addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress))
  565. if err != nil {
  566. return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
  567. }
  568. addrBook.AddOurAddress(addr)
  569. }
  570. if config.P2P.ListenAddress != "" {
  571. addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress))
  572. if err != nil {
  573. return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
  574. }
  575. addrBook.AddOurAddress(addr)
  576. }
  577. sw.SetAddrBook(addrBook)
  578. return addrBook, nil
  579. }
  580. func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
  581. sw *p2p.Switch, logger log.Logger) *pex.Reactor {
  582. reactorConfig := &pex.ReactorConfig{
  583. Seeds: tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "),
  584. SeedMode: config.Mode == cfg.ModeSeed,
  585. // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
  586. // blocks assuming 10s blocks ~ 28 hours.
  587. // TODO (melekes): make it dynamic based on the actual block latencies
  588. // from the live network.
  589. // https://github.com/tendermint/tendermint/issues/3523
  590. SeedDisconnectWaitPeriod: 28 * time.Hour,
  591. PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
  592. }
  593. // TODO persistent peers ? so we can have their DNS addrs saved
  594. pexReactor := pex.NewReactor(addrBook, reactorConfig)
  595. pexReactor.SetLogger(logger.With("module", "pex"))
  596. sw.AddReactor("PEX", pexReactor)
  597. return pexReactor
  598. }
  599. func createPEXReactorV2(
  600. config *cfg.Config,
  601. logger log.Logger,
  602. peerManager *p2p.PeerManager,
  603. router *p2p.Router,
  604. ) (*pex.ReactorV2, error) {
  605. channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128)
  606. if err != nil {
  607. return nil, err
  608. }
  609. peerUpdates := peerManager.Subscribe()
  610. return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil
  611. }
// makeNodeInfo assembles the NodeInfo advertised to peers during the p2p
// handshake: protocol versions, node identity, supported channels, and the
// listen address (external address when set, otherwise the listen address).
// The result is validated before being returned.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey types.NodeKey,
	eventSinks []indexer.EventSink,
	genDoc *types.GenesisDoc,
	state sm.State,
) (types.NodeInfo, error) {
	// Advertise whether tx indexing is active on this node.
	txIndexerStatus := "off"
	if indexer.IndexingEnabled(eventSinks) {
		txIndexerStatus = "on"
	}

	// The blockchain channel ID depends on the configured fast-sync version.
	var bcChannel byte
	switch config.FastSync.Version {
	case cfg.BlockchainV0:
		bcChannel = byte(bcv0.BlockchainChannel)

	case cfg.BlockchainV2:
		bcChannel = bcv2.BlockchainChannel

	default:
		return types.NodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := types.NodeInfo{
		ProtocolVersion: types.ProtocolVersion{
			P2P:   version.P2PProtocol, // global
			Block: state.Version.Consensus.Block,
			App:   state.Version.Consensus.App,
		},
		NodeID:  nodeKey.ID,
		Network: genDoc.ChainID,
		Version: version.TMVersion,
		Channels: []byte{
			bcChannel,
			byte(cs.StateChannel),
			byte(cs.DataChannel),
			byte(cs.VoteChannel),
			byte(cs.VoteSetBitsChannel),
			byte(mempool.MempoolChannel),
			byte(evidence.EvidenceChannel),
			byte(statesync.SnapshotChannel),
			byte(statesync.ChunkChannel),
			byte(statesync.LightBlockChannel),
		},
		Moniker: config.Moniker,
		Other: types.NodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	// The PEX channel is advertised only when the PEX reactor is enabled.
	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the configured external address; fall back to the listen address.
	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
  670. func makeSeedNodeInfo(
  671. config *cfg.Config,
  672. nodeKey types.NodeKey,
  673. genDoc *types.GenesisDoc,
  674. state sm.State,
  675. ) (types.NodeInfo, error) {
  676. nodeInfo := types.NodeInfo{
  677. ProtocolVersion: types.ProtocolVersion{
  678. P2P: version.P2PProtocol, // global
  679. Block: state.Version.Consensus.Block,
  680. App: state.Version.Consensus.App,
  681. },
  682. NodeID: nodeKey.ID,
  683. Network: genDoc.ChainID,
  684. Version: version.TMVersion,
  685. Channels: []byte{},
  686. Moniker: config.Moniker,
  687. Other: types.NodeInfoOther{
  688. TxIndex: "off",
  689. RPCAddress: config.RPC.ListenAddress,
  690. },
  691. }
  692. if config.P2P.PexReactor {
  693. nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  694. }
  695. lAddr := config.P2P.ExternalAddress
  696. if lAddr == "" {
  697. lAddr = config.P2P.ListenAddress
  698. }
  699. nodeInfo.ListenAddr = lAddr
  700. err := nodeInfo.Validate()
  701. return nodeInfo, err
  702. }