You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

814 lines
21 KiB

  1. package node
  2. import (
  3. "bytes"
  4. "context"
  5. "errors"
  6. "fmt"
  7. "math"
  8. "net"
  9. _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
  10. "strings"
  11. "time"
  12. dbm "github.com/tendermint/tm-db"
  13. abci "github.com/tendermint/tendermint/abci/types"
  14. cfg "github.com/tendermint/tendermint/config"
  15. "github.com/tendermint/tendermint/crypto"
  16. bcv0 "github.com/tendermint/tendermint/internal/blockchain/v0"
  17. bcv2 "github.com/tendermint/tendermint/internal/blockchain/v2"
  18. cs "github.com/tendermint/tendermint/internal/consensus"
  19. "github.com/tendermint/tendermint/internal/evidence"
  20. "github.com/tendermint/tendermint/internal/mempool"
  21. mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
  22. mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1"
  23. "github.com/tendermint/tendermint/internal/p2p"
  24. "github.com/tendermint/tendermint/internal/p2p/pex"
  25. "github.com/tendermint/tendermint/internal/statesync"
  26. "github.com/tendermint/tendermint/libs/log"
  27. "github.com/tendermint/tendermint/libs/service"
  28. tmstrings "github.com/tendermint/tendermint/libs/strings"
  29. protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
  30. "github.com/tendermint/tendermint/proxy"
  31. sm "github.com/tendermint/tendermint/state"
  32. "github.com/tendermint/tendermint/state/indexer"
  33. kv "github.com/tendermint/tendermint/state/indexer/sink/kv"
  34. null "github.com/tendermint/tendermint/state/indexer/sink/null"
  35. psql "github.com/tendermint/tendermint/state/indexer/sink/psql"
  36. "github.com/tendermint/tendermint/store"
  37. "github.com/tendermint/tendermint/types"
  38. "github.com/tendermint/tendermint/version"
  39. )
  40. func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
  41. var blockStoreDB dbm.DB
  42. blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
  43. if err != nil {
  44. return
  45. }
  46. blockStore = store.NewBlockStore(blockStoreDB)
  47. stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
  48. return
  49. }
  50. func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
  51. proxyApp := proxy.NewAppConns(clientCreator)
  52. proxyApp.SetLogger(logger.With("module", "proxy"))
  53. if err := proxyApp.Start(); err != nil {
  54. return nil, fmt.Errorf("error starting proxy app connections: %v", err)
  55. }
  56. return proxyApp, nil
  57. }
  58. func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
  59. eventBus := types.NewEventBus()
  60. eventBus.SetLogger(logger.With("module", "events"))
  61. if err := eventBus.Start(); err != nil {
  62. return nil, err
  63. }
  64. return eventBus, nil
  65. }
  66. func createAndStartIndexerService(
  67. config *cfg.Config,
  68. dbProvider cfg.DBProvider,
  69. eventBus *types.EventBus,
  70. logger log.Logger,
  71. chainID string,
  72. ) (*indexer.Service, []indexer.EventSink, error) {
  73. eventSinks := []indexer.EventSink{}
  74. // Check duplicated sinks.
  75. sinks := map[string]bool{}
  76. for _, s := range config.TxIndex.Indexer {
  77. sl := strings.ToLower(s)
  78. if sinks[sl] {
  79. return nil, nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
  80. }
  81. sinks[sl] = true
  82. }
  83. loop:
  84. for k := range sinks {
  85. switch k {
  86. case string(indexer.NULL):
  87. // when we see null in the config, the eventsinks will be reset with the nullEventSink.
  88. eventSinks = []indexer.EventSink{null.NewEventSink()}
  89. break loop
  90. case string(indexer.KV):
  91. store, err := dbProvider(&cfg.DBContext{ID: "tx_index", Config: config})
  92. if err != nil {
  93. return nil, nil, err
  94. }
  95. eventSinks = append(eventSinks, kv.NewEventSink(store))
  96. case string(indexer.PSQL):
  97. conn := config.TxIndex.PsqlConn
  98. if conn == "" {
  99. return nil, nil, errors.New("the psql connection settings cannot be empty")
  100. }
  101. es, _, err := psql.NewEventSink(conn, chainID)
  102. if err != nil {
  103. return nil, nil, err
  104. }
  105. eventSinks = append(eventSinks, es)
  106. default:
  107. return nil, nil, errors.New("unsupported event sink type")
  108. }
  109. }
  110. if len(eventSinks) == 0 {
  111. eventSinks = []indexer.EventSink{null.NewEventSink()}
  112. }
  113. indexerService := indexer.NewIndexerService(eventSinks, eventBus)
  114. indexerService.SetLogger(logger.With("module", "txindex"))
  115. if err := indexerService.Start(); err != nil {
  116. return nil, nil, err
  117. }
  118. return indexerService, eventSinks, nil
  119. }
  120. func doHandshake(
  121. stateStore sm.Store,
  122. state sm.State,
  123. blockStore sm.BlockStore,
  124. genDoc *types.GenesisDoc,
  125. eventBus types.BlockEventPublisher,
  126. proxyApp proxy.AppConns,
  127. consensusLogger log.Logger) error {
  128. handshaker := cs.NewHandshaker(stateStore, state, blockStore, genDoc)
  129. handshaker.SetLogger(consensusLogger)
  130. handshaker.SetEventBus(eventBus)
  131. if err := handshaker.Handshake(proxyApp); err != nil {
  132. return fmt.Errorf("error during handshake: %v", err)
  133. }
  134. return nil
  135. }
  136. func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusLogger log.Logger, mode string) {
  137. // Log the version info.
  138. logger.Info("Version info",
  139. "tmVersion", version.TMVersion,
  140. "block", version.BlockProtocol,
  141. "p2p", version.P2PProtocol,
  142. "mode", mode,
  143. )
  144. // If the state and software differ in block version, at least log it.
  145. if state.Version.Consensus.Block != version.BlockProtocol {
  146. logger.Info("Software and state have different block protocols",
  147. "software", version.BlockProtocol,
  148. "state", state.Version.Consensus.Block,
  149. )
  150. }
  151. switch {
  152. case mode == cfg.ModeFull:
  153. consensusLogger.Info("This node is a fullnode")
  154. case mode == cfg.ModeValidator:
  155. addr := pubKey.Address()
  156. // Log whether this node is a validator or an observer
  157. if state.Validators.HasAddress(addr) {
  158. consensusLogger.Info("This node is a validator", "addr", addr, "pubKey", pubKey.Bytes())
  159. } else {
  160. consensusLogger.Info("This node is a validator (NOT in the active validator set)",
  161. "addr", addr, "pubKey", pubKey.Bytes())
  162. }
  163. }
  164. }
  165. func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
  166. if state.Validators.Size() > 1 {
  167. return false
  168. }
  169. addr, _ := state.Validators.GetByIndex(0)
  170. return pubKey != nil && bytes.Equal(pubKey.Address(), addr)
  171. }
// createMempoolReactor constructs the mempool (v0 or v1, per config) and its
// reactor, wiring them either to the new p2p router or to the legacy reactor
// shim depending on config.P2P.DisableLegacy. It returns the shim (used only
// by the legacy stack), the reactor service, and the mempool itself.
func createMempoolReactor(
	config *cfg.Config,
	proxyApp proxy.AppConns,
	state sm.State,
	memplMetrics *mempool.Metrics,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	logger log.Logger,
) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) {
	logger = logger.With("module", "mempool", "version", config.Mempool.Version)

	// The v0 channel shims are used for both mempool versions.
	channelShims := mempoolv0.GetChannelShims(config.Mempool)
	reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims)

	var (
		channels    map[p2p.ChannelID]*p2p.Channel
		peerUpdates *p2p.PeerUpdates
	)

	// Choose the wiring: new router vs. legacy shim.
	if config.P2P.DisableLegacy {
		channels = makeChannelsFromShims(router, channelShims)
		peerUpdates = peerManager.Subscribe()
	} else {
		channels = getChannelsFromShim(reactorShim)
		peerUpdates = reactorShim.PeerUpdates
	}

	switch config.Mempool.Version {
	case cfg.MempoolV0:
		mp := mempoolv0.NewCListMempool(
			config.Mempool,
			proxyApp.Mempool(),
			state.LastBlockHeight,
			mempoolv0.WithMetrics(memplMetrics),
			mempoolv0.WithPreCheck(sm.TxPreCheck(state)),
			mempoolv0.WithPostCheck(sm.TxPostCheck(state)),
		)

		mp.SetLogger(logger)

		reactor := mempoolv0.NewReactor(
			logger,
			config.Mempool,
			peerManager,
			mp,
			channels[mempool.MempoolChannel],
			peerUpdates,
		)

		// Consensus may be configured to wait for txs before proposing.
		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}

		return reactorShim, reactor, mp, nil

	case cfg.MempoolV1:
		// NOTE: v1's constructor takes the logger directly, so no SetLogger
		// call is made here (unlike v0).
		mp := mempoolv1.NewTxMempool(
			logger,
			config.Mempool,
			proxyApp.Mempool(),
			state.LastBlockHeight,
			mempoolv1.WithMetrics(memplMetrics),
			mempoolv1.WithPreCheck(sm.TxPreCheck(state)),
			mempoolv1.WithPostCheck(sm.TxPostCheck(state)),
		)

		reactor := mempoolv1.NewReactor(
			logger,
			config.Mempool,
			peerManager,
			mp,
			channels[mempool.MempoolChannel],
			peerUpdates,
		)

		// Consensus may be configured to wait for txs before proposing.
		if config.Consensus.WaitForTxs() {
			mp.EnableTxsAvailable()
		}

		return reactorShim, reactor, mp, nil

	default:
		return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", config.Mempool.Version)
	}
}
  244. func createEvidenceReactor(
  245. config *cfg.Config,
  246. dbProvider cfg.DBProvider,
  247. stateDB dbm.DB,
  248. blockStore *store.BlockStore,
  249. peerManager *p2p.PeerManager,
  250. router *p2p.Router,
  251. logger log.Logger,
  252. ) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) {
  253. evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
  254. if err != nil {
  255. return nil, nil, nil, err
  256. }
  257. logger = logger.With("module", "evidence")
  258. reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims)
  259. evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore)
  260. if err != nil {
  261. return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err)
  262. }
  263. var (
  264. channels map[p2p.ChannelID]*p2p.Channel
  265. peerUpdates *p2p.PeerUpdates
  266. )
  267. if config.P2P.DisableLegacy {
  268. channels = makeChannelsFromShims(router, evidence.ChannelShims)
  269. peerUpdates = peerManager.Subscribe()
  270. } else {
  271. channels = getChannelsFromShim(reactorShim)
  272. peerUpdates = reactorShim.PeerUpdates
  273. }
  274. evidenceReactor := evidence.NewReactor(
  275. logger,
  276. channels[evidence.EvidenceChannel],
  277. peerUpdates,
  278. evidencePool,
  279. )
  280. return reactorShim, evidenceReactor, evidencePool, nil
  281. }
// createBlockchainReactor builds the fast-sync (blockchain) reactor for the
// configured version. For v0 it also returns the legacy reactor shim; for v2
// no shim is used and the first return value is nil.
func createBlockchainReactor(
	logger log.Logger,
	config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore *store.BlockStore,
	csReactor *cs.Reactor,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	fastSync bool,
) (*p2p.ReactorShim, service.Service, error) {
	logger = logger.With("module", "blockchain")

	switch config.FastSync.Version {
	case cfg.BlockchainV0:
		reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)

		var (
			channels    map[p2p.ChannelID]*p2p.Channel
			peerUpdates *p2p.PeerUpdates
		)

		// New router vs. legacy shim wiring, as elsewhere in this file.
		if config.P2P.DisableLegacy {
			channels = makeChannelsFromShims(router, bcv0.ChannelShims)
			peerUpdates = peerManager.Subscribe()
		} else {
			channels = getChannelsFromShim(reactorShim)
			peerUpdates = reactorShim.PeerUpdates
		}

		// The reactor receives a copy of the state, so it cannot mutate the
		// caller's value.
		reactor, err := bcv0.NewReactor(
			logger, state.Copy(), blockExec, blockStore, csReactor,
			channels[bcv0.BlockchainChannel], peerUpdates, fastSync,
		)
		if err != nil {
			return nil, nil, err
		}

		return reactorShim, reactor, nil

	case cfg.BlockchainV2:
		// v2 does not use the reactor shim; the shim result is nil.
		reactor := bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
		reactor.SetLogger(logger)

		return nil, reactor, nil

	default:
		return nil, nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}
}
// createConsensusReactor builds the consensus state machine and its reactor.
// The private validator is installed only when one is provided AND the node
// runs in validator mode. The reactor is wired to the new p2p router or the
// legacy shim depending on config.P2P.DisableLegacy, and the event bus is
// attached to the reactor before returning.
func createConsensusReactor(
	config *cfg.Config,
	state sm.State,
	blockExec *sm.BlockExecutor,
	blockStore sm.BlockStore,
	mp mempool.Mempool,
	evidencePool *evidence.Pool,
	privValidator types.PrivValidator,
	csMetrics *cs.Metrics,
	waitSync bool,
	eventBus *types.EventBus,
	peerManager *p2p.PeerManager,
	router *p2p.Router,
	logger log.Logger,
) (*p2p.ReactorShim, *cs.Reactor, *cs.State) {
	// The consensus state operates on its own copy of the node state.
	consensusState := cs.NewState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mp,
		evidencePool,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(logger)

	// Only install the signer when this node actually runs as a validator.
	if privValidator != nil && config.Mode == cfg.ModeValidator {
		consensusState.SetPrivValidator(privValidator)
	}

	reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims)

	var (
		channels    map[p2p.ChannelID]*p2p.Channel
		peerUpdates *p2p.PeerUpdates
	)

	// New router vs. legacy shim wiring, as elsewhere in this file.
	if config.P2P.DisableLegacy {
		channels = makeChannelsFromShims(router, cs.ChannelShims)
		peerUpdates = peerManager.Subscribe()
	} else {
		channels = getChannelsFromShim(reactorShim)
		peerUpdates = reactorShim.PeerUpdates
	}

	reactor := cs.NewReactor(
		logger,
		consensusState,
		channels[cs.StateChannel],
		channels[cs.DataChannel],
		channels[cs.VoteChannel],
		channels[cs.VoteSetBitsChannel],
		peerUpdates,
		waitSync,
		cs.ReactorMetrics(csMetrics),
	)

	// Services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor.
	reactor.SetEventBus(eventBus)

	return reactorShim, reactor, consensusState
}
  380. func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport {
  381. return p2p.NewMConnTransport(
  382. logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{},
  383. p2p.MConnTransportOptions{
  384. MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers +
  385. len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")),
  386. ),
  387. },
  388. )
  389. }
// createPeerManager constructs the new-stack peer manager: it resolves the
// maximum connection count from config, parses the private, persistent, and
// bootstrap peer lists, opens the peer store DB, and seeds the manager with
// all configured peer addresses.
func createPeerManager(
	config *cfg.Config,
	dbProvider cfg.DBProvider,
	p2pLogger log.Logger,
	nodeID p2p.NodeID,
) (*p2p.PeerManager, error) {
	// Resolve the connection cap: an explicit MaxConnections wins, otherwise
	// the legacy inbound+outbound pair (guarded against uint16 overflow),
	// otherwise a default of 64.
	var maxConns uint16
	switch {
	case config.P2P.MaxConnections > 0:
		maxConns = config.P2P.MaxConnections

	case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0:
		x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers
		if x > math.MaxUint16 {
			return nil, fmt.Errorf(
				"max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)",
				config.P2P.MaxNumInboundPeers,
				config.P2P.MaxNumOutboundPeers,
				math.MaxUint16,
			)
		}

		maxConns = uint16(x)

	default:
		maxConns = 64
	}

	privatePeerIDs := make(map[p2p.NodeID]struct{})
	for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") {
		privatePeerIDs[p2p.NodeID(id)] = struct{}{}
	}

	options := p2p.PeerManagerOptions{
		MaxConnected:           maxConns,
		MaxConnectedUpgrade:    4,
		MaxPeers:               1000,
		MinRetryTime:           100 * time.Millisecond,
		MaxRetryTime:           8 * time.Hour,
		MaxRetryTimePersistent: 5 * time.Minute,
		RetryTimeJitter:        3 * time.Second,
		PrivatePeers:           privatePeerIDs,
	}

	peers := []p2p.NodeAddress{}

	// Persistent peers are both added to the manager below and recorded as
	// persistent in the options.
	for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") {
		address, err := p2p.ParseNodeAddress(p)
		if err != nil {
			return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
		}

		peers = append(peers, address)
		options.PersistentPeers = append(options.PersistentPeers, address.NodeID)
	}

	// Bootstrap peers are added but not marked persistent.
	for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
		address, err := p2p.ParseNodeAddress(p)
		if err != nil {
			return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
		}
		peers = append(peers, address)
	}

	peerDB, err := dbProvider(&cfg.DBContext{ID: "peerstore", Config: config})
	if err != nil {
		return nil, err
	}

	peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options)
	if err != nil {
		return nil, fmt.Errorf("failed to create peer manager: %w", err)
	}

	// Seed the manager with every configured peer address.
	for _, peer := range peers {
		if _, err := peerManager.Add(peer); err != nil {
			return nil, fmt.Errorf("failed to add peer %q: %w", peer, err)
		}
	}

	return peerManager, nil
}
  459. func createRouter(
  460. p2pLogger log.Logger,
  461. p2pMetrics *p2p.Metrics,
  462. nodeInfo p2p.NodeInfo,
  463. privKey crypto.PrivKey,
  464. peerManager *p2p.PeerManager,
  465. transport p2p.Transport,
  466. options p2p.RouterOptions,
  467. ) (*p2p.Router, error) {
  468. return p2p.NewRouter(
  469. p2pLogger,
  470. p2pMetrics,
  471. nodeInfo,
  472. privKey,
  473. peerManager,
  474. []p2p.Transport{transport},
  475. options,
  476. )
  477. }
// createSwitch assembles the legacy p2p switch: connection and peer filters
// (including optional ABCI-query-based filtering), reactor registration
// (skipped entirely in seed mode), and the node's identity info and key.
func createSwitch(
	config *cfg.Config,
	transport p2p.Transport,
	p2pMetrics *p2p.Metrics,
	mempoolReactor *p2p.ReactorShim,
	bcReactor p2p.Reactor,
	stateSyncReactor *p2p.ReactorShim,
	consensusReactor *p2p.ReactorShim,
	evidenceReactor *p2p.ReactorShim,
	proxyApp proxy.AppConns,
	nodeInfo p2p.NodeInfo,
	nodeKey p2p.NodeKey,
	p2pLogger log.Logger,
) *p2p.Switch {
	var (
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter)
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
		p2p.SwitchConnFilters(connFilters...),
	)

	sw.SetLogger(p2pLogger)

	// Seed nodes skip the full reactor set (PEX is added separately).
	if config.Mode != cfg.ModeSeed {
		sw.AddReactor("MEMPOOL", mempoolReactor)
		sw.AddReactor("BLOCKCHAIN", bcReactor)
		sw.AddReactor("CONSENSUS", consensusReactor)
		sw.AddReactor("EVIDENCE", evidenceReactor)
		sw.AddReactor("STATESYNC", stateSyncReactor)
	}

	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile())

	return sw
}
  555. func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
  556. p2pLogger log.Logger, nodeKey p2p.NodeKey) (pex.AddrBook, error) {
  557. addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
  558. addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
  559. // Add ourselves to addrbook to prevent dialing ourselves
  560. if config.P2P.ExternalAddress != "" {
  561. addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ExternalAddress))
  562. if err != nil {
  563. return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
  564. }
  565. addrBook.AddOurAddress(addr)
  566. }
  567. if config.P2P.ListenAddress != "" {
  568. addr, err := p2p.NewNetAddressString(p2p.IDAddressString(nodeKey.ID, config.P2P.ListenAddress))
  569. if err != nil {
  570. return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
  571. }
  572. addrBook.AddOurAddress(addr)
  573. }
  574. sw.SetAddrBook(addrBook)
  575. return addrBook, nil
  576. }
  577. func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
  578. sw *p2p.Switch, logger log.Logger) *pex.Reactor {
  579. reactorConfig := &pex.ReactorConfig{
  580. Seeds: tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "),
  581. SeedMode: config.Mode == cfg.ModeSeed,
  582. // See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
  583. // blocks assuming 10s blocks ~ 28 hours.
  584. // TODO (melekes): make it dynamic based on the actual block latencies
  585. // from the live network.
  586. // https://github.com/tendermint/tendermint/issues/3523
  587. SeedDisconnectWaitPeriod: 28 * time.Hour,
  588. PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
  589. }
  590. // TODO persistent peers ? so we can have their DNS addrs saved
  591. pexReactor := pex.NewReactor(addrBook, reactorConfig)
  592. pexReactor.SetLogger(logger.With("module", "pex"))
  593. sw.AddReactor("PEX", pexReactor)
  594. return pexReactor
  595. }
  596. func createPEXReactorV2(
  597. config *cfg.Config,
  598. logger log.Logger,
  599. peerManager *p2p.PeerManager,
  600. router *p2p.Router,
  601. ) (*pex.ReactorV2, error) {
  602. channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 4096)
  603. if err != nil {
  604. return nil, err
  605. }
  606. peerUpdates := peerManager.Subscribe()
  607. return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil
  608. }
// makeNodeInfo assembles the NodeInfo this node advertises to peers:
// protocol versions, supported channel IDs (the blockchain channel depends on
// the fast-sync version, PEX is appended only when enabled), tx-index status,
// and the listen address (external address takes precedence). The result is
// validated before being returned.
func makeNodeInfo(
	config *cfg.Config,
	nodeKey p2p.NodeKey,
	eventSinks []indexer.EventSink,
	genDoc *types.GenesisDoc,
	state sm.State,
) (p2p.NodeInfo, error) {
	txIndexerStatus := "off"
	if indexer.IndexingEnabled(eventSinks) {
		txIndexerStatus = "on"
	}

	// The advertised blockchain channel ID depends on the fast-sync version.
	var bcChannel byte
	switch config.FastSync.Version {
	case cfg.BlockchainV0:
		bcChannel = byte(bcv0.BlockchainChannel)

	case cfg.BlockchainV2:
		bcChannel = bcv2.BlockchainChannel

	default:
		return p2p.NodeInfo{}, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version)
	}

	nodeInfo := p2p.NodeInfo{
		ProtocolVersion: p2p.NewProtocolVersion(
			version.P2PProtocol, // global
			state.Version.Consensus.Block,
			state.Version.Consensus.App,
		),
		NodeID:  nodeKey.ID,
		Network: genDoc.ChainID,
		Version: version.TMVersion,
		Channels: []byte{
			bcChannel,
			byte(cs.StateChannel),
			byte(cs.DataChannel),
			byte(cs.VoteChannel),
			byte(cs.VoteSetBitsChannel),
			byte(mempool.MempoolChannel),
			byte(evidence.EvidenceChannel),
			byte(statesync.SnapshotChannel),
			byte(statesync.ChunkChannel),
			byte(statesync.LightBlockChannel),
		},
		Moniker: config.Moniker,
		Other: p2p.NodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	// Advertise the PEX channel only when the PEX reactor is enabled.
	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	// Prefer the configured external address; fall back to the listen addr.
	lAddr := config.P2P.ExternalAddress
	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	err := nodeInfo.Validate()
	return nodeInfo, err
}
  667. func makeSeedNodeInfo(
  668. config *cfg.Config,
  669. nodeKey p2p.NodeKey,
  670. genDoc *types.GenesisDoc,
  671. state sm.State,
  672. ) (p2p.NodeInfo, error) {
  673. nodeInfo := p2p.NodeInfo{
  674. ProtocolVersion: p2p.NewProtocolVersion(
  675. version.P2PProtocol, // global
  676. state.Version.Consensus.Block,
  677. state.Version.Consensus.App,
  678. ),
  679. NodeID: nodeKey.ID,
  680. Network: genDoc.ChainID,
  681. Version: version.TMVersion,
  682. Channels: []byte{},
  683. Moniker: config.Moniker,
  684. Other: p2p.NodeInfoOther{
  685. TxIndex: "off",
  686. RPCAddress: config.RPC.ListenAddress,
  687. },
  688. }
  689. if config.P2P.PexReactor {
  690. nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
  691. }
  692. lAddr := config.P2P.ExternalAddress
  693. if lAddr == "" {
  694. lAddr = config.P2P.ListenAddress
  695. }
  696. nodeInfo.ListenAddr = lAddr
  697. err := nodeInfo.Validate()
  698. return nodeInfo, err
  699. }