

max-bytes PR follow-up (#2318)

* ReapMaxTxs: return all txs if max is negative

  this mirrors ReapMaxBytes behavior

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214439950

* increase MaxAminoOverheadForBlock

  tested with:

```
func TestMaxAminoOverheadForBlock(t *testing.T) {
	maxChainID := ""
	for i := 0; i < MaxChainIDLen; i++ {
		maxChainID += "𠜎"
	}

	h := Header{
		ChainID:            maxChainID,
		Height:             10,
		Time:               time.Now().UTC(),
		NumTxs:             100,
		TotalTxs:           200,
		LastBlockID:        makeBlockID(make([]byte, 20), 300, make([]byte, 20)),
		LastCommitHash:     tmhash.Sum([]byte("last_commit_hash")),
		DataHash:           tmhash.Sum([]byte("data_hash")),
		ValidatorsHash:     tmhash.Sum([]byte("validators_hash")),
		NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
		ConsensusHash:      tmhash.Sum([]byte("consensus_hash")),
		AppHash:            tmhash.Sum([]byte("app_hash")),
		LastResultsHash:    tmhash.Sum([]byte("last_results_hash")),
		EvidenceHash:       tmhash.Sum([]byte("evidence_hash")),
		ProposerAddress:    tmhash.Sum([]byte("proposer_address")),
	}

	b := Block{
		Header:     h,
		Data:       Data{Txs: makeTxs(10000, 100)},
		Evidence:   EvidenceData{},
		LastCommit: &Commit{},
	}

	bz, err := cdc.MarshalBinary(b)
	require.NoError(t, err)

	assert.Equal(t, MaxHeaderBytes+MaxAminoOverheadForBlock-2, len(bz)-1000000-20000-1)
}
```

* fix MaxYYY constants calculation by using math.MaxInt64

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214444244

* pass mempool filter as an option

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214445869

* fixes after Dev's comments
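The first bullet describes the new ReapMaxTxs semantics; a minimal sketch of that behavior, assuming a *mempl.Mempool built as in NewNode below (reapAll is a hypothetical helper, not part of this file):

```go
// reapAll returns every transaction currently in the mempool: per the commit
// message above, a negative max tells ReapMaxTxs to apply no limit, mirroring
// how ReapMaxBytes treats a negative max-bytes value.
func reapAll(mem *mempl.Mempool) types.Txs {
	return mem.ReapMaxTxs(-1)
}
```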
package node

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"github.com/tendermint/go-amino"
	abci "github.com/tendermint/tendermint/abci/types"
	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cs "github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/evidence"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/pex"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	grpccore "github.com/tendermint/tendermint/rpc/grpc"
	"github.com/tendermint/tendermint/rpc/lib/server"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/state/txindex/kv"
	"github.com/tendermint/tendermint/state/txindex/null"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	"github.com/tendermint/tendermint/version"
)

//------------------------------------------------------------------------------

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string
	Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.DBBackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}
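
// For illustration, a GenesisDocProvider does not have to read from disk; a
// provider serving an in-memory JSON document might look roughly like this
// (inMemoryGenesisProvider is a hypothetical helper, not part of this file):
//
//	func inMemoryGenesisProvider(genesisJSON []byte) GenesisDocProvider {
//		return func() (*types.GenesisDoc, error) {
//			return types.GenesisDocFromJSON(genesisJSON)
//		}
//	}
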
// NodeProvider takes a config and a logger and returns a ready to go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)

// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
	// Generate node PrivKey
	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, err
	}
	return NewNode(config,
		privval.LoadOrGenFilePV(config.PrivValidatorFile()),
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
	)
}
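
// A minimal usage sketch (assuming a node home directory that already contains
// the usual config, genesis and key files, and an "os" import for stdout):
//
//	config := cfg.DefaultConfig()
//	config.SetRoot("/path/to/node/home") // hypothetical home directory
//	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
//	n, err := DefaultNewNode(config, logger)
//	if err != nil {
//		// handle error
//	}
//	if err := n.Start(); err != nil {
//		// handle error
//	}
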
// MetricsProvider returns consensus, p2p, mempool and state Metrics.
type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics)

// DefaultMetricsProvider returns Metrics built using the Prometheus client
// library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
	return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics, *sm.Metrics) {
		if config.Prometheus {
			return cs.PrometheusMetrics(config.Namespace), p2p.PrometheusMetrics(config.Namespace),
				mempl.PrometheusMetrics(config.Namespace), sm.PrometheusMetrics(config.Namespace)
		}
		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics(), sm.NopMetrics()
	}
}
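
// Prometheus metrics are switched on via the [instrumentation] section of
// config.toml, which maps onto cfg.InstrumentationConfig; the exact defaults
// vary by release, but the knobs are roughly along these lines:
//
//	[instrumentation]
//	prometheus = true
//	prometheus_listen_addr = ":26660"
//	namespace = "tendermint"
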
//------------------------------------------------------------------------------

// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	cmn.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus         *types.EventBus // pub/sub for services
	stateDB          dbm.DB
	blockStore       *bc.BlockStore         // store the blockchain to disk
	bcReactor        *bc.BlockchainReactor  // for fast-syncing
	mempoolReactor   *mempl.MempoolReactor  // for gossipping transactions
	consensusState   *cs.ConsensusState     // latest consensus state
	consensusReactor *cs.ConsensusReactor   // for participating in the consensus
	evidencePool     *evidence.EvidencePool // tracking evidence
	proxyApp         proxy.AppConns         // connection to the application
	rpcListeners     []net.Listener         // rpc servers
	txIndexer        txindex.TxIndexer
	indexerService   *txindex.IndexerService
	prometheusSrv    *http.Server
}

// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger) (*Node, error) {

	// Get BlockStore
	blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
	if err != nil {
		return nil, err
	}
	blockStore := bc.NewBlockStore(blockStoreDB)

	// Get State
	stateDB, err := dbProvider(&DBContext{"state", config})
	if err != nil {
		return nil, err
	}

	// Get genesis doc
	// TODO: move to state package?
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		genDoc, err = genesisDocProvider()
		if err != nil {
			return nil, err
		}
		// save genesis doc to prevent a certain class of user errors (e.g. when it
		// was changed, accidentally or not). Also good for audit trail.
		saveGenesisDoc(stateDB, genDoc)
	}

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp := proxy.NewAppConns(clientCreator)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
	}

	// Create the handshaker, which calls RequestInfo, sets the AppVersion on the state,
	// and replays any blocks as necessary to sync tendermint with the app.
	consensusLogger := logger.With("module", "consensus")
	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
	handshaker.SetLogger(consensusLogger)
	if err := handshaker.Handshake(proxyApp); err != nil {
		return nil, fmt.Errorf("Error during handshake: %v", err)
	}

	// Reload the state. It will have the Version.Consensus.App set by the
	// Handshake, and may have other modifications as well (i.e. depending on
	// what happened during block replay).
	state = sm.LoadState(stateDB)

	// Ensure the state's block version matches that of the software.
	if state.Version.Consensus.Block != version.BlockProtocol {
		return nil, fmt.Errorf(
			"Block version of the software does not match that of the state.\n"+
				"Got version.BlockProtocol=%v, state.Version.Consensus.Block=%v",
			version.BlockProtocol,
			state.Version.Consensus.Block,
		)
	}

	// If an address is provided, listen on the socket for a
	// connection from an external signing process.
	if config.PrivValidatorListenAddr != "" {
		var (
			// TODO: persist this key so external signer
			// can actually authenticate us
			privKey = ed25519.GenPrivKey()
			pvsc    = privval.NewTCPVal(
				logger.With("module", "privval"),
				config.PrivValidatorListenAddr,
				privKey,
			)
		)

		if err := pvsc.Start(); err != nil {
			return nil, fmt.Errorf("Error starting private validator client: %v", err)
		}

		privValidator = pvsc
	}

	// Decide whether to fast-sync or not
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSync
	if state.Validators.Size() == 1 {
		addr, _ := state.Validators.GetByIndex(0)
		if bytes.Equal(privValidator.GetAddress(), addr) {
			fastSync = false
		}
	}

	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(privValidator.GetAddress()) {
		consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	} else {
		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	}

	csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider()

	// Make MempoolReactor
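	// PreCheckAminoMaxBytes rejects txs that would not fit in a block of
	// ConsensusParams.BlockSize.MaxBytes once the header, commit and worst-case
	// evidence are accounted for; PostCheckMaxGas rejects txs whose reported
	// gasWanted exceeds BlockSize.MaxGas.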
	mempool := mempl.NewMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithPreCheck(
			mempl.PreCheckAminoMaxBytes(
				types.MaxDataBytesUnknownEvidence(
					state.ConsensusParams.BlockSize.MaxBytes,
					state.Validators.Size(),
				),
			),
		),
		mempl.WithPostCheck(
			mempl.PostCheckMaxGas(state.ConsensusParams.BlockSize.MaxGas),
		),
	)
	mempoolLogger := logger.With("module", "mempool")
	mempool.SetLogger(mempoolLogger)
	mempool.InitWAL() // no need to have the mempool wal during tests
	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
	mempoolReactor.SetLogger(mempoolLogger)

	if config.Consensus.WaitForTxs() {
		mempool.EnableTxsAvailable()
	}

	// Make Evidence Reactor
	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	if err != nil {
		return nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidenceStore := evidence.NewEvidenceStore(evidenceDB)
	evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
	evidencePool.SetLogger(evidenceLogger)
	evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
	evidenceReactor.SetLogger(evidenceLogger)

	blockExecLogger := logger.With("module", "state")
	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(
		stateDB,
		blockExecLogger,
		proxyApp.Consensus(),
		mempool,
		evidencePool,
		sm.BlockExecutorWithMetrics(smMetrics),
	)

	// Make BlockchainReactor
	bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	// Make ConsensusReactor
	consensusState := cs.NewConsensusState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.StateMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics))
	consensusReactor.SetLogger(consensusLogger)

	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))

	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)

	// Transaction indexing
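	// config.TxIndex.Indexer selects the backend: "kv" stores entries in a
	// dedicated tx_index DB, indexing either the comma-separated tags listed in
	// IndexTags or every tag when IndexAllTags is set; any other value falls
	// back to the no-op null indexer.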
	var txIndexer txindex.TxIndexer
	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, err
		}
		if config.TxIndex.IndexTags != "" {
			txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
		} else if config.TxIndex.IndexAllTags {
			txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
		} else {
			txIndexer = kv.NewTxIndex(store)
		}
	default:
		txIndexer = &null.TxIndex{}
	}

	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	indexerService.SetLogger(logger.With("module", "txindex"))

	var (
		p2pLogger = logger.With("module", "p2p")
		nodeInfo  = makeNodeInfo(
			config,
			nodeKey.ID(),
			txIndexer,
			genDoc.ChainID,
			p2p.NewProtocolVersion(
				version.P2PProtocol, // global
				state.Version.Consensus.Block,
				state.Version.Consensus.App,
			),
		)
	)

	// Setup Transport.
	var (
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey)
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("Error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("Error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	p2p.MultiplexTransportConnFilters(connFilters...)(transport)

	// Setup Switch.
	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
	)
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
	sw.AddReactor("BLOCKCHAIN", bcReactor)
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)
	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)

	// Add ourselves to addrbook to prevent dialing ourselves
	addrBook.AddOurAddress(nodeInfo.NetAddress())

	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
	if config.P2P.PexReactor {
		// TODO persistent peers ? so we can have their DNS addrs saved
		pexReactor := pex.NewPEXReactor(addrBook,
			&pex.PEXReactorConfig{
				Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
				SeedMode: config.P2P.SeedMode,
			})
		pexReactor.SetLogger(p2pLogger)
		sw.AddReactor("PEX", pexReactor)
	}

	sw.SetAddrBook(addrBook)

	// run the profile server
	profileHost := config.ProfListenAddress
	if profileHost != "" {
		go func() {
			logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateDB:          stateDB,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		eventBus:         eventBus,
	}
	node.BaseService = *cmn.NewBaseService(logger, "Node", node)
	return node, nil
}

// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	err := n.eventBus.Start()
	if err != nil {
		return err
	}

	// Add private IDs to addrbook to block those peers from being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can e.g. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressStringWithOptionalID(n.config.P2P.ListenAddress)
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	if n.config.P2P.PersistentPeers != "" {
		err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
		if err != nil {
			return err
		}
	}

	// start tx indexer
	return n.indexerService.Start()
}

// OnStop stops the Node. It implements cmn.Service.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	n.eventBus.Stop()
	n.indexerService.Stop()

	// now stop the reactors
	// TODO: gracefully disconnect from peers.
	n.sw.Stop()

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	if pvsc, ok := n.privValidator.(*privval.TCPVal); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error stopping priv validator socket client", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}

// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node
func (n *Node) ConfigureRPC() {
	rpccore.SetStateDB(n.stateDB)
	rpccore.SetBlockStore(n.blockStore)
	rpccore.SetConsensusState(n.consensusState)
	rpccore.SetMempool(n.mempoolReactor.Mempool)
	rpccore.SetEvidencePool(n.evidencePool)
	rpccore.SetP2PPeers(n.sw)
	rpccore.SetP2PTransport(n)
	rpccore.SetPubKey(n.privValidator.GetPubKey())
	rpccore.SetGenesisDoc(n.genesisDoc)
	rpccore.SetAddrBook(n.addrBook)
	rpccore.SetProxyAppQuery(n.proxyApp.Query())
	rpccore.SetTxIndexer(n.txIndexer)
	rpccore.SetConsensusReactor(n.consensusReactor)
	rpccore.SetEventBus(n.eventBus)
	rpccore.SetLogger(n.Logger.With("module", "rpc"))
}

func (n *Node) startRPC() ([]net.Listener, error) {
	n.ConfigureRPC()
	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
	coreCodec := amino.NewCodec()
	ctypes.RegisterAmino(coreCodec)

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
		listener, err := rpcserver.StartHTTPServer(
			listenAddr,
			mux,
			rpcLogger,
			rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections},
		)
		if err != nil {
			return nil, err
		}
		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		listener, err := grpccore.StartGRPCServer(
			grpcListenAddr,
			grpccore.Config{
				MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections,
			},
		)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, listener)
	}

	return listeners, nil
}

// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr.
func (n *Node) startPrometheusServer(addr string) *http.Server {
	srv := &http.Server{
		Addr: addr,
		Handler: promhttp.InstrumentMetricHandler(
			prometheus.DefaultRegisterer, promhttp.HandlerFor(
				prometheus.DefaultGatherer,
				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
			),
		),
	}
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			// Error starting or closing listener:
			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
		}
	}()
	return srv
}

// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *bc.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.ConsensusState {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's MempoolReactor.
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
	return n.mempoolReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

//------------------------------------------------------------------------------

func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}

func (n *Node) IsListening() bool {
	return n.isListening
}

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}

func makeNodeInfo(
	config *cfg.Config,
	nodeID p2p.ID,
	txIndexer txindex.TxIndexer,
	chainID string,
	protocolVersion p2p.ProtocolVersion,
) p2p.NodeInfo {
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}
	nodeInfo := p2p.DefaultNodeInfo{
		ProtocolVersion: protocolVersion,
		ID_:             nodeID,
		Network:         chainID,
		Version:         version.TMCoreSemVer,
		Channels: []byte{
			bc.BlockchainChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.DefaultNodeInfoOther{
			TxIndex:    txIndexerStatus,
			RPCAddress: config.RPC.ListenAddress,
		},
	}

	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	lAddr := config.P2P.ExternalAddress

	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}

	nodeInfo.ListenAddr = lAddr

	return nodeInfo
}

//------------------------------------------------------------------------------

var (
	genesisDocKey = []byte("genesisDoc")
)

// panics if it fails to unmarshal the stored bytes
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	bytes := db.Get(genesisDocKey)
	if len(bytes) == 0 {
		return nil, errors.New("Genesis doc not found")
	}
	var genDoc *types.GenesisDoc
	err := cdc.UnmarshalJSON(bytes, &genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
	}
	return genDoc, nil
}

// panics if it fails to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
	bytes, err := cdc.MarshalJSON(genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
	}
	db.SetSync(genesisDocKey, bytes)
}

// splitAndTrimEmpty slices s into all substrings separated by sep (equivalent
// to strings.SplitN with a count of -1), trims each substring of all leading
// and trailing Unicode code points contained in cutset, and returns only the
// non-empty results. If sep is empty, the split happens after each UTF-8
// sequence.
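// For example, splitAndTrimEmpty("tcp://a, , tcp://b ", ",", " ") returns
// []string{"tcp://a", "tcp://b"}.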
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}
	return nonEmptyStrings
}