

max-bytes PR follow-up (#2318)

* ReapMaxTxs: return all txs if max is negative

  this mirrors ReapMaxBytes behavior
  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214439950

* increase MaxAminoOverheadForBlock

  tested with:

```
func TestMaxAminoOverheadForBlock(t *testing.T) {
	maxChainID := ""
	for i := 0; i < MaxChainIDLen; i++ {
		maxChainID += "𠜎"
	}

	h := Header{
		ChainID:            maxChainID,
		Height:             10,
		Time:               time.Now().UTC(),
		NumTxs:             100,
		TotalTxs:           200,
		LastBlockID:        makeBlockID(make([]byte, 20), 300, make([]byte, 20)),
		LastCommitHash:     tmhash.Sum([]byte("last_commit_hash")),
		DataHash:           tmhash.Sum([]byte("data_hash")),
		ValidatorsHash:     tmhash.Sum([]byte("validators_hash")),
		NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
		ConsensusHash:      tmhash.Sum([]byte("consensus_hash")),
		AppHash:            tmhash.Sum([]byte("app_hash")),
		LastResultsHash:    tmhash.Sum([]byte("last_results_hash")),
		EvidenceHash:       tmhash.Sum([]byte("evidence_hash")),
		ProposerAddress:    tmhash.Sum([]byte("proposer_address")),
	}

	b := Block{
		Header:     h,
		Data:       Data{Txs: makeTxs(10000, 100)},
		Evidence:   EvidenceData{},
		LastCommit: &Commit{},
	}

	bz, err := cdc.MarshalBinary(b)
	require.NoError(t, err)

	assert.Equal(t, MaxHeaderBytes+MaxAminoOverheadForBlock-2, len(bz)-1000000-20000-1)
}
```

* fix MaxYYY constants calculation by using math.MaxInt64

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214444244

* pass mempool filter as an option

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214445869

* fixes after Dev's comments
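Two of the items above are visible from a caller's perspective: the mempool pre/post-check filters are now injected as options (exactly as `NewNode` does further down in this file), and `ReapMaxTxs` treats a negative max as "return everything", mirroring `ReapMaxBytes`. A minimal sketch of that wiring, using the identifiers that are in scope inside `NewNode` below (`config`, `proxyApp`, `state`); the reap calls reflect the semantics described in the commit message:

```go
// Option-based filter wiring, mirroring the mempl.NewMempool call in NewNode below.
mempool := mempl.NewMempool(
	config.Mempool,
	proxyApp.Mempool(),
	state.LastBlockHeight,
	mempl.WithPreCheck(
		mempl.PreCheckAminoMaxBytes(
			types.MaxDataBytesUnknownEvidence(
				state.ConsensusParams.BlockSize.MaxBytes,
				state.Validators.Size(),
			),
		),
	),
	mempl.WithPostCheck(
		mempl.PostCheckMaxGas(state.ConsensusParams.BlockSize.MaxGas),
	),
)

// Reap semantics from the first bullet: a negative max returns all txs,
// mirroring ReapMaxBytes.
allTxs := mempool.ReapMaxTxs(-1)
_ = allTxs
```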
package node

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof"
	"strings"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	amino "github.com/tendermint/go-amino"
	abci "github.com/tendermint/tendermint/abci/types"
	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cs "github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/evidence"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/pex"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	grpccore "github.com/tendermint/tendermint/rpc/grpc"
	rpc "github.com/tendermint/tendermint/rpc/lib"
	rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/state/txindex/kv"
	"github.com/tendermint/tendermint/state/txindex/null"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	"github.com/tendermint/tendermint/version"
)

//------------------------------------------------------------------------------

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string
	Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.DBBackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// NodeProvider takes a config and a logger and returns a ready to go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)

// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
	// Generate node PrivKey
	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, err
	}
	return NewNode(config,
		privval.LoadOrGenFilePV(config.PrivValidatorFile()),
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
	)
}

// MetricsProvider returns consensus, p2p and mempool Metrics.
type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics)

// DefaultMetricsProvider returns Metrics built using the Prometheus client
// library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
	return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
		if config.Prometheus {
			return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics()
		}
		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics()
	}
}

//------------------------------------------------------------------------------

// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	cmn.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	transport   *p2p.MultiplexTransport
	sw          *p2p.Switch  // p2p connections
	addrBook    pex.AddrBook // known peers
	nodeInfo    p2p.NodeInfo
	nodeKey     *p2p.NodeKey // our node privkey
	isListening bool

	// services
	eventBus         *types.EventBus // pub/sub for services
	stateDB          dbm.DB
	blockStore       *bc.BlockStore         // store the blockchain to disk
	bcReactor        *bc.BlockchainReactor  // for fast-syncing
	mempoolReactor   *mempl.MempoolReactor  // for gossipping transactions
	consensusState   *cs.ConsensusState     // latest consensus state
	consensusReactor *cs.ConsensusReactor   // for participating in the consensus
	evidencePool     *evidence.EvidencePool // tracking evidence
	proxyApp         proxy.AppConns         // connection to the application
	rpcListeners     []net.Listener         // rpc servers
	txIndexer        txindex.TxIndexer
	indexerService   *txindex.IndexerService
	prometheusSrv    *http.Server
}

// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger) (*Node, error) {

	// Get BlockStore
	blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
	if err != nil {
		return nil, err
	}
	blockStore := bc.NewBlockStore(blockStoreDB)

	// Get State
	stateDB, err := dbProvider(&DBContext{"state", config})
	if err != nil {
		return nil, err
	}

	// Get genesis doc
	// TODO: move to state package?
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		genDoc, err = genesisDocProvider()
		if err != nil {
			return nil, err
		}
		// save genesis doc to prevent a certain class of user errors (e.g. when it
		// was changed, accidentally or not). Also good for audit trail.
		saveGenesisDoc(stateDB, genDoc)
	}

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query).
	proxyApp := proxy.NewAppConns(clientCreator)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
	}

	// Create the handshaker, which calls RequestInfo and replays any blocks
	// as necessary to sync tendermint with the app.
	consensusLogger := logger.With("module", "consensus")
	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
	handshaker.SetLogger(consensusLogger)
	if err := handshaker.Handshake(proxyApp); err != nil {
		return nil, fmt.Errorf("Error during handshake: %v", err)
	}

	// reload the state (it may have been updated by the handshake)
	state = sm.LoadState(stateDB)

	// If an address is provided, listen on the socket for a
	// connection from an external signing process.
	if config.PrivValidatorListenAddr != "" {
		var (
			// TODO: persist this key so external signer
			// can actually authenticate us
			privKey = ed25519.GenPrivKey()
			pvsc    = privval.NewSocketPV(
				logger.With("module", "privval"),
				config.PrivValidatorListenAddr,
				privKey,
			)
		)

		if err := pvsc.Start(); err != nil {
			return nil, fmt.Errorf("Error starting private validator client: %v", err)
		}

		privValidator = pvsc
	}

	// Decide whether to fast-sync or not
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSync
	if state.Validators.Size() == 1 {
		addr, _ := state.Validators.GetByIndex(0)
		if bytes.Equal(privValidator.GetAddress(), addr) {
			fastSync = false
		}
	}

	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(privValidator.GetAddress()) {
		consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	} else {
		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	}

	csMetrics, p2pMetrics, memplMetrics := metricsProvider()

	// Make MempoolReactor
	mempool := mempl.NewMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithPreCheck(
			mempl.PreCheckAminoMaxBytes(
				types.MaxDataBytesUnknownEvidence(
					state.ConsensusParams.BlockSize.MaxBytes,
					state.Validators.Size(),
				),
			),
		),
		mempl.WithPostCheck(
			mempl.PostCheckMaxGas(state.ConsensusParams.BlockSize.MaxGas),
		),
	)
	mempoolLogger := logger.With("module", "mempool")
	mempool.SetLogger(mempoolLogger)
	mempool.InitWAL() // no need to have the mempool wal during tests
	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
	mempoolReactor.SetLogger(mempoolLogger)

	if config.Consensus.WaitForTxs() {
		mempool.EnableTxsAvailable()
	}

	// Make Evidence Reactor
	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	if err != nil {
		return nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidenceStore := evidence.NewEvidenceStore(evidenceDB)
	evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
	evidencePool.SetLogger(evidenceLogger)
	evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
	evidenceReactor.SetLogger(evidenceLogger)

	blockExecLogger := logger.With("module", "state")
	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool)

	// Make BlockchainReactor
	bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	// Make ConsensusReactor
	consensusState := cs.NewConsensusState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.WithMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync)
	consensusReactor.SetLogger(consensusLogger)

	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))

	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)

	// Transaction indexing
	var txIndexer txindex.TxIndexer
	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, err
		}
		if config.TxIndex.IndexTags != "" {
			txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
		} else if config.TxIndex.IndexAllTags {
			txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
		} else {
			txIndexer = kv.NewTxIndex(store)
		}
	default:
		txIndexer = &null.TxIndex{}
	}

	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	indexerService.SetLogger(logger.With("module", "txindex"))

	var (
		p2pLogger = logger.With("module", "p2p")
		nodeInfo  = makeNodeInfo(config, nodeKey.ID(), txIndexer, genDoc.ChainID)
	)

	// Setup Transport.
	var (
		transport   = p2p.NewMultiplexTransport(nodeInfo, *nodeKey)
		connFilters = []p2p.ConnFilterFunc{}
		peerFilters = []p2p.PeerFilterFunc{}
	)

	if !config.P2P.AllowDuplicateIP {
		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter())
	}

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	// XXX: Query format subject to change
	if config.FilterPeers {
		connFilters = append(
			connFilters,
			// ABCI query for address filtering.
			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("Error querying abci app: %v", res)
				}

				return nil
			},
		)

		peerFilters = append(
			peerFilters,
			// ABCI query for ID filtering.
			func(_ p2p.IPeerSet, p p2p.Peer) error {
				res, err := proxyApp.Query().QuerySync(abci.RequestQuery{
					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
				})
				if err != nil {
					return err
				}
				if res.IsErr() {
					return fmt.Errorf("Error querying abci app: %v", res)
				}

				return nil
			},
		)
	}

	p2p.MultiplexTransportConnFilters(connFilters...)(transport)

	// Setup Switch.
	sw := p2p.NewSwitch(
		config.P2P,
		transport,
		p2p.WithMetrics(p2pMetrics),
		p2p.SwitchPeerFilters(peerFilters...),
	)
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
	sw.AddReactor("BLOCKCHAIN", bcReactor)
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)
	sw.SetNodeInfo(nodeInfo)
	sw.SetNodeKey(nodeKey)

	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)

	// Add ourselves to addrbook to prevent dialing ourselves
	addrBook.AddOurAddress(nodeInfo.NetAddress())

	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
	if config.P2P.PexReactor {
		// TODO persistent peers ? so we can have their DNS addrs saved
		pexReactor := pex.NewPEXReactor(addrBook,
			&pex.PEXReactorConfig{
				Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
				SeedMode: config.P2P.SeedMode,
			})
		pexReactor.SetLogger(p2pLogger)
		sw.AddReactor("PEX", pexReactor)
	}

	sw.SetAddrBook(addrBook)

	// run the profile server
	profileHost := config.ProfListenAddress
	if profileHost != "" {
		go func() {
			logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		transport: transport,
		sw:        sw,
		addrBook:  addrBook,
		nodeInfo:  nodeInfo,
		nodeKey:   nodeKey,

		stateDB:          stateDB,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		eventBus:         eventBus,
	}
	node.BaseService = *cmn.NewBaseService(logger, "Node", node)

	return node, nil
}

// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
	now := tmtime.Now()
	genTime := n.genesisDoc.GenesisTime
	if genTime.After(now) {
		n.Logger.Info("Genesis time is in the future. Sleeping until then...", "genTime", genTime)
		time.Sleep(genTime.Sub(now))
	}

	err := n.eventBus.Start()
	if err != nil {
		return err
	}

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the transport.
	addr, err := p2p.NewNetAddressStringWithOptionalID(n.config.P2P.ListenAddress)
	if err != nil {
		return err
	}
	if err := n.transport.Listen(*addr); err != nil {
		return err
	}

	n.isListening = true

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	if n.config.P2P.PersistentPeers != "" {
		err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
		if err != nil {
			return err
		}
	}

	// start tx indexer
	return n.indexerService.Start()
}

// OnStop stops the Node. It implements cmn.Service.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	n.eventBus.Stop()
	n.indexerService.Stop()

	// now stop the reactors
	// TODO: gracefully disconnect from peers.
	n.sw.Stop()

	if err := n.transport.Close(); err != nil {
		n.Logger.Error("Error closing transport", "err", err)
	}

	n.isListening = false

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	if pvsc, ok := n.privValidator.(*privval.SocketPV); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error stopping priv validator socket client", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}

// RunForever waits for an interrupt signal and stops the node.
func (n *Node) RunForever() {
	// Sleep forever and then...
	cmn.TrapSignal(func() {
		n.Stop()
	})
}

// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node.
func (n *Node) ConfigureRPC() {
	rpccore.SetStateDB(n.stateDB)
	rpccore.SetBlockStore(n.blockStore)
	rpccore.SetConsensusState(n.consensusState)
	rpccore.SetMempool(n.mempoolReactor.Mempool)
	rpccore.SetEvidencePool(n.evidencePool)
	rpccore.SetP2PPeers(n.sw)
	rpccore.SetP2PTransport(n)
	rpccore.SetPubKey(n.privValidator.GetPubKey())
	rpccore.SetGenesisDoc(n.genesisDoc)
	rpccore.SetAddrBook(n.addrBook)
	rpccore.SetProxyAppQuery(n.proxyApp.Query())
	rpccore.SetTxIndexer(n.txIndexer)
	rpccore.SetConsensusReactor(n.consensusReactor)
	rpccore.SetEventBus(n.eventBus)
	rpccore.SetLogger(n.Logger.With("module", "rpc"))
}

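// startRPC configures rpccore and starts an HTTP (and websocket) RPC server
// on each configured listen address, plus an optional gRPC server. It returns
// the listeners so OnStop can close them.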
func (n *Node) startRPC() ([]net.Listener, error) {
	n.ConfigureRPC()
	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
	coreCodec := amino.NewCodec()
	ctypes.RegisterAmino(coreCodec)

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
		listener, err := rpcserver.StartHTTPServer(
			listenAddr,
			mux,
			rpcLogger,
			rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections},
		)
		if err != nil {
			return nil, err
		}
		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		listener, err := grpccore.StartGRPCServer(
			grpcListenAddr,
			grpccore.Config{
				MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections,
			},
		)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, listener)
	}

	return listeners, nil
}

// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr.
func (n *Node) startPrometheusServer(addr string) *http.Server {
	srv := &http.Server{
		Addr: addr,
		Handler: promhttp.InstrumentMetricHandler(
			prometheus.DefaultRegisterer, promhttp.HandlerFor(
				prometheus.DefaultGatherer,
				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
			),
		),
	}
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			// Error starting or closing listener:
			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
		}
	}()
	return srv
}

// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *bc.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.ConsensusState {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's MempoolReactor.
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
	return n.mempoolReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

//------------------------------------------------------------------------------

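// Listeners returns a human-readable description of the node's external P2P
// address.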
func (n *Node) Listeners() []string {
	return []string{
		fmt.Sprintf("Listener(@%v)", n.config.P2P.ExternalAddress),
	}
}

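// IsListening reports whether the P2P transport is listening for inbound
// connections.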
func (n *Node) IsListening() bool {
	return n.isListening
}

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.nodeInfo
}

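// makeNodeInfo assembles the p2p.NodeInfo this node advertises to peers: its
// ID, network (chain ID), supported channels, version information, tx index
// status, and listen address.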
func makeNodeInfo(
	config *cfg.Config,
	nodeID p2p.ID,
	txIndexer txindex.TxIndexer,
	chainID string,
) p2p.NodeInfo {
	txIndexerStatus := "on"
	if _, ok := txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}
	nodeInfo := p2p.NodeInfo{
		ID:      nodeID,
		Network: chainID,
		Version: version.Version,
		Channels: []byte{
			bc.BlockchainChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
		},
		Moniker: config.Moniker,
		Other: p2p.NodeInfoOther{
			AminoVersion:     amino.Version,
			P2PVersion:       p2p.Version,
			ConsensusVersion: cs.Version,
			RPCVersion:       fmt.Sprintf("%v/%v", rpc.Version, rpccore.Version),
			TxIndex:          txIndexerStatus,
			RPCAddress:       config.RPC.ListenAddress,
		},
	}

	if config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	lAddr := config.P2P.ExternalAddress
	if lAddr == "" {
		lAddr = config.P2P.ListenAddress
	}
	nodeInfo.ListenAddr = lAddr

	return nodeInfo
}

//------------------------------------------------------------------------------

var (
	genesisDocKey = []byte("genesisDoc")
)

// panics if it fails to unmarshal the bytes
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	bytes := db.Get(genesisDocKey)
	if len(bytes) == 0 {
		return nil, errors.New("Genesis doc not found")
	}
	var genDoc *types.GenesisDoc
	err := cdc.UnmarshalJSON(bytes, &genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
	}
	return genDoc, nil
}

// panics if it fails to marshal the given genesis document
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
	bytes, err := cdc.MarshalJSON(genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
	}
	db.SetSync(genesisDocKey, bytes)
}

// splitAndTrimEmpty slices s into all substrings separated by sep and returns
// a slice of those substrings, with all leading and trailing Unicode code
// points contained in cutset removed. If sep is empty, it splits after each
// UTF-8 sequence. Splitting is equivalent to strings.SplitN with a count of
// -1. Empty strings are filtered out, so only non-empty strings are returned.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}

	return nonEmptyStrings
}
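
For context, a minimal sketch of how this package is typically wired together by a caller of DefaultNewNode and RunForever above. The config and logger construction (cfg.DefaultConfig, config.SetRoot, log.NewTMLogger) are assumptions about the surrounding tendermint packages, and the root path is illustrative only:

```go
package main

import (
	"os"

	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	nm "github.com/tendermint/tendermint/node"
)

func main() {
	// Illustrative config: in practice this is loaded from the node's
	// config.toml; cfg.DefaultConfig and SetRoot are assumed here.
	config := cfg.DefaultConfig()
	config.SetRoot(os.ExpandEnv("$HOME/.tendermint"))

	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

	// DefaultNewNode wires the default PrivValidator, ClientCreator,
	// GenesisDoc provider, DBProvider, and MetricsProvider (see above).
	node, err := nm.DefaultNewNode(config, logger)
	if err != nil {
		logger.Error("failed to create node", "err", err)
		os.Exit(1)
	}

	// Start invokes OnStart via cmn.BaseService; RunForever blocks until an
	// interrupt signal stops the node.
	if err := node.Start(); err != nil {
		logger.Error("failed to start node", "err", err)
		os.Exit(1)
	}
	node.RunForever()
}
```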