max-bytes PR follow-up (#2318)

* ReapMaxTxs: return all txs if max is negative

  This mirrors ReapMaxBytes behavior.
  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214439950

* increase MaxAminoOverheadForBlock

  Tested with:

  ```go
  func TestMaxAminoOverheadForBlock(t *testing.T) {
  	maxChainID := ""
  	for i := 0; i < MaxChainIDLen; i++ {
  		maxChainID += "𠜎"
  	}
  	h := Header{
  		ChainID:            maxChainID,
  		Height:             10,
  		Time:               time.Now().UTC(),
  		NumTxs:             100,
  		TotalTxs:           200,
  		LastBlockID:        makeBlockID(make([]byte, 20), 300, make([]byte, 20)),
  		LastCommitHash:     tmhash.Sum([]byte("last_commit_hash")),
  		DataHash:           tmhash.Sum([]byte("data_hash")),
  		ValidatorsHash:     tmhash.Sum([]byte("validators_hash")),
  		NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")),
  		ConsensusHash:      tmhash.Sum([]byte("consensus_hash")),
  		AppHash:            tmhash.Sum([]byte("app_hash")),
  		LastResultsHash:    tmhash.Sum([]byte("last_results_hash")),
  		EvidenceHash:       tmhash.Sum([]byte("evidence_hash")),
  		ProposerAddress:    tmhash.Sum([]byte("proposer_address")),
  	}
  	b := Block{
  		Header:     h,
  		Data:       Data{Txs: makeTxs(10000, 100)},
  		Evidence:   EvidenceData{},
  		LastCommit: &Commit{},
  	}
  	bz, err := cdc.MarshalBinary(b)
  	require.NoError(t, err)
  	assert.Equal(t, MaxHeaderBytes+MaxAminoOverheadForBlock-2, len(bz)-1000000-20000-1)
  }
  ```

* fix MaxYYY constants calculation by using math.MaxInt64

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214444244

* pass mempool filter as an option

  See https://github.com/tendermint/tendermint/pull/2184#discussion_r214445869

* fixes after Dev's comments
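The "pass mempool filter as an option" item refers to the `mempl.WithFilter(...)` option that `NewNode` (below) now passes to `mempl.NewMempool`. As a rough, self-contained sketch of that functional-option pattern — the `Tx`, `Mempool`, and option names here are simplified stand-ins, not the mempool package's actual internals:

```go
package main

import "fmt"

// Tx and Mempool are illustrative stand-ins for the real mempool types.
type Tx []byte

type Mempool struct {
	filter func(Tx) bool // nil means "accept every transaction"
}

// MempoolOption configures a Mempool at construction time.
type MempoolOption func(*Mempool)

// WithFilter installs a pre-check filter, mirroring mempl.WithFilter in node.go below.
func WithFilter(f func(Tx) bool) MempoolOption {
	return func(mem *Mempool) { mem.filter = f }
}

// NewMempool applies the given options to a fresh Mempool.
func NewMempool(options ...MempoolOption) *Mempool {
	mem := &Mempool{}
	for _, option := range options {
		option(mem)
	}
	return mem
}

func main() {
	maxDataBytes := 1024
	mem := NewMempool(WithFilter(func(tx Tx) bool { return len(tx) <= maxDataBytes }))
	fmt.Println(mem.filter(make(Tx, 2048))) // false: larger than the allowed data budget
}
```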
package node

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	amino "github.com/tendermint/go-amino"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"

	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cs "github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/evidence"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/pex"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	grpccore "github.com/tendermint/tendermint/rpc/grpc"
	rpc "github.com/tendermint/tendermint/rpc/lib"
	rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/state/txindex/kv"
	"github.com/tendermint/tendermint/state/txindex/null"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"

	// Side-effect import: registers the /debug/pprof handlers on
	// http.DefaultServeMux, which the profile server below relies on.
	_ "net/http/pprof"
)

//------------------------------------------------------------------------------

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string
	Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.DBBackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}

// NodeProvider takes a config and a logger and returns a ready-to-go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)

// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
	// Generate node PrivKey
	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, err
	}

	return NewNode(config,
		privval.LoadOrGenFilePV(config.PrivValidatorFile()),
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
	)
}

// MetricsProvider returns consensus, p2p, and mempool Metrics.
type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics)

// DefaultMetricsProvider returns Metrics built using the Prometheus client
// library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
	return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
		if config.Prometheus {
			return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics()
		}
		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics()
	}
}

//------------------------------------------------------------------------------

// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	cmn.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	sw       *p2p.Switch  // p2p connections
	addrBook pex.AddrBook // known peers
	nodeKey  *p2p.NodeKey // our node privkey

	// services
	eventBus         *types.EventBus        // pub/sub for services
	stateDB          dbm.DB
	blockStore       *bc.BlockStore         // store the blockchain to disk
	bcReactor        *bc.BlockchainReactor  // for fast-syncing
	mempoolReactor   *mempl.MempoolReactor  // for gossipping transactions
	consensusState   *cs.ConsensusState     // latest consensus state
	consensusReactor *cs.ConsensusReactor   // for participating in the consensus
	evidencePool     *evidence.EvidencePool // tracking evidence
	proxyApp         proxy.AppConns         // connection to the application
	rpcListeners     []net.Listener         // rpc servers
	txIndexer        txindex.TxIndexer
	indexerService   *txindex.IndexerService
	prometheusSrv    *http.Server
}

// NewNode returns a new, ready-to-go Tendermint Node.
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger) (*Node, error) {

	// Get BlockStore
	blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
	if err != nil {
		return nil, err
	}
	blockStore := bc.NewBlockStore(blockStoreDB)

	// Get State
	stateDB, err := dbProvider(&DBContext{"state", config})
	if err != nil {
		return nil, err
	}

	// Get genesis doc
	// TODO: move to state package?
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		genDoc, err = genesisDocProvider()
		if err != nil {
			return nil, err
		}
		// Save the genesis doc so that a later change to the genesis file,
		// accidental or not, cannot affect this node. It also serves as an
		// audit trail.
		saveGenesisDoc(stateDB, genDoc)
	}

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp, which manages connections (consensus, mempool, query),
	// and syncs tendermint and the app by performing a handshake
	// and replaying any necessary blocks.
	consensusLogger := logger.With("module", "consensus")
	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
	handshaker.SetLogger(consensusLogger)
	proxyApp := proxy.NewAppConns(clientCreator, handshaker)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
	}

	// reload the state (it may have been updated by the handshake)
	state = sm.LoadState(stateDB)

	// If an address is provided, listen on the socket for a
	// connection from an external signing process.
	if config.PrivValidatorListenAddr != "" {
		var (
			// TODO: persist this key so external signer
			// can actually authenticate us
			privKey = ed25519.GenPrivKey()
			pvsc    = privval.NewSocketPV(
				logger.With("module", "privval"),
				config.PrivValidatorListenAddr,
				privKey,
			)
		)

		if err := pvsc.Start(); err != nil {
			return nil, fmt.Errorf("Error starting private validator client: %v", err)
		}

		privValidator = pvsc
	}

	// Decide whether to fast-sync or not.
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSync
	if state.Validators.Size() == 1 {
		addr, _ := state.Validators.GetByIndex(0)
		if bytes.Equal(privValidator.GetAddress(), addr) {
			fastSync = false
		}
	}

	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(privValidator.GetAddress()) {
		consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	} else {
		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	}

	csMetrics, p2pMetrics, memplMetrics := metricsProvider()

	// Make MempoolReactor
	maxDataBytes := types.MaxDataBytesUnknownEvidence(
		state.ConsensusParams.BlockSize.MaxBytes,
		state.Validators.Size(),
	)
	mempool := mempl.NewMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		// Only accept transactions that fit within the block's worst-case
		// data budget (see types.MaxDataBytesUnknownEvidence).
		mempl.WithFilter(func(tx types.Tx) bool { return len(tx) <= maxDataBytes }),
	)
	mempoolLogger := logger.With("module", "mempool")
	mempool.SetLogger(mempoolLogger)
	mempool.InitWAL() // no need to have the mempool wal during tests
	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
	mempoolReactor.SetLogger(mempoolLogger)

	if config.Consensus.WaitForTxs() {
		mempool.EnableTxsAvailable()
	}

	// Make Evidence Reactor
	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	if err != nil {
		return nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidenceStore := evidence.NewEvidenceStore(evidenceDB)
	evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
	evidencePool.SetLogger(evidenceLogger)
	evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
	evidenceReactor.SetLogger(evidenceLogger)

	blockExecLogger := logger.With("module", "state")
	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool)

	// Make BlockchainReactor
	bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	// Make ConsensusReactor
	consensusState := cs.NewConsensusState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.WithMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync)
	consensusReactor.SetLogger(consensusLogger)

	p2pLogger := logger.With("module", "p2p")

	sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics))
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
	sw.AddReactor("BLOCKCHAIN", bcReactor)
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)
	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
	if config.P2P.PexReactor {
		// TODO persistent peers ? so we can have their DNS addrs saved
		pexReactor := pex.NewPEXReactor(addrBook,
			&pex.PEXReactorConfig{
				Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
				SeedMode: config.P2P.SeedMode,
			})
		pexReactor.SetLogger(p2pLogger)
		sw.AddReactor("PEX", pexReactor)
	}
	sw.SetAddrBook(addrBook)

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	// XXX: Query format subject to change
	if config.FilterPeers {
		// NOTE: addr is ip:port
		sw.SetAddrFilter(func(addr net.Addr) error {
			resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: fmt.Sprintf("/p2p/filter/addr/%s", addr.String())})
			if err != nil {
				return err
			}
			if resQuery.IsErr() {
				return fmt.Errorf("Error querying abci app: %v", resQuery)
			}
			return nil
		})
		sw.SetIDFilter(func(id p2p.ID) error {
			resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: fmt.Sprintf("/p2p/filter/id/%s", id)})
			if err != nil {
				return err
			}
			if resQuery.IsErr() {
				return fmt.Errorf("Error querying abci app: %v", resQuery)
			}
			return nil
		})
	}

	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))

	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)

	// Transaction indexing
	var txIndexer txindex.TxIndexer
	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, err
		}
		if config.TxIndex.IndexTags != "" {
			txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
		} else if config.TxIndex.IndexAllTags {
			txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
		} else {
			txIndexer = kv.NewTxIndex(store)
		}
	default:
		txIndexer = &null.TxIndex{}
	}

	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	indexerService.SetLogger(logger.With("module", "txindex"))

	// run the profile server
	profileHost := config.ProfListenAddress
	if profileHost != "" {
		go func() {
			logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		sw:       sw,
		addrBook: addrBook,
		nodeKey:  nodeKey,

		stateDB:          stateDB,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		eventBus:         eventBus,
	}
	node.BaseService = *cmn.NewBaseService(logger, "Node", node)
	return node, nil
}

// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
	err := n.eventBus.Start()
	if err != nil {
		return err
	}

	// Create & add listener
	l := p2p.NewDefaultListener(
		n.config.P2P.ListenAddress,
		n.config.P2P.ExternalAddress,
		n.config.P2P.UPNP,
		n.Logger.With("module", "p2p"))
	n.sw.AddListener(l)

	nodeInfo := n.makeNodeInfo(n.nodeKey.ID())
	n.sw.SetNodeInfo(nodeInfo)
	n.sw.SetNodeKey(n.nodeKey)

	// Add ourselves to addrbook to prevent dialing ourselves
	n.addrBook.AddOurAddress(nodeInfo.NetAddress())

	// Add private IDs to addrbook to block those peers being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}
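
	// Start the Prometheus metrics server if instrumentation is enabled
	// and a listen address is configured.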
	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	if n.config.P2P.PersistentPeers != "" {
		err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
		if err != nil {
			return err
		}
	}

	// start tx indexer
	return n.indexerService.Start()
}

// OnStop stops the Node. It implements cmn.Service.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	n.eventBus.Stop()
	n.indexerService.Stop()

	// now stop the reactors
	// TODO: gracefully disconnect from peers.
	n.sw.Stop()

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	if pvsc, ok := n.privValidator.(*privval.SocketPV); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error stopping priv validator socket client", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}

// RunForever waits for an interrupt signal and stops the node.
func (n *Node) RunForever() {
	// Sleep forever and then...
	cmn.TrapSignal(func() {
		n.Stop()
	})
}

// AddListener adds a listener to accept inbound peer connections.
// It should be called before starting the Node.
// The first listener is the primary listener (in NodeInfo).
func (n *Node) AddListener(l p2p.Listener) {
	n.sw.AddListener(l)
}

// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node.
func (n *Node) ConfigureRPC() {
	rpccore.SetStateDB(n.stateDB)
	rpccore.SetBlockStore(n.blockStore)
	rpccore.SetConsensusState(n.consensusState)
	rpccore.SetMempool(n.mempoolReactor.Mempool)
	rpccore.SetEvidencePool(n.evidencePool)
	rpccore.SetSwitch(n.sw)
	rpccore.SetPubKey(n.privValidator.GetPubKey())
	rpccore.SetGenesisDoc(n.genesisDoc)
	rpccore.SetAddrBook(n.addrBook)
	rpccore.SetProxyAppQuery(n.proxyApp.Query())
	rpccore.SetTxIndexer(n.txIndexer)
	rpccore.SetConsensusReactor(n.consensusReactor)
	rpccore.SetEventBus(n.eventBus)
	rpccore.SetLogger(n.Logger.With("module", "rpc"))
}
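
// startRPC configures rpccore, then starts an HTTP/WebSocket RPC server on
// each configured listen address, plus an optional gRPC server exposing a
// simplified API. It returns the listeners so OnStop can close them.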
func (n *Node) startRPC() ([]net.Listener, error) {
	n.ConfigureRPC()
	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
	coreCodec := amino.NewCodec()
	ctypes.RegisterAmino(coreCodec)

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
		listener, err := rpcserver.StartHTTPServer(
			listenAddr,
			mux,
			rpcLogger,
			rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections},
		)
		if err != nil {
			return nil, err
		}
		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		listener, err := grpccore.StartGRPCServer(
			grpcListenAddr,
			grpccore.Config{
				MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections,
			},
		)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, listener)
	}

	return listeners, nil
}

// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr.
func (n *Node) startPrometheusServer(addr string) *http.Server {
	srv := &http.Server{
		Addr: addr,
		Handler: promhttp.InstrumentMetricHandler(
			prometheus.DefaultRegisterer, promhttp.HandlerFor(
				prometheus.DefaultGatherer,
				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
			),
		),
	}
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			// Error starting or closing listener:
			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
		}
	}()
	return srv
}

// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *bc.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.ConsensusState {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's MempoolReactor.
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
	return n.mempoolReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}
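
// makeNodeInfo assembles the p2p.NodeInfo advertised to peers: node ID,
// chain ID, versions, enabled reactor channels, and the external P2P and
// RPC listen addresses.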
func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
	txIndexerStatus := "on"
	if _, ok := n.txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}
	nodeInfo := p2p.NodeInfo{
		ID:      nodeID,
		Network: n.genesisDoc.ChainID,
		Version: version.Version,
		Channels: []byte{
			bc.BlockchainChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
		},
		Moniker: n.config.Moniker,
		Other: p2p.NodeInfoOther{
			AminoVersion:     amino.Version,
			P2PVersion:       p2p.Version,
			ConsensusVersion: cs.Version,
			RPCVersion:       fmt.Sprintf("%v/%v", rpc.Version, rpccore.Version),
			TxIndex:          txIndexerStatus,
		},
	}

	if n.config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	rpcListenAddr := n.config.RPC.ListenAddress
	nodeInfo.Other.RPCAddress = rpcListenAddr

	if !n.sw.IsListening() {
		return nodeInfo
	}

	p2pListener := n.sw.Listeners()[0]
	p2pHost := p2pListener.ExternalAddressHost()
	p2pPort := p2pListener.ExternalAddress().Port
	nodeInfo.ListenAddr = fmt.Sprintf("%v:%v", p2pHost, p2pPort)

	return nodeInfo
}

//------------------------------------------------------------------------------

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.sw.NodeInfo()
}

//------------------------------------------------------------------------------
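
// genesisDocKey is the database key under which the genesis document is
// stored (see loadGenesisDoc and saveGenesisDoc below).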
var (
	genesisDocKey = []byte("genesisDoc")
)

// loadGenesisDoc reads the GenesisDoc from the DB.
// It panics if the stored bytes cannot be unmarshaled.
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	bytes := db.Get(genesisDocKey)
	if len(bytes) == 0 {
		return nil, errors.New("Genesis doc not found")
	}
	var genDoc *types.GenesisDoc
	err := cdc.UnmarshalJSON(bytes, &genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
	}
	return genDoc, nil
}

// saveGenesisDoc writes the given GenesisDoc to the DB.
// It panics if the document cannot be marshaled.
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
	bytes, err := cdc.MarshalJSON(genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
	}
	db.SetSync(genesisDocKey, bytes)
}

// splitAndTrimEmpty slices s into all substrings separated by sep, trims
// leading and trailing Unicode code points contained in cutset from each
// substring, and returns only the non-empty results. It behaves like
// strings.Split followed by strings.Trim, with empty strings filtered out.
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}
	return nonEmptyStrings
}