package node

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
	_ "net/http/pprof" // registers pprof handlers on the default mux, used by the profile server
	"strings"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	amino "github.com/tendermint/go-amino"
	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
	cmn "github.com/tendermint/tendermint/libs/common"
	dbm "github.com/tendermint/tendermint/libs/db"
	"github.com/tendermint/tendermint/libs/log"

	bc "github.com/tendermint/tendermint/blockchain"
	cfg "github.com/tendermint/tendermint/config"
	cs "github.com/tendermint/tendermint/consensus"
	"github.com/tendermint/tendermint/evidence"
	mempl "github.com/tendermint/tendermint/mempool"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/pex"
	"github.com/tendermint/tendermint/privval"
	"github.com/tendermint/tendermint/proxy"
	rpccore "github.com/tendermint/tendermint/rpc/core"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	grpccore "github.com/tendermint/tendermint/rpc/grpc"
	rpc "github.com/tendermint/tendermint/rpc/lib"
	rpcserver "github.com/tendermint/tendermint/rpc/lib/server"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/state/txindex"
	"github.com/tendermint/tendermint/state/txindex/kv"
	"github.com/tendermint/tendermint/state/txindex/null"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
)

//------------------------------------------------------------------------------

// DBContext specifies config information for loading a new DB.
type DBContext struct {
	ID     string
	Config *cfg.Config
}

// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (dbm.DB, error)

// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the ctx.Config.
func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) {
	dbType := dbm.DBBackendType(ctx.Config.DBBackend)
	return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil
}

// GenesisDocProvider returns a GenesisDoc.
// It allows the GenesisDoc to be pulled from sources other than the
// filesystem, for instance from a distributed key-value store cluster.
type GenesisDocProvider func() (*types.GenesisDoc, error)

// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads
// the GenesisDoc from the config.GenesisFile() on the filesystem.
func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider {
	return func() (*types.GenesisDoc, error) {
		return types.GenesisDocFromFile(config.GenesisFile())
	}
}
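
// A GenesisDocProvider does not have to read from the filesystem. As a minimal
// sketch (illustrative only, not part of this package's API), a provider can
// wrap genesis JSON that is already in memory; genesisDocFromJSONBytes is a
// hypothetical helper name:
//
//	func genesisDocFromJSONBytes(jsonBlob []byte) GenesisDocProvider {
//		return func() (*types.GenesisDoc, error) {
//			return types.GenesisDocFromJSON(jsonBlob)
//		}
//	}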

// NodeProvider takes a config and a logger and returns a ready to go Node.
type NodeProvider func(*cfg.Config, log.Logger) (*Node, error)

// DefaultNewNode returns a Tendermint node with default settings for the
// PrivValidator, ClientCreator, GenesisDoc, and DBProvider.
// It implements NodeProvider.
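//
// Example (a minimal sketch with a hypothetical root directory; it assumes the
// config files already exist, e.g. created by `tendermint init`, and that "os"
// is imported for the logger's writer):
//
//	config := cfg.DefaultConfig()
//	config.SetRoot("/path/to/.tendermint")
//	logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
//	n, err := DefaultNewNode(config, logger)
//	if err != nil {
//		// handle error
//	}
//	if err := n.Start(); err != nil {
//		// handle error
//	}
//	n.RunForever() // blocks until an interrupt signal, then calls Stop()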
func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) {
	// Generate node PrivKey
	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
	if err != nil {
		return nil, err
	}
	return NewNode(config,
		privval.LoadOrGenFilePV(config.PrivValidatorFile()),
		nodeKey,
		proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()),
		DefaultGenesisDocProviderFunc(config),
		DefaultDBProvider,
		DefaultMetricsProvider(config.Instrumentation),
		logger,
	)
}

// MetricsProvider returns consensus, p2p and mempool Metrics.
type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics)

// DefaultMetricsProvider returns Metrics built using the Prometheus client
// library if Prometheus is enabled. Otherwise, it returns no-op Metrics.
func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider {
	return func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) {
		if config.Prometheus {
			return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics()
		}
		return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics()
	}
}
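
// Whether Prometheus metrics are exposed is driven by the instrumentation
// section of the config. A small sketch of enabling it programmatically, using
// the same fields this file reads in OnStart (the listen address is just an
// example value):
//
//	config.Instrumentation.Prometheus = true
//	config.Instrumentation.PrometheusListenAddr = ":26660"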

//------------------------------------------------------------------------------

// Node is the highest level interface to a full Tendermint node.
// It includes all configuration information and running services.
type Node struct {
	cmn.BaseService

	// config
	config        *cfg.Config
	genesisDoc    *types.GenesisDoc   // initial validator set
	privValidator types.PrivValidator // local node's validator key

	// network
	sw       *p2p.Switch  // p2p connections
	addrBook pex.AddrBook // known peers
	nodeKey  *p2p.NodeKey // our node privkey

	// services
	eventBus         *types.EventBus // pub/sub for services
	stateDB          dbm.DB
	blockStore       *bc.BlockStore         // store the blockchain to disk
	bcReactor        *bc.BlockchainReactor  // for fast-syncing
	mempoolReactor   *mempl.MempoolReactor  // for gossipping transactions
	consensusState   *cs.ConsensusState     // latest consensus state
	consensusReactor *cs.ConsensusReactor   // for participating in the consensus
	evidencePool     *evidence.EvidencePool // tracking evidence
	proxyApp         proxy.AppConns         // connection to the application
	rpcListeners     []net.Listener         // rpc servers
	txIndexer        txindex.TxIndexer
	indexerService   *txindex.IndexerService
	prometheusSrv    *http.Server
}

// NewNode returns a new, ready to go, Tendermint Node.
func NewNode(config *cfg.Config,
	privValidator types.PrivValidator,
	nodeKey *p2p.NodeKey,
	clientCreator proxy.ClientCreator,
	genesisDocProvider GenesisDocProvider,
	dbProvider DBProvider,
	metricsProvider MetricsProvider,
	logger log.Logger) (*Node, error) {

	// Get BlockStore
	blockStoreDB, err := dbProvider(&DBContext{"blockstore", config})
	if err != nil {
		return nil, err
	}
	blockStore := bc.NewBlockStore(blockStoreDB)

	// Get State
	stateDB, err := dbProvider(&DBContext{"state", config})
	if err != nil {
		return nil, err
	}

	// Get genesis doc
	// TODO: move to state package?
	genDoc, err := loadGenesisDoc(stateDB)
	if err != nil {
		genDoc, err = genesisDocProvider()
		if err != nil {
			return nil, err
		}
		// save genesis doc to prevent a certain class of user errors (e.g. when it
		// was changed, accidentally or not). Also good for audit trail.
		saveGenesisDoc(stateDB, genDoc)
	}

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		return nil, err
	}

	// Create the proxyApp, which manages connections (consensus, mempool, query)
	// and syncs Tendermint and the app by performing a handshake
	// and replaying any necessary blocks.
	consensusLogger := logger.With("module", "consensus")
	handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc)
	handshaker.SetLogger(consensusLogger)
	proxyApp := proxy.NewAppConns(clientCreator, handshaker)
	proxyApp.SetLogger(logger.With("module", "proxy"))
	if err := proxyApp.Start(); err != nil {
		return nil, fmt.Errorf("Error starting proxy app connections: %v", err)
	}

	// reload the state (it may have been updated by the handshake)
	state = sm.LoadState(stateDB)

	// If an address is provided, listen on the socket for a
	// connection from an external signing process.
	if config.PrivValidatorListenAddr != "" {
		var (
			// TODO: persist this key so external signer
			// can actually authenticate us
			privKey = ed25519.GenPrivKey()
			pvsc    = privval.NewSocketPV(
				logger.With("module", "privval"),
				config.PrivValidatorListenAddr,
				privKey,
			)
		)

		if err := pvsc.Start(); err != nil {
			return nil, fmt.Errorf("Error starting private validator client: %v", err)
		}

		privValidator = pvsc
	}

	// Decide whether to fast-sync or not
	// We don't fast-sync when the only validator is us.
	fastSync := config.FastSync
	if state.Validators.Size() == 1 {
		addr, _ := state.Validators.GetByIndex(0)
		if bytes.Equal(privValidator.GetAddress(), addr) {
			fastSync = false
		}
	}

	// Log whether this node is a validator or an observer
	if state.Validators.HasAddress(privValidator.GetAddress()) {
		consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	} else {
		consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
	}

	csMetrics, p2pMetrics, memplMetrics := metricsProvider()

	// Make MempoolReactor
	maxBytes := state.ConsensusParams.TxSize.MaxBytes
	mempool := mempl.NewMempool(
		config.Mempool,
		proxyApp.Mempool(),
		state.LastBlockHeight,
		mempl.WithMetrics(memplMetrics),
		mempl.WithFilter(func(tx types.Tx) bool { return len(tx) <= maxBytes }),
	)
	mempoolLogger := logger.With("module", "mempool")
	mempool.SetLogger(mempoolLogger)
	mempool.InitWAL() // no need to have the mempool wal during tests
	mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool)
	mempoolReactor.SetLogger(mempoolLogger)

	if config.Consensus.WaitForTxs() {
		mempool.EnableTxsAvailable()
	}

	// Make Evidence Reactor
	evidenceDB, err := dbProvider(&DBContext{"evidence", config})
	if err != nil {
		return nil, err
	}
	evidenceLogger := logger.With("module", "evidence")
	evidenceStore := evidence.NewEvidenceStore(evidenceDB)
	evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore)
	evidencePool.SetLogger(evidenceLogger)
	evidenceReactor := evidence.NewEvidenceReactor(evidencePool)
	evidenceReactor.SetLogger(evidenceLogger)

	blockExecLogger := logger.With("module", "state")
	// make block executor for consensus and blockchain reactors to execute blocks
	blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool)

	// Make BlockchainReactor
	bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	// Make ConsensusReactor
	consensusState := cs.NewConsensusState(
		config.Consensus,
		state.Copy(),
		blockExec,
		blockStore,
		mempool,
		evidencePool,
		cs.WithMetrics(csMetrics),
	)
	consensusState.SetLogger(consensusLogger)
	if privValidator != nil {
		consensusState.SetPrivValidator(privValidator)
	}
	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync)
	consensusReactor.SetLogger(consensusLogger)

	p2pLogger := logger.With("module", "p2p")

	sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics))
	sw.SetLogger(p2pLogger)
	sw.AddReactor("MEMPOOL", mempoolReactor)
	sw.AddReactor("BLOCKCHAIN", bcReactor)
	sw.AddReactor("CONSENSUS", consensusReactor)
	sw.AddReactor("EVIDENCE", evidenceReactor)
	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", config.NodeKeyFile())

	// Optionally, start the pex reactor
	//
	// TODO:
	//
	// We need to set Seeds and PersistentPeers on the switch,
	// since it needs to be able to use these (and their DNS names)
	// even if the PEX is off. We can include the DNS name in the NetAddress,
	// but it would still be nice to have a clear list of the current "PersistentPeers"
	// somewhere that we can return with net_info.
	//
	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
	// Note we currently use the addrBook regardless at least for AddOurAddress
	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
	if config.P2P.PexReactor {
		// TODO persistent peers ? so we can have their DNS addrs saved
		pexReactor := pex.NewPEXReactor(addrBook,
			&pex.PEXReactorConfig{
				Seeds:    splitAndTrimEmpty(config.P2P.Seeds, ",", " "),
				SeedMode: config.P2P.SeedMode,
			})
		pexReactor.SetLogger(p2pLogger)
		sw.AddReactor("PEX", pexReactor)
	}
	sw.SetAddrBook(addrBook)

	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	// XXX: Query format subject to change
	if config.FilterPeers {
		// NOTE: addr is ip:port
		sw.SetAddrFilter(func(addr net.Addr) error {
			resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: fmt.Sprintf("/p2p/filter/addr/%s", addr.String())})
			if err != nil {
				return err
			}
			if resQuery.IsErr() {
				return fmt.Errorf("Error querying abci app: %v", resQuery)
			}
			return nil
		})
		sw.SetIDFilter(func(id p2p.ID) error {
			resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: fmt.Sprintf("/p2p/filter/id/%s", id)})
			if err != nil {
				return err
			}
			if resQuery.IsErr() {
				return fmt.Errorf("Error querying abci app: %v", resQuery)
			}
			return nil
		})
	}

	eventBus := types.NewEventBus()
	eventBus.SetLogger(logger.With("module", "events"))

	// services which will be publishing and/or subscribing for messages (events)
	// consensusReactor will set it on consensusState and blockExecutor
	consensusReactor.SetEventBus(eventBus)

	// Transaction indexing
	var txIndexer txindex.TxIndexer
	switch config.TxIndex.Indexer {
	case "kv":
		store, err := dbProvider(&DBContext{"tx_index", config})
		if err != nil {
			return nil, err
		}
		if config.TxIndex.IndexTags != "" {
			txIndexer = kv.NewTxIndex(store, kv.IndexTags(splitAndTrimEmpty(config.TxIndex.IndexTags, ",", " ")))
		} else if config.TxIndex.IndexAllTags {
			txIndexer = kv.NewTxIndex(store, kv.IndexAllTags())
		} else {
			txIndexer = kv.NewTxIndex(store)
		}
	default:
		txIndexer = &null.TxIndex{}
	}

	indexerService := txindex.NewIndexerService(txIndexer, eventBus)
	indexerService.SetLogger(logger.With("module", "txindex"))

	// run the profile server
	profileHost := config.ProfListenAddress
	if profileHost != "" {
		go func() {
			logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil))
		}()
	}

	node := &Node{
		config:        config,
		genesisDoc:    genDoc,
		privValidator: privValidator,

		sw:       sw,
		addrBook: addrBook,
		nodeKey:  nodeKey,

		stateDB:          stateDB,
		blockStore:       blockStore,
		bcReactor:        bcReactor,
		mempoolReactor:   mempoolReactor,
		consensusState:   consensusState,
		consensusReactor: consensusReactor,
		evidencePool:     evidencePool,
		proxyApp:         proxyApp,
		txIndexer:        txIndexer,
		indexerService:   indexerService,
		eventBus:         eventBus,
	}
	node.BaseService = *cmn.NewBaseService(logger, "Node", node)
	return node, nil
}
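
// A hedged sketch (not part of this file) of calling NewNode directly with an
// in-process ABCI application instead of the default socket/grpc client: swap
// the ClientCreator and keep the other default providers. Here `app` is any
// abci.Application, and `config`/`logger` are assumed to exist already:
//
//	nodeKey, err := p2p.LoadOrGenNodeKey(config.NodeKeyFile())
//	if err != nil {
//		// handle error
//	}
//	n, err := NewNode(
//		config,
//		privval.LoadOrGenFilePV(config.PrivValidatorFile()),
//		nodeKey,
//		proxy.NewLocalClientCreator(app), // in-process ABCI app
//		DefaultGenesisDocProviderFunc(config),
//		DefaultDBProvider,
//		DefaultMetricsProvider(config.Instrumentation),
//		logger,
//	)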

// OnStart starts the Node. It implements cmn.Service.
func (n *Node) OnStart() error {
	err := n.eventBus.Start()
	if err != nil {
		return err
	}

	// Create & add listener
	l := p2p.NewDefaultListener(
		n.config.P2P.ListenAddress,
		n.config.P2P.ExternalAddress,
		n.config.P2P.UPNP,
		n.Logger.With("module", "p2p"))
	n.sw.AddListener(l)

	nodeInfo := n.makeNodeInfo(n.nodeKey.ID())
	n.sw.SetNodeInfo(nodeInfo)
	n.sw.SetNodeKey(n.nodeKey)

	// Add ourselves to the addrbook to prevent dialing ourselves
	n.addrBook.AddOurAddress(nodeInfo.NetAddress())

	// Add private IDs to the addrbook to prevent those peers from being added
	n.addrBook.AddPrivateIDs(splitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " "))

	// Start the RPC server before the P2P server
	// so we can eg. receive txs for the first block
	if n.config.RPC.ListenAddress != "" {
		listeners, err := n.startRPC()
		if err != nil {
			return err
		}
		n.rpcListeners = listeners
	}

	if n.config.Instrumentation.Prometheus &&
		n.config.Instrumentation.PrometheusListenAddr != "" {
		n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr)
	}

	// Start the switch (the P2P server).
	err = n.sw.Start()
	if err != nil {
		return err
	}

	// Always connect to persistent peers
	if n.config.P2P.PersistentPeers != "" {
		err = n.sw.DialPeersAsync(n.addrBook, splitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " "), true)
		if err != nil {
			return err
		}
	}

	// start tx indexer
	return n.indexerService.Start()
}

// OnStop stops the Node. It implements cmn.Service.
func (n *Node) OnStop() {
	n.BaseService.OnStop()

	n.Logger.Info("Stopping Node")

	// first stop the non-reactor services
	n.eventBus.Stop()
	n.indexerService.Stop()

	// now stop the reactors
	// TODO: gracefully disconnect from peers.
	n.sw.Stop()

	// finally stop the listeners / external services
	for _, l := range n.rpcListeners {
		n.Logger.Info("Closing rpc listener", "listener", l)
		if err := l.Close(); err != nil {
			n.Logger.Error("Error closing listener", "listener", l, "err", err)
		}
	}

	if pvsc, ok := n.privValidator.(*privval.SocketPV); ok {
		if err := pvsc.Stop(); err != nil {
			n.Logger.Error("Error stopping priv validator socket client", "err", err)
		}
	}

	if n.prometheusSrv != nil {
		if err := n.prometheusSrv.Shutdown(context.Background()); err != nil {
			// Error from closing listeners, or context timeout:
			n.Logger.Error("Prometheus HTTP server Shutdown", "err", err)
		}
	}
}

// RunForever waits for an interrupt signal and stops the node.
func (n *Node) RunForever() {
	// Sleep forever and then...
	cmn.TrapSignal(func() {
		n.Stop()
	})
}

// AddListener adds a listener to accept inbound peer connections.
// It should be called before starting the Node.
// The first listener is the primary listener (in NodeInfo).
func (n *Node) AddListener(l p2p.Listener) {
	n.sw.AddListener(l)
}

// ConfigureRPC sets all variables in rpccore so they will serve
// rpc calls from this node.
func (n *Node) ConfigureRPC() {
	rpccore.SetStateDB(n.stateDB)
	rpccore.SetBlockStore(n.blockStore)
	rpccore.SetConsensusState(n.consensusState)
	rpccore.SetMempool(n.mempoolReactor.Mempool)
	rpccore.SetEvidencePool(n.evidencePool)
	rpccore.SetSwitch(n.sw)
	rpccore.SetPubKey(n.privValidator.GetPubKey())
	rpccore.SetGenesisDoc(n.genesisDoc)
	rpccore.SetAddrBook(n.addrBook)
	rpccore.SetProxyAppQuery(n.proxyApp.Query())
	rpccore.SetTxIndexer(n.txIndexer)
	rpccore.SetConsensusReactor(n.consensusReactor)
	rpccore.SetEventBus(n.eventBus)
	rpccore.SetLogger(n.Logger.With("module", "rpc"))
}

func (n *Node) startRPC() ([]net.Listener, error) {
	n.ConfigureRPC()
	listenAddrs := splitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ")
	coreCodec := amino.NewCodec()
	ctypes.RegisterAmino(coreCodec)

	if n.config.RPC.Unsafe {
		rpccore.AddUnsafeRoutes()
	}

	// we may expose the rpc over both a unix and tcp socket
	listeners := make([]net.Listener, len(listenAddrs))
	for i, listenAddr := range listenAddrs {
		mux := http.NewServeMux()
		rpcLogger := n.Logger.With("module", "rpc-server")
		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
		mux.HandleFunc("/websocket", wm.WebsocketHandler)
		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
		listener, err := rpcserver.StartHTTPServer(
			listenAddr,
			mux,
			rpcLogger,
			rpcserver.Config{MaxOpenConnections: n.config.RPC.MaxOpenConnections},
		)
		if err != nil {
			return nil, err
		}
		listeners[i] = listener
	}

	// we expose a simplified api over grpc for convenience to app devs
	grpcListenAddr := n.config.RPC.GRPCListenAddress
	if grpcListenAddr != "" {
		listener, err := grpccore.StartGRPCServer(
			grpcListenAddr,
			grpccore.Config{
				MaxOpenConnections: n.config.RPC.GRPCMaxOpenConnections,
			},
		)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, listener)
	}

	return listeners, nil
}
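
// Once the RPC server is listening, clients can talk to it over HTTP and
// websocket. A hedged sketch (assuming the rpc/client package, imported as
// rpcclient, and the default RPC listen address from the config):
//
//	c := rpcclient.NewHTTP("tcp://localhost:26657", "/websocket")
//	status, err := c.Status()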

// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
// collectors on addr.
func (n *Node) startPrometheusServer(addr string) *http.Server {
	srv := &http.Server{
		Addr: addr,
		Handler: promhttp.InstrumentMetricHandler(
			prometheus.DefaultRegisterer, promhttp.HandlerFor(
				prometheus.DefaultGatherer,
				promhttp.HandlerOpts{MaxRequestsInFlight: n.config.Instrumentation.MaxOpenConnections},
			),
		),
	}
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			// Error starting or closing listener:
			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
		}
	}()
	return srv
}

// Switch returns the Node's Switch.
func (n *Node) Switch() *p2p.Switch {
	return n.sw
}

// BlockStore returns the Node's BlockStore.
func (n *Node) BlockStore() *bc.BlockStore {
	return n.blockStore
}

// ConsensusState returns the Node's ConsensusState.
func (n *Node) ConsensusState() *cs.ConsensusState {
	return n.consensusState
}

// ConsensusReactor returns the Node's ConsensusReactor.
func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
	return n.consensusReactor
}

// MempoolReactor returns the Node's MempoolReactor.
func (n *Node) MempoolReactor() *mempl.MempoolReactor {
	return n.mempoolReactor
}

// EvidencePool returns the Node's EvidencePool.
func (n *Node) EvidencePool() *evidence.EvidencePool {
	return n.evidencePool
}

// EventBus returns the Node's EventBus.
func (n *Node) EventBus() *types.EventBus {
	return n.eventBus
}

// PrivValidator returns the Node's PrivValidator.
// XXX: for convenience only!
func (n *Node) PrivValidator() types.PrivValidator {
	return n.privValidator
}

// GenesisDoc returns the Node's GenesisDoc.
func (n *Node) GenesisDoc() *types.GenesisDoc {
	return n.genesisDoc
}

// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application.
func (n *Node) ProxyApp() proxy.AppConns {
	return n.proxyApp
}

func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo {
	txIndexerStatus := "on"
	if _, ok := n.txIndexer.(*null.TxIndex); ok {
		txIndexerStatus = "off"
	}
	nodeInfo := p2p.NodeInfo{
		ID:      nodeID,
		Network: n.genesisDoc.ChainID,
		Version: version.Version,
		Channels: []byte{
			bc.BlockchainChannel,
			cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel,
			mempl.MempoolChannel,
			evidence.EvidenceChannel,
		},
		Moniker: n.config.Moniker,
		Other: []string{
			fmt.Sprintf("amino_version=%v", amino.Version),
			fmt.Sprintf("p2p_version=%v", p2p.Version),
			fmt.Sprintf("consensus_version=%v", cs.Version),
			fmt.Sprintf("rpc_version=%v/%v", rpc.Version, rpccore.Version),
			fmt.Sprintf("tx_index=%v", txIndexerStatus),
		},
	}

	if n.config.P2P.PexReactor {
		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
	}

	rpcListenAddr := n.config.RPC.ListenAddress
	nodeInfo.Other = append(nodeInfo.Other, fmt.Sprintf("rpc_addr=%v", rpcListenAddr))

	if !n.sw.IsListening() {
		return nodeInfo
	}

	p2pListener := n.sw.Listeners()[0]
	p2pHost := p2pListener.ExternalAddressHost()
	p2pPort := p2pListener.ExternalAddress().Port
	nodeInfo.ListenAddr = fmt.Sprintf("%v:%v", p2pHost, p2pPort)

	return nodeInfo
}

//------------------------------------------------------------------------------

// NodeInfo returns the Node's Info from the Switch.
func (n *Node) NodeInfo() p2p.NodeInfo {
	return n.sw.NodeInfo()
}

//------------------------------------------------------------------------------

var (
	genesisDocKey = []byte("genesisDoc")
)

// loadGenesisDoc loads the GenesisDoc saved in the DB.
// It panics if the stored bytes cannot be unmarshaled.
func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) {
	bytes := db.Get(genesisDocKey)
	if len(bytes) == 0 {
		return nil, errors.New("Genesis doc not found")
	}
	var genDoc *types.GenesisDoc
	err := cdc.UnmarshalJSON(bytes, &genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes))
	}
	return genDoc, nil
}

// saveGenesisDoc saves the given GenesisDoc in the DB.
// It panics if the document cannot be marshaled.
func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) {
	bytes, err := cdc.MarshalJSON(genDoc)
	if err != nil {
		cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err))
	}
	db.SetSync(genesisDocKey, bytes)
}

// splitAndTrimEmpty slices s into all substrings separated by sep, trims each
// substring of all leading and trailing Unicode code points contained in
// cutset, and drops substrings that are empty after trimming. If sep is empty,
// it splits after each UTF-8 sequence, as strings.Split does.
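//
// For example (illustrative only):
//
//	splitAndTrimEmpty("a, b,, c ", ",", " ") // -> ["a", "b", "c"]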
func splitAndTrimEmpty(s, sep, cutset string) []string {
	if s == "" {
		return []string{}
	}

	spl := strings.Split(s, sep)
	nonEmptyStrings := make([]string, 0, len(spl))
	for i := 0; i < len(spl); i++ {
		element := strings.Trim(spl[i], cutset)
		if element != "" {
			nonEmptyStrings = append(nonEmptyStrings, element)
		}
	}
	return nonEmptyStrings
}