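// Package consensus implements the Tendermint consensus reactor: it gossips
// round-step state, proposals, block parts, and votes with peers, and tracks
// each peer's view of the consensus round in a PeerState.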
package consensus

import (
	"bytes"
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	bc "github.com/tendermint/tendermint/blockchain"
	. "github.com/tendermint/tendermint/common"
	"github.com/tendermint/tendermint/events"
	"github.com/tendermint/tendermint/p2p"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/wire"
)
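
// Channel IDs used by the consensus reactor. Receive() dispatches by channel:
// StateChannel carries NewRoundStep, CommitStep, and HasVote messages;
// DataChannel carries Proposal, ProposalPOL, and BlockPart messages;
// VoteChannel carries Vote messages.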
const (
	StateChannel = byte(0x20)
	DataChannel  = byte(0x21)
	VoteChannel  = byte(0x22)

	peerGossipSleepDuration = 100 * time.Millisecond // Time to sleep if there's nothing to send.
)

//-----------------------------------------------------------------------------

type ConsensusReactor struct {
	p2p.BaseReactor

	blockStore *bc.BlockStore
	conS       *ConsensusState
	fastSync   bool
	evsw       events.Fireable
}

func NewConsensusReactor(consensusState *ConsensusState, blockStore *bc.BlockStore, fastSync bool) *ConsensusReactor {
	conR := &ConsensusReactor{
		blockStore: blockStore,
		conS:       consensusState,
		fastSync:   fastSync,
	}
	conR.BaseReactor = *p2p.NewBaseReactor(log, "ConsensusReactor", conR)
	return conR
}
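
// A minimal wiring sketch (an assumption, not taken from this file: it
// presumes a *p2p.Switch with an AddReactor method and the usual
// BaseService Start semantics; sw and privValidator are hypothetical):
//
//	conR := NewConsensusReactor(consensusState, blockStore, fastSync)
//	sw.AddReactor("CONSENSUS", conR) // sw is an assumed *p2p.Switch
//	conR.SetPrivValidator(privValidator)
//	_, err := sw.Start()             // the switch starts the reactor via OnStart()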

func (conR *ConsensusReactor) OnStart() error {
	log.Notice("ConsensusReactor ", "fastSync", conR.fastSync)
	conR.BaseReactor.OnStart()
	if !conR.fastSync {
		_, err := conR.conS.Start()
		if err != nil {
			return err
		}
	}
	go conR.broadcastNewRoundStepRoutine()
	return nil
}

func (conR *ConsensusReactor) OnStop() {
	conR.BaseReactor.OnStop()
	conR.conS.Stop()
}

// SwitchToConsensus switches from fast_sync mode to consensus mode:
// it resets the state, turns off fast_sync, and starts the consensus state-machine.
func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State) {
	log.Notice("SwitchToConsensus")
	// NOTE: The line below causes broadcastNewRoundStepRoutine() to
	// broadcast a NewRoundStepMessage.
	conR.conS.updateToState(state)
	conR.fastSync = false
	conR.conS.Start()
}

// Implements Reactor
func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor {
	// TODO optimize
	return []*p2p.ChannelDescriptor{
		&p2p.ChannelDescriptor{
			ID:                StateChannel,
			Priority:          5,
			SendQueueCapacity: 100,
		},
		&p2p.ChannelDescriptor{
			ID:                DataChannel,
			Priority:          5,
			SendQueueCapacity: 2,
		},
		&p2p.ChannelDescriptor{
			ID:                VoteChannel,
			Priority:          5,
			SendQueueCapacity: 40,
		},
	}
}
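
// AddPeer creates a PeerState for the new peer, stores it under
// types.PeerStateKey, and starts the data and vote gossip goroutines.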
// Implements Reactor
func (conR *ConsensusReactor) AddPeer(peer *p2p.Peer) {
	if !conR.IsRunning() {
		return
	}

	// Create peerState for peer
	peerState := NewPeerState(peer)
	peer.Data.Set(types.PeerStateKey, peerState)

	// Begin gossip routines for this peer.
	go conR.gossipDataRoutine(peer, peerState)
	go conR.gossipVotesRoutine(peer, peerState)

	// Send our state to peer.
	// If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
	if !conR.fastSync {
		conR.sendNewRoundStepMessage(peer)
	}
}

// Implements Reactor
func (conR *ConsensusReactor) RemovePeer(peer *p2p.Peer, reason interface{}) {
	if !conR.IsRunning() {
		return
	}
	// TODO
	//peer.Data.Get(PeerStateKey).(*PeerState).Disconnect()
}

// Implements Reactor
// NOTE: We process these messages even when we're fast_syncing.
func (conR *ConsensusReactor) Receive(chID byte, peer *p2p.Peer, msgBytes []byte) {
	if !conR.IsRunning() {
		log.Debug("Receive", "channel", chID, "peer", peer, "bytes", msgBytes)
		return
	}

	// Get peer states
	ps := peer.Data.Get(types.PeerStateKey).(*PeerState)

	_, msg, err := DecodeMessage(msgBytes)
	if err != nil {
		log.Warn("Error decoding message", "channel", chID, "peer", peer, "msg", msg, "error", err, "bytes", msgBytes)
		// TODO punish peer?
		return
	}
	log.Debug("Receive", "channel", chID, "peer", peer, "msg", msg)

	switch chID {
	case StateChannel:
		switch msg := msg.(type) {
		case *NewRoundStepMessage:
			ps.ApplyNewRoundStepMessage(msg)
		case *CommitStepMessage:
			ps.ApplyCommitStepMessage(msg)
		case *HasVoteMessage:
			ps.ApplyHasVoteMessage(msg)
		default:
			log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case DataChannel:
		if conR.fastSync {
			log.Warn("Ignoring message received during fastSync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *ProposalMessage:
			ps.SetHasProposal(msg.Proposal)
			err = conR.conS.SetProposal(msg.Proposal)
		case *ProposalPOLMessage:
			ps.ApplyProposalPOLMessage(msg)
		case *BlockPartMessage:
			ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Proof.Index)
			_, err = conR.conS.AddProposalBlockPart(msg.Height, msg.Part)
		default:
			log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
		}

	case VoteChannel:
		if conR.fastSync {
			log.Warn("Ignoring message received during fastSync", "msg", msg)
			return
		}
		switch msg := msg.(type) {
		case *VoteMessage:
			vote, valIndex := msg.Vote, msg.ValidatorIndex

			// Attempt to add the vote and dupeout the validator if it's a duplicate signature.
			added, err := conR.conS.TryAddVote(valIndex, vote, peer.Key)
			if err == ErrAddingVote {
				// TODO: punish peer
			} else if err != nil {
				return
			}

			cs := conR.conS
			cs.mtx.Lock()
			height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size()
			cs.mtx.Unlock()
			ps.EnsureVoteBitArrays(height, valSize)
			ps.EnsureVoteBitArrays(height-1, lastCommitSize)
			ps.SetHasVote(vote, valIndex)

			if added {
				// If rs.Height == vote.Height && rs.Round < vote.Round,
				// the peer is sending us CatchupCommit precommits.
				// We could make note of this and help filter in broadcastHasVoteMessage().
				conR.broadcastHasVoteMessage(vote, valIndex)
			}
		default:
			// don't punish (leave room for soft upgrades)
			log.Warn(Fmt("Unknown message type %v", reflect.TypeOf(msg)))
		}

	default:
		log.Warn(Fmt("Unknown channel %X", chID))
	}

	if err != nil {
		log.Warn("Error in Receive()", "error", err)
	}
}

// Broadcasts HasVoteMessage to peers that care.
func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote, index int) {
	msg := &HasVoteMessage{
		Height: vote.Height,
		Round:  vote.Round,
		Type:   vote.Type,
		Index:  index,
	}
	conR.Switch.Broadcast(StateChannel, msg)
	/*
		// TODO: Make this broadcast more selective.
		for _, peer := range conR.Switch.Peers().List() {
			ps := peer.Data.Get(PeerStateKey).(*PeerState)
			prs := ps.GetRoundState()
			if prs.Height == vote.Height {
				// TODO: Also filter on round?
				peer.TrySend(StateChannel, msg)
			} else {
				// Height doesn't match
				// TODO: check a field, maybe CatchupCommitRound?
				// TODO: But that requires changing the struct field comment.
			}
		}
	*/
}

// Sets our private validator account for signing votes.
func (conR *ConsensusReactor) SetPrivValidator(priv *types.PrivValidator) {
	conR.conS.SetPrivValidator(priv)
}

// implements events.Eventable
func (conR *ConsensusReactor) SetFireable(evsw events.Fireable) {
	conR.evsw = evsw
	conR.conS.SetFireable(evsw)
}

//--------------------------------------
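
// makeRoundStepMessages builds a NewRoundStepMessage from the given RoundState,
// plus a CommitStepMessage when the state is in RoundStepCommit.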
func makeRoundStepMessages(rs *RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) {
	nrsMsg = &NewRoundStepMessage{
		Height: rs.Height,
		Round:  rs.Round,
		Step:   rs.Step,
		SecondsSinceStartTime: int(time.Now().Sub(rs.StartTime).Seconds()),
		LastCommitRound:       rs.LastCommit.Round(),
	}
	if rs.Step == RoundStepCommit {
		csMsg = &CommitStepMessage{
			Height:           rs.Height,
			BlockPartsHeader: rs.ProposalBlockParts.Header(),
			BlockParts:       rs.ProposalBlockParts.BitArray(),
		}
	}
	return
}

// Listens for changes to the ConsensusState.Step by pulling
// on conR.conS.NewStepCh().
func (conR *ConsensusReactor) broadcastNewRoundStepRoutine() {
	for {
		// Get RoundState with new Step or quit.
		var rs *RoundState
		select {
		case rs = <-conR.conS.NewStepCh():
		case <-conR.Quit:
			return
		}

		nrsMsg, csMsg := makeRoundStepMessages(rs)
		if nrsMsg != nil {
			conR.Switch.Broadcast(StateChannel, nrsMsg)
		}
		if csMsg != nil {
			conR.Switch.Broadcast(StateChannel, csMsg)
		}
	}
}

func (conR *ConsensusReactor) sendNewRoundStepMessage(peer *p2p.Peer) {
	rs := conR.conS.GetRoundState()
	nrsMsg, csMsg := makeRoundStepMessages(rs)
	if nrsMsg != nil {
		peer.Send(StateChannel, nrsMsg)
	}
	if csMsg != nil {
		peer.Send(StateChannel, csMsg)
	}
}
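
// gossipDataRoutine gossips proposal data to a single peer: parts of the
// current proposal block, block parts loaded from the block store when the
// peer is catching up on an earlier height, and finally the Proposal itself
// plus its POL bit-array once heights and rounds match. It sleeps for
// peerGossipSleepDuration whenever there is nothing to send.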
func (conR *ConsensusReactor) gossipDataRoutine(peer *p2p.Peer, ps *PeerState) {
	log := log.New("peer", peer.Key)

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			log.Notice(Fmt("Stopping gossipDataRoutine for %v.", peer))
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		// Send proposal Block parts?
		if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartsHeader) {
			//log.Info("ProposalBlockParts matched", "blockParts", prs.ProposalBlockParts)
			if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok {
				part := rs.ProposalBlockParts.GetPart(index)
				msg := &BlockPartMessage{
					Height: rs.Height, // This tells peer that this part applies to us.
					Round:  rs.Round,  // This tells peer that this part applies to us.
					Part:   part,
				}
				peer.Send(DataChannel, msg)
				ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
				continue OUTER_LOOP
			}
		}

		// If the peer is on a previous height, help catch up.
		if (0 < prs.Height) && (prs.Height < rs.Height) {
			//log.Info("Data catchup", "height", rs.Height, "peerHeight", prs.Height, "peerProposalBlockParts", prs.ProposalBlockParts)
			if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok {
				// Ensure that the peer's PartSetHeader is correct
				blockMeta := conR.blockStore.LoadBlockMeta(prs.Height)
				if !blockMeta.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
					log.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
						"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
					time.Sleep(peerGossipSleepDuration)
					continue OUTER_LOOP
				}
				// Load the part
				part := conR.blockStore.LoadBlockPart(prs.Height, index)
				if part == nil {
					log.Warn("Could not load part", "index", index,
						"peerHeight", prs.Height, "blockPartsHeader", blockMeta.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
					time.Sleep(peerGossipSleepDuration)
					continue OUTER_LOOP
				}
				// Send the part
				msg := &BlockPartMessage{
					Height: prs.Height, // Not our height, so it doesn't matter.
					Round:  prs.Round,  // Not our height, so it doesn't matter.
					Part:   part,
				}
				peer.Send(DataChannel, msg)
				ps.SetHasProposalBlockPart(prs.Height, prs.Round, index)
				continue OUTER_LOOP
			} else {
				//log.Info("No parts to send in catch-up, sleeping")
				time.Sleep(peerGossipSleepDuration)
				continue OUTER_LOOP
			}
		}

		// If height and round don't match, sleep.
		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
			//log.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
			time.Sleep(peerGossipSleepDuration)
			continue OUTER_LOOP
		}

		// By here, height and round match.
		// Proposal block parts were already matched and sent if any were wanted.
		// (These can match on hash so the round doesn't matter)
		// Now consider sending other things, like the Proposal itself.

		// Send Proposal && ProposalPOL BitArray?
		if rs.Proposal != nil && !prs.Proposal {
			// Proposal
			{
				msg := &ProposalMessage{Proposal: rs.Proposal}
				peer.Send(DataChannel, msg)
				ps.SetHasProposal(rs.Proposal)
			}
			// ProposalPOL.
			// Peer must receive ProposalMessage first.
			// rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round,
			// so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound).
			if 0 <= rs.Proposal.POLRound {
				msg := &ProposalPOLMessage{
					Height:           rs.Height,
					ProposalPOLRound: rs.Proposal.POLRound,
					ProposalPOL:      rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(),
				}
				peer.Send(DataChannel, msg)
			}
			continue OUTER_LOOP
		}

		// Nothing to do. Sleep.
		time.Sleep(peerGossipSleepDuration)
		continue OUTER_LOOP
	}
}
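
// gossipVotesRoutine gossips votes to a single peer: LastCommit precommits,
// prevotes and precommits for the current and previous rounds, and POL
// prevotes when heights match. When the peer lags by one height it sends
// LastCommit; when it lags by two or more it sends precommits loaded from the
// stored block validation. The sleeping counter only throttles the
// "nothing to send" log line.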
func (conR *ConsensusReactor) gossipVotesRoutine(peer *p2p.Peer, ps *PeerState) {
	log := log.New("peer", peer.Key)

	// Simple hack to throttle logs upon sleep.
	var sleeping = 0

OUTER_LOOP:
	for {
		// Manage disconnects from self or peer.
		if !peer.IsRunning() || !conR.IsRunning() {
			log.Notice(Fmt("Stopping gossipVotesRoutine for %v.", peer))
			return
		}
		rs := conR.conS.GetRoundState()
		prs := ps.GetRoundState()

		switch sleeping {
		case 1: // First sleep
			sleeping = 2
		case 2: // No more sleep
			sleeping = 0
		}

		log.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round,
			"prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step)

		// If height matches, then send LastCommit, Prevotes, Precommits.
		if rs.Height == prs.Height {
			// If there are lastCommits to send...
			if prs.Step == RoundStepNewHeight {
				if ps.PickSendVote(rs.LastCommit) {
					log.Info("Picked rs.LastCommit to send")
					continue OUTER_LOOP
				}
			}
			// If there are prevotes to send...
			if rs.Round == prs.Round && prs.Step <= RoundStepPrevote {
				if ps.PickSendVote(rs.Votes.Prevotes(rs.Round)) {
					log.Info("Picked rs.Prevotes(rs.Round) to send")
					continue OUTER_LOOP
				}
			}
			// If there are precommits to send...
			if rs.Round == prs.Round && prs.Step <= RoundStepPrecommit {
				if ps.PickSendVote(rs.Votes.Precommits(rs.Round)) {
					log.Info("Picked rs.Precommits(rs.Round) to send")
					continue OUTER_LOOP
				}
			}
			// If there are prevotes to send for the last round...
			if rs.Round == prs.Round+1 && prs.Step <= RoundStepPrevote {
				if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) {
					log.Info("Picked rs.Prevotes(prs.Round) to send")
					continue OUTER_LOOP
				}
			}
			// If there are precommits to send for the last round...
			if rs.Round == prs.Round+1 && prs.Step <= RoundStepPrecommit {
				if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) {
					log.Info("Picked rs.Precommits(prs.Round) to send")
					continue OUTER_LOOP
				}
			}
			// If there are POLPrevotes to send...
			if 0 <= prs.ProposalPOLRound {
				if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil {
					if ps.PickSendVote(polPrevotes) {
						log.Info("Picked rs.Prevotes(prs.ProposalPOLRound) to send")
						continue OUTER_LOOP
					}
				}
			}
		}

		// Special catchup logic.
		// If peer is lagging by height 1, send LastCommit.
		if prs.Height != 0 && rs.Height == prs.Height+1 {
			if ps.PickSendVote(rs.LastCommit) {
				log.Info("Picked rs.LastCommit to send")
				continue OUTER_LOOP
			}
		}

		// Catchup logic
		// If peer is lagging by more than 1, send Validation.
		if prs.Height != 0 && rs.Height >= prs.Height+2 {
			// Load the block validation for prs.Height,
			// which contains precommit signatures for prs.Height.
			validation := conR.blockStore.LoadBlockValidation(prs.Height)
			log.Info("Loaded BlockValidation for catch-up", "height", prs.Height, "validation", validation)
			if ps.PickSendVote(validation) {
				log.Info("Picked Catchup validation to send")
				continue OUTER_LOOP
			}
		}

		if sleeping == 0 {
			// We sent nothing. Sleep...
			sleeping = 1
			log.Info("No votes to send, sleeping", "peer", peer,
				"localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes,
				"localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits)
		} else if sleeping == 2 {
			// Continued sleep...
			sleeping = 1
		}

		time.Sleep(peerGossipSleepDuration)
		continue OUTER_LOOP
	}
}

//-----------------------------------------------------------------------------

// Read only when returned by PeerState.GetRoundState().
type PeerRoundState struct {
	Height                   int                 // Height peer is at
	Round                    int                 // Round peer is at, -1 if unknown.
	Step                     RoundStepType       // Step peer is at
	StartTime                time.Time           // Estimated start of round 0 at this height
	Proposal                 bool                // True if peer has proposal for this round
	ProposalBlockPartsHeader types.PartSetHeader //
	ProposalBlockParts       *BitArray           //
	ProposalPOLRound         int                 // Proposal's POL round. -1 if none.
	ProposalPOL              *BitArray           // nil until ProposalPOLMessage received.
	Prevotes                 *BitArray           // All votes peer has for this round
	Precommits               *BitArray           // All precommits peer has for this round
	LastCommitRound          int                 // Round of commit for last height. -1 if none.
	LastCommit               *BitArray           // All commit precommits of commit for last height.
	CatchupCommitRound       int                 // Round that we believe commit round is. -1 if none.
	CatchupCommit            *BitArray           // All commit precommits peer has for this height
}

//-----------------------------------------------------------------------------

var (
	ErrPeerStateHeightRegression = errors.New("Error peer state height regression")
	ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime")
)
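
// PeerState holds the consensus reactor's view of a single peer's round state.
// Exported methods synchronize on ps.mtx (directly or via the methods they call).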
type PeerState struct {
	Peer *p2p.Peer

	mtx sync.Mutex
	PeerRoundState
}

func NewPeerState(peer *p2p.Peer) *PeerState {
	return &PeerState{
		Peer: peer,
		PeerRoundState: PeerRoundState{
			Round:              -1,
			ProposalPOLRound:   -1,
			LastCommitRound:    -1,
			CatchupCommitRound: -1,
		},
	}
}

// Returns an atomic snapshot of the PeerRoundState.
// There's no point in mutating it since it won't change PeerState.
func (ps *PeerState) GetRoundState() *PeerRoundState {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	prs := ps.PeerRoundState // copy
	return &prs
}

// Returns an atomic snapshot of the PeerRoundState's height,
// used by the mempool to ensure peers are caught up before broadcasting new txs.
func (ps *PeerState) GetHeight() int {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	return ps.PeerRoundState.Height
}

func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.Height != proposal.Height || ps.Round != proposal.Round {
		return
	}
	if ps.Proposal {
		return
	}

	ps.Proposal = true
	ps.ProposalBlockPartsHeader = proposal.BlockPartsHeader
	ps.ProposalBlockParts = NewBitArray(proposal.BlockPartsHeader.Total)
	ps.ProposalPOLRound = proposal.POLRound
	ps.ProposalPOL = nil // Nil until ProposalPOLMessage received.
}

func (ps *PeerState) SetHasProposalBlockPart(height int, round int, index int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.Height != height || ps.Round != round {
		return
	}

	ps.ProposalBlockParts.SetIndex(index, true)
}

// Convenience function to send vote to peer.
// Returns true if vote was sent.
func (ps *PeerState) PickSendVote(votes types.VoteSetReader) (ok bool) {
	if index, vote, ok := ps.PickVoteToSend(votes); ok {
		msg := &VoteMessage{index, vote}
		ps.Peer.Send(VoteChannel, msg)
		return true
	}
	return false
}

// votes: Must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (index int, vote *types.Vote, ok bool) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if votes.Size() == 0 {
		return 0, nil, false
	}

	height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size()

	// Lazily set data using 'votes'.
	if votes.IsCommit() {
		ps.ensureCatchupCommitRound(height, round, size)
	}
	ps.ensureVoteBitArrays(height, size)

	psVotes := ps.getVoteBitArray(height, round, type_)
	if psVotes == nil {
		return 0, nil, false // Not something worth sending
	}
	if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok {
		ps.setHasVote(height, round, type_, index)
		return index, votes.GetByIndex(index), true
	}
	return 0, nil, false
}
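
// getVoteBitArray returns the bit-array tracking which votes the peer has for
// the given height/round/type: Prevotes or Precommits for the current round,
// CatchupCommit precommits for the catchup-commit round, and LastCommit
// precommits when the peer is one height ahead of the requested votes.
// Returns nil if the request maps onto none of these. Caller must hold ps.mtx.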
func (ps *PeerState) getVoteBitArray(height, round int, type_ byte) *BitArray {
	if ps.Height == height {
		if ps.Round == round {
			switch type_ {
			case types.VoteTypePrevote:
				return ps.Prevotes
			case types.VoteTypePrecommit:
				return ps.Precommits
			default:
				PanicSanity(Fmt("Unexpected vote type %X", type_))
			}
		}
		if ps.CatchupCommitRound == round {
			switch type_ {
			case types.VoteTypePrevote:
				return nil
			case types.VoteTypePrecommit:
				return ps.CatchupCommit
			default:
				PanicSanity(Fmt("Unexpected vote type %X", type_))
			}
		}
		return nil
	}
	if ps.Height == height+1 {
		if ps.LastCommitRound == round {
			switch type_ {
			case types.VoteTypePrevote:
				return nil
			case types.VoteTypePrecommit:
				return ps.LastCommit
			default:
				PanicSanity(Fmt("Unexpected vote type %X", type_))
			}
		}
		return nil
	}
	return nil
}

// NOTE: 'round' is what we know to be the commit round for height.
func (ps *PeerState) ensureCatchupCommitRound(height, round int, numValidators int) {
	if ps.Height != height {
		return
	}
	if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round {
		PanicSanity(Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round))
	}
	if ps.CatchupCommitRound == round {
		return // Nothing to do!
	}
	ps.CatchupCommitRound = round
	if round == ps.Round {
		ps.CatchupCommit = ps.Precommits
	} else {
		ps.CatchupCommit = NewBitArray(numValidators)
	}
}

// NOTE: It's important to make sure that numValidators actually matches
// what the node sees as the number of validators for height.
func (ps *PeerState) EnsureVoteBitArrays(height int, numValidators int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.ensureVoteBitArrays(height, numValidators)
}

func (ps *PeerState) ensureVoteBitArrays(height int, numValidators int) {
	if ps.Height == height {
		if ps.Prevotes == nil {
			ps.Prevotes = NewBitArray(numValidators)
		}
		if ps.Precommits == nil {
			ps.Precommits = NewBitArray(numValidators)
		}
		if ps.CatchupCommit == nil {
			ps.CatchupCommit = NewBitArray(numValidators)
		}
		if ps.ProposalPOL == nil {
			ps.ProposalPOL = NewBitArray(numValidators)
		}
	} else if ps.Height == height+1 {
		if ps.LastCommit == nil {
			ps.LastCommit = NewBitArray(numValidators)
		}
	}
}

func (ps *PeerState) SetHasVote(vote *types.Vote, index int) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()
	ps.setHasVote(vote.Height, vote.Round, vote.Type, index)
}

func (ps *PeerState) setHasVote(height int, round int, type_ byte, index int) {
	log := log.New("peer", ps.Peer.Key, "peerRound", ps.Round, "height", height, "round", round)
	if type_ != types.VoteTypePrevote && type_ != types.VoteTypePrecommit {
		PanicSanity("Invalid vote type")
	}

	if ps.Height == height {
		if ps.Round == round {
			switch type_ {
			case types.VoteTypePrevote:
				ps.Prevotes.SetIndex(index, true)
				log.Info("SetHasVote(round-match)", "prevotes", ps.Prevotes, "index", index)
			case types.VoteTypePrecommit:
				ps.Precommits.SetIndex(index, true)
				log.Info("SetHasVote(round-match)", "precommits", ps.Precommits, "index", index)
			}
		} else if ps.CatchupCommitRound == round {
			switch type_ {
			case types.VoteTypePrevote:
			case types.VoteTypePrecommit:
				ps.CatchupCommit.SetIndex(index, true)
				log.Info("SetHasVote(CatchupCommit)", "catchupCommit", ps.CatchupCommit, "index", index)
			}
		} else if ps.ProposalPOLRound == round {
			switch type_ {
			case types.VoteTypePrevote:
				ps.ProposalPOL.SetIndex(index, true)
				log.Info("SetHasVote(ProposalPOL)", "proposalPOL", ps.ProposalPOL, "index", index)
			case types.VoteTypePrecommit:
			}
		}
	} else if ps.Height == height+1 {
		if ps.LastCommitRound == round {
			switch type_ {
			case types.VoteTypePrevote:
			case types.VoteTypePrecommit:
				ps.LastCommit.SetIndex(index, true)
				log.Info("setHasVote(LastCommit)", "lastCommit", ps.LastCommit, "index", index)
			}
		}
	} else {
		// Does not apply.
	}
}
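
// ApplyNewRoundStepMessage updates the peer's height/round/step and StartTime,
// resets the proposal and vote bit-arrays on a new height or round, promotes
// CatchupCommit to Precommits when the peer reaches the catchup-commit round,
// and shifts Precommits into LastCommit when the peer moves to the next height
// (otherwise clears LastCommit).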
func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	// Ignore duplicate messages.
	if ps.Height == msg.Height && ps.Round == msg.Round && ps.Step == msg.Step {
		return
	}

	// Just remember these values.
	psHeight := ps.Height
	psRound := ps.Round
	//psStep := ps.Step
	psCatchupCommitRound := ps.CatchupCommitRound
	psCatchupCommit := ps.CatchupCommit

	startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second)
	ps.Height = msg.Height
	ps.Round = msg.Round
	ps.Step = msg.Step
	ps.StartTime = startTime

	if psHeight != msg.Height || psRound != msg.Round {
		ps.Proposal = false
		ps.ProposalBlockPartsHeader = types.PartSetHeader{}
		ps.ProposalBlockParts = nil
		ps.ProposalPOLRound = -1
		ps.ProposalPOL = nil
		// We'll update the BitArray capacity later.
		ps.Prevotes = nil
		ps.Precommits = nil
	}
	if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound {
		// Peer caught up to CatchupCommitRound.
		// Preserve psCatchupCommit!
		// NOTE: We prefer to use prs.Precommits if
		// pr.Round matches pr.CatchupCommitRound.
		ps.Precommits = psCatchupCommit
	}
	if psHeight != msg.Height {
		// Shift Precommits to LastCommit.
		if psHeight+1 == msg.Height && psRound == msg.LastCommitRound {
			ps.LastCommitRound = msg.LastCommitRound
			ps.LastCommit = ps.Precommits
		} else {
			ps.LastCommitRound = msg.LastCommitRound
			ps.LastCommit = nil
		}
		// We'll update the BitArray capacity later.
		ps.CatchupCommitRound = -1
		ps.CatchupCommit = nil
	}
}

func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.Height != msg.Height {
		return
	}

	ps.ProposalBlockPartsHeader = msg.BlockPartsHeader
	ps.ProposalBlockParts = msg.BlockParts
}

func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.Height != msg.Height {
		return
	}

	ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
}

func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) {
	ps.mtx.Lock()
	defer ps.mtx.Unlock()

	if ps.Height != msg.Height {
		return
	}
	if ps.ProposalPOLRound != msg.ProposalPOLRound {
		return
	}

	// TODO: Merge onto existing ps.ProposalPOL?
	// We might have sent some prevotes in the meantime.
	ps.ProposalPOL = msg.ProposalPOL
}

//-----------------------------------------------------------------------------
// Messages

const (
	msgTypeNewRoundStep = byte(0x01)
	msgTypeCommitStep   = byte(0x02)
	msgTypeProposal     = byte(0x11)
	msgTypeProposalPOL  = byte(0x12)
	msgTypeBlockPart    = byte(0x13) // both block & POL
	msgTypeVote         = byte(0x14)
	msgTypeHasVote      = byte(0x15)
)

type ConsensusMessage interface{}

var _ = wire.RegisterInterface(
	struct{ ConsensusMessage }{},
	wire.ConcreteType{&NewRoundStepMessage{}, msgTypeNewRoundStep},
	wire.ConcreteType{&CommitStepMessage{}, msgTypeCommitStep},
	wire.ConcreteType{&ProposalMessage{}, msgTypeProposal},
	wire.ConcreteType{&ProposalPOLMessage{}, msgTypeProposalPOL},
	wire.ConcreteType{&BlockPartMessage{}, msgTypeBlockPart},
	wire.ConcreteType{&VoteMessage{}, msgTypeVote},
	wire.ConcreteType{&HasVoteMessage{}, msgTypeHasVote},
)
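
// DecodeMessage peeks at the leading type byte and decodes the wire-encoded
// bytes into the registered ConsensusMessage type.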
// TODO: check for unnecessary extra bytes at the end.
func DecodeMessage(bz []byte) (msgType byte, msg ConsensusMessage, err error) {
	msgType = bz[0]
	n := new(int64)
	r := bytes.NewReader(bz)
	msg = wire.ReadBinary(struct{ ConsensusMessage }{}, r, n, &err).(struct{ ConsensusMessage }).ConsensusMessage
	return
}

//-------------------------------------

// For every height/round/step transition
type NewRoundStepMessage struct {
	Height                int
	Round                 int
	Step                  RoundStepType
	SecondsSinceStartTime int
	LastCommitRound       int
}

func (m *NewRoundStepMessage) String() string {
	return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]",
		m.Height, m.Round, m.Step, m.LastCommitRound)
}

//-------------------------------------

type CommitStepMessage struct {
	Height           int
	BlockPartsHeader types.PartSetHeader
	BlockParts       *BitArray
}

func (m *CommitStepMessage) String() string {
	return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts)
}

//-------------------------------------

type ProposalMessage struct {
	Proposal *types.Proposal
}

func (m *ProposalMessage) String() string {
	return fmt.Sprintf("[Proposal %v]", m.Proposal)
}

//-------------------------------------

type ProposalPOLMessage struct {
	Height           int
	ProposalPOLRound int
	ProposalPOL      *BitArray
}

func (m *ProposalPOLMessage) String() string {
	return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL)
}

//-------------------------------------

type BlockPartMessage struct {
	Height int
	Round  int
	Part   *types.Part
}

func (m *BlockPartMessage) String() string {
	return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part)
}

//-------------------------------------

type VoteMessage struct {
	ValidatorIndex int
	Vote           *types.Vote
}

func (m *VoteMessage) String() string {
	return fmt.Sprintf("[Vote VI:%v V:%v VI:%v]", m.ValidatorIndex, m.Vote, m.ValidatorIndex)
}

//-------------------------------------

type HasVoteMessage struct {
	Height int
	Round  int
	Type   byte
	Index  int
}

func (m *HasVoteMessage) String() string {
	return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v} VI:%v]", m.Index, m.Height, m.Round, m.Type, m.Index)
}