package evidence_test

import (
	"encoding/hex"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/evidence/mocks"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

var (
	numEvidence = 10
	rng         = rand.New(rand.NewSource(time.Now().UnixNano()))
)
type reactorTestSuite struct {
	reactor *evidence.Reactor
	pool    *evidence.Pool
	peerID  p2p.NodeID

	evidenceChannel   *p2p.Channel
	evidenceInCh      chan p2p.Envelope
	evidenceOutCh     chan p2p.Envelope
	evidencePeerErrCh chan p2p.PeerError

	peerUpdatesCh chan p2p.PeerUpdate
	peerUpdates   *p2p.PeerUpdatesCh
}
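// setup wires a reactorTestSuite around the given evidence pool: it assigns a
// random peer ID, creates the in-memory evidence channels with buffer size
// chBuf, starts the reactor, and registers a cleanup that stops it when the
// test ends.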
func setup(t *testing.T, logger log.Logger, pool *evidence.Pool, chBuf uint) *reactorTestSuite {
	t.Helper()

	pID := make([]byte, 16)
	_, err := rng.Read(pID)
	require.NoError(t, err)

	peerUpdatesCh := make(chan p2p.PeerUpdate)

	rts := &reactorTestSuite{
		pool:              pool,
		evidenceInCh:      make(chan p2p.Envelope, chBuf),
		evidenceOutCh:     make(chan p2p.Envelope, chBuf),
		evidencePeerErrCh: make(chan p2p.PeerError, chBuf),
		peerUpdatesCh:     peerUpdatesCh,
		peerUpdates:       p2p.NewPeerUpdates(peerUpdatesCh),
		peerID:            p2p.NodeID(fmt.Sprintf("%x", pID)),
	}

	rts.evidenceChannel = p2p.NewChannel(
		evidence.EvidenceChannel,
		new(tmproto.EvidenceList),
		rts.evidenceInCh,
		rts.evidenceOutCh,
		rts.evidencePeerErrCh,
	)

	rts.reactor = evidence.NewReactor(
		logger,
		rts.evidenceChannel,
		rts.peerUpdates,
		pool,
	)

	require.NoError(t, rts.reactor.Start())
	require.True(t, rts.reactor.IsRunning())

	t.Cleanup(func() {
		require.NoError(t, rts.reactor.Stop())
		require.False(t, rts.reactor.IsRunning())
	})

	return rts
}
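// createTestSuites builds one reactorTestSuite per state store, each backed by
// its own in-memory evidence DB and a mocked block store that returns block
// metadata with a fixed evidence time for every height.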
func createTestSuites(t *testing.T, stateStores []sm.Store, chBuf uint) []*reactorTestSuite {
	t.Helper()

	numSStores := len(stateStores)
	testSuites := make([]*reactorTestSuite, numSStores)
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)

	for i := 0; i < numSStores; i++ {
		logger := log.TestingLogger().With("validator", i)
		evidenceDB := dbm.NewMemDB()
		blockStore := &mocks.BlockStore{}
		blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
			&types.BlockMeta{Header: types.Header{Time: evidenceTime}},
		)

		pool, err := evidence.NewPool(logger, evidenceDB, stateStores[i], blockStore)
		require.NoError(t, err)

		testSuites[i] = setup(t, logger, pool, chBuf)
	}

	return testSuites
}
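// waitForEvidence blocks until each suite's evidence pool contains as many
// pending pieces of evidence as evList, then checks by hash that every
// expected piece is present.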
func waitForEvidence(t *testing.T, evList types.EvidenceList, suites ...*reactorTestSuite) {
	t.Helper()

	wg := new(sync.WaitGroup)
	for _, suite := range suites {
		wg.Add(1)

		go func(s *reactorTestSuite) {
			var localEvList []types.Evidence

			currentPoolSize := 0
			for currentPoolSize != len(evList) {
				// each piece of evidence should be no more than 500 bytes
				localEvList, _ = s.pool.PendingEvidence(int64(len(evList) * 500))
				currentPoolSize = len(localEvList)
			}

			// put the reaped evidence in a map so we can quickly check we got everything
			evMap := make(map[string]types.Evidence)
			for _, e := range localEvList {
				evMap[string(e.Hash())] = e
			}

			for i, expectedEv := range evList {
				gotEv := evMap[string(expectedEv.Hash())]
				require.Equalf(
					t,
					expectedEv,
					gotEv,
					"evidence at index %d in pool does not match; got: %v, expected: %v", i, gotEv, expectedEv,
				)
			}

			wg.Done()
		}(suite)
	}

	// wait for the evidence in all evidence pools
	wg.Wait()
}
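// createEvidenceList creates numEvidence pieces of mock duplicate-vote
// evidence at consecutive heights, adds each piece to the given pool, and
// returns the resulting list.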
func createEvidenceList(
	t *testing.T,
	pool *evidence.Pool,
	val types.PrivValidator,
	numEvidence int,
) types.EvidenceList {
	evList := make([]types.Evidence, numEvidence)
	for i := 0; i < numEvidence; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(
			int64(i+1),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC),
			val,
			evidenceChainID,
		)

		require.NoError(t, pool.AddEvidence(ev))
		evList[i] = ev
	}

	return evList
}
// simulateRouter will increment the provided WaitGroup and execute a simulated
// router where, for each outbound p2p Envelope from the primary reactor, we
// proxy (send) the Envelope to the relevant peer reactor. Done is invoked on
// the WaitGroup when numOut Envelopes are sent (i.e. read from the outbound
// channel).
func simulateRouter(wg *sync.WaitGroup, primary *reactorTestSuite, suites []*reactorTestSuite, numOut int) {
	wg.Add(1)

	// create a mapping for efficient suite lookup by peer ID
	suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite)
	for _, suite := range suites {
		suitesByPeerID[suite.peerID] = suite
	}

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelope to the respective peer (suite).
	go func() {
		for i := 0; i < numOut; i++ {
			envelope := <-primary.evidenceOutCh
			other := suitesByPeerID[envelope.To]

			other.evidenceInCh <- p2p.Envelope{
				From:    primary.peerID,
				To:      envelope.To,
				Message: envelope.Message,
			}
		}

		wg.Done()
	}()
}
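// TestReactorMultiDisconnect asserts that sending multiple PeerStatusDown
// updates for the same peer is handled gracefully by the reactor.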
func TestReactorMultiDisconnect(t *testing.T) {
	val := types.NewMockPV()
	height := int64(numEvidence) + 10

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 20)
	primary := testSuites[0]
	secondary := testSuites[1]

	_ = createEvidenceList(t, primary.pool, val, numEvidence)

	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusUp,
		PeerID: secondary.peerID,
	}

	// Ensure "disconnecting" the secondary peer from the primary more than once
	// is handled gracefully.
	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusDown,
		PeerID: secondary.peerID,
	}
	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusDown,
		PeerID: secondary.peerID,
	}
}
// TestReactorBroadcastEvidence creates an environment of multiple peers that
// are all at the same height. One peer, designated as a primary, gossips all
// evidence to the remaining peers.
func TestReactorBroadcastEvidence(t *testing.T) {
	numPeers := 7

	// create a stateDB for all test suites (nodes)
	stateDBs := make([]sm.Store, numPeers)
	val := types.NewMockPV()

	// We need all validators saved for heights at least as high as we have
	// evidence for.
	height := int64(numEvidence) + 10
	for i := 0; i < numPeers; i++ {
		stateDBs[i] = initializeValidatorState(t, val, height)
	}

	// Create a series of test suites where each suite contains a reactor and
	// evidence pool. In addition, we mark a primary suite and the rest are
	// secondaries where each secondary is added as a peer via a PeerUpdate to the
	// primary. As a result, the primary will gossip all evidence to each secondary.
	testSuites := createTestSuites(t, stateDBs, 0)
	primary := testSuites[0]
	secondaries := testSuites[1:]

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelopes to the respective peer (suite).
	wg := new(sync.WaitGroup)
	simulateRouter(wg, primary, testSuites, numEvidence*len(secondaries))

	evList := createEvidenceList(t, primary.pool, val, numEvidence)

	// Add each secondary suite (node) as a peer to the primary suite (node). This
	// will cause the primary to gossip all evidence to the secondaries.
	for _, suite := range secondaries {
		primary.peerUpdatesCh <- p2p.PeerUpdate{
			Status: p2p.PeerStatusUp,
			PeerID: suite.peerID,
		}
	}

	// Wait until all secondary suites (reactors) have received all evidence from
	// the primary suite (node).
	waitForEvidence(t, evList, secondaries...)

	for _, suite := range testSuites {
		require.Equal(t, numEvidence, int(suite.pool.Size()))
	}

	wg.Wait()

	// ensure all channels are drained
	for _, suite := range testSuites {
		require.Empty(t, suite.evidenceOutCh)
	}
}
// TestReactorBroadcastEvidence_Lagging tests a context where we have two
// reactors connected to one another but at different heights. Reactor 1, which
// is ahead, receives a list of evidence.
func TestReactorBroadcastEvidence_Lagging(t *testing.T) {
	val := types.NewMockPV()
	height1 := int64(numEvidence) + 10
	height2 := int64(numEvidence) / 2

	// stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-20) and
	// stateDB2 only has heights 1-5.
	stateDB1 := initializeValidatorState(t, val, height1)
	stateDB2 := initializeValidatorState(t, val, height2)

	testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 0)
	primary := testSuites[0]
	secondaries := testSuites[1:]

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelope to the respective peer (suite).
	wg := new(sync.WaitGroup)
	simulateRouter(wg, primary, testSuites, numEvidence*len(secondaries))

	// Send a list of valid evidence to the evidence pool of the first reactor,
	// the one that is ahead.
	evList := createEvidenceList(t, primary.pool, val, numEvidence)

	// Add each secondary suite (node) as a peer to the primary suite (node). This
	// will cause the primary to gossip all evidence to the secondaries.
	for _, suite := range secondaries {
		primary.peerUpdatesCh <- p2p.PeerUpdate{
			Status: p2p.PeerStatusUp,
			PeerID: suite.peerID,
		}
	}

	// only the ones less than the peer's height should make it through
	waitForEvidence(t, evList[:height2+2], secondaries...)

	require.Equal(t, numEvidence, int(primary.pool.Size()))
	require.Equal(t, int(height2+2), int(secondaries[0].pool.Size()))

	// The primary will continue to send the remaining evidence to the secondaries
	// so we wait until it has sent all the envelopes.
	wg.Wait()

	// ensure all channels are drained
	for _, suite := range testSuites {
		require.Empty(t, suite.evidenceOutCh)
	}
}
func TestReactorBroadcastEvidence_Pending(t *testing.T) {
	val := types.NewMockPV()
	height := int64(10)

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 0)
	primary := testSuites[0]
	secondary := testSuites[1]

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelopes to the respective peer (suite).
	wg := new(sync.WaitGroup)
	simulateRouter(wg, primary, testSuites, numEvidence)

	// add all evidence to the primary reactor
	evList := createEvidenceList(t, primary.pool, val, numEvidence)

	// Manually add half the evidence to the secondary, which will mark it as
	// pending.
	for i := 0; i < numEvidence/2; i++ {
		require.NoError(t, secondary.pool.AddEvidence(evList[i]))
	}

	// the secondary should have half the evidence as pending
	require.Equal(t, uint32(numEvidence/2), secondary.pool.Size())

	// add the secondary reactor as a peer to the primary reactor
	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusUp,
		PeerID: secondary.peerID,
	}

	// The secondary reactor should have received all the evidence, ignoring the
	// already pending evidence.
	waitForEvidence(t, evList, secondary)

	for _, suite := range testSuites {
		require.Equal(t, numEvidence, int(suite.pool.Size()))
	}

	wg.Wait()

	// ensure all channels are drained
	for _, suite := range testSuites {
		require.Empty(t, suite.evidenceOutCh)
	}
}
func TestReactorBroadcastEvidence_Committed(t *testing.T) {
	val := types.NewMockPV()
	height := int64(10)

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, 0)
	primary := testSuites[0]
	secondary := testSuites[1]

	// add all evidence to the primary reactor
	evList := createEvidenceList(t, primary.pool, val, numEvidence)

	// Manually add half the evidence to the secondary, which will mark it as
	// pending.
	for i := 0; i < numEvidence/2; i++ {
		require.NoError(t, secondary.pool.AddEvidence(evList[i]))
	}

	// the secondary should have half the evidence as pending
	require.Equal(t, uint32(numEvidence/2), secondary.pool.Size())

	state, err := stateDB2.Load()
	require.NoError(t, err)

	// update the secondary's pool such that all pending evidence is committed
	state.LastBlockHeight++
	secondary.pool.Update(state, evList[:numEvidence/2])

	// the secondary's pool should now be empty, since its half of the evidence
	// is committed rather than pending
	require.Equal(t, uint32(0), secondary.pool.Size())

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelopes to the respective peer (suite).
	wg := new(sync.WaitGroup)
	simulateRouter(wg, primary, testSuites, numEvidence)

	// add the secondary reactor as a peer to the primary reactor
	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusUp,
		PeerID: secondary.peerID,
	}

	// The secondary reactor should have received all the evidence, ignoring the
	// already committed evidence.
	waitForEvidence(t, evList[numEvidence/2:], secondary)

	require.Equal(t, numEvidence, int(primary.pool.Size()))
	require.Equal(t, numEvidence/2, int(secondary.pool.Size()))

	wg.Wait()

	// ensure all channels are drained
	for _, suite := range testSuites {
		require.Empty(t, suite.evidenceOutCh)
	}
}
func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) {
	numPeers := 7

	// create a stateDB for all test suites (nodes)
	stateDBs := make([]sm.Store, numPeers)
	val := types.NewMockPV()

	// We need all validators saved for heights at least as high as we have
	// evidence for.
	height := int64(numEvidence) + 10
	for i := 0; i < numPeers; i++ {
		stateDBs[i] = initializeValidatorState(t, val, height)
	}

	testSuites := createTestSuites(t, stateDBs, 0)

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelopes to the respective peer (suite).
	wg := new(sync.WaitGroup)
	for _, suite := range testSuites {
		simulateRouter(wg, suite, testSuites, numEvidence*(len(testSuites)-1))
	}

	evList := createEvidenceList(t, testSuites[0].pool, val, numEvidence)

	// every suite (reactor) connects to every other suite (reactor)
	for _, suiteI := range testSuites {
		for _, suiteJ := range testSuites {
			if suiteI.peerID != suiteJ.peerID {
				suiteI.peerUpdatesCh <- p2p.PeerUpdate{
					Status: p2p.PeerStatusUp,
					PeerID: suiteJ.peerID,
				}
			}
		}
	}

	// wait until all suites (reactors) have received all evidence from the other suites (reactors)
	waitForEvidence(t, evList, testSuites...)

	for _, suite := range testSuites {
		require.Equal(t, numEvidence, int(suite.pool.Size()))

		// commit state so we do not continue to repeat gossiping the same evidence
		state := suite.pool.State()
		state.LastBlockHeight++
		suite.pool.Update(state, evList)
	}

	wg.Wait()
}
func TestReactorBroadcastEvidence_RemovePeer(t *testing.T) {
	val := types.NewMockPV()
	height := int64(10)

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	testSuites := createTestSuites(t, []sm.Store{stateDB1, stateDB2}, uint(numEvidence))
	primary := testSuites[0]
	secondary := testSuites[1]

	// Simulate a router by listening for all outbound envelopes and proxying the
	// envelopes to the respective peer (suite).
	wg := new(sync.WaitGroup)
	simulateRouter(wg, primary, testSuites, numEvidence/2)

	// add all evidence to the primary reactor
	evList := createEvidenceList(t, primary.pool, val, numEvidence)

	// add the secondary reactor as a peer to the primary reactor
	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusUp,
		PeerID: secondary.peerID,
	}

	// have the secondary reactor receive only half the evidence
	waitForEvidence(t, evList[:numEvidence/2], secondary)

	// disconnect the peer
	primary.peerUpdatesCh <- p2p.PeerUpdate{
		Status: p2p.PeerStatusDown,
		PeerID: secondary.peerID,
	}

	// Ensure the secondary only received half of the evidence before being
	// disconnected.
	require.Equal(t, numEvidence/2, int(secondary.pool.Size()))

	wg.Wait()

	// The primary reactor should still be attempting to send the remaining half.
	//
	// NOTE: The channel is buffered (size numEvidence) so as to ensure the
	// primary reactor will send all envelopes at once before receiving the
	// signal to stop gossiping.
	for i := 0; i < numEvidence/2; i++ {
		<-primary.evidenceOutCh
	}

	// ensure all channels are drained
	for _, suite := range testSuites {
		require.Empty(t, suite.evidenceOutCh)
	}
}
// nolint:lll
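// TestEvidenceListSerialization checks that a proto-encoded EvidenceList
// marshals to the exact expected byte sequence.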
func TestEvidenceListSerialization(t *testing.T) {
	exampleVote := func(msgType byte) *types.Vote {
		var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z")
		require.NoError(t, err)

		return &types.Vote{
			Type:      tmproto.SignedMsgType(msgType),
			Height:    3,
			Round:     2,
			Timestamp: stamp,
			BlockID: types.BlockID{
				Hash: tmhash.Sum([]byte("blockID_hash")),
				PartSetHeader: types.PartSetHeader{
					Total: 1000000,
					Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
				},
			},
			ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
			ValidatorIndex:   56789,
		}
	}

	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}

	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(
		exampleVote(1),
		exampleVote(2),
		defaultEvidenceTime,
		valSet,
	)

	testCases := map[string]struct {
		evidenceList []types.Evidence
		expBytes     string
	}{
		"DuplicateVoteEvidence": {
			[]types.Evidence{dupl},
			"0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105",
		},
	}

	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			protoEv := make([]tmproto.Evidence, len(tc.evidenceList))
			for i := 0; i < len(tc.evidenceList); i++ {
				ev, err := types.EvidenceToProto(tc.evidenceList[i])
				require.NoError(t, err)
				protoEv[i] = *ev
			}

			epl := tmproto.EvidenceList{
				Evidence: protoEv,
			}

			bz, err := epl.Marshal()
			require.NoError(t, err)
			require.Equal(t, tc.expBytes, hex.EncodeToString(bz))
		})
	}
}