package evidence_test

import (
	"encoding/hex"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/tmhash"
	"github.com/tendermint/tendermint/evidence"
	"github.com/tendermint/tendermint/evidence/mocks"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/p2ptest"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

var (
	numEvidence = 10
	rng         = rand.New(rand.NewSource(time.Now().UnixNano()))
)

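// reactorTestSuite bundles the state shared across these tests: a mock p2p
// network with one evidence reactor and pool per node, plus the channels used
// to deliver evidence messages and peer updates.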
type reactorTestSuite struct {
	network          *p2ptest.Network
	logger           log.Logger
	reactors         map[p2p.NodeID]*evidence.Reactor
	pools            map[p2p.NodeID]*evidence.Pool
	evidenceChannels map[p2p.NodeID]*p2p.Channel
	peerUpdates      map[p2p.NodeID]*p2p.PeerUpdates
	peerChans        map[p2p.NodeID]chan p2p.PeerUpdate
	nodes            []*p2ptest.Node

	numStateStores int
}

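// setup creates a test network with one node per state store, wiring each
// node's evidence pool to an in-memory DB and a mocked block store, starting
// its reactor, and registering cleanup that stops all reactors and checks for
// goroutine leaks.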
func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
	t.Helper()

	pID := make([]byte, 16)
	_, err := rng.Read(pID)
	require.NoError(t, err)

	numStateStores := len(stateStores)
	rts := &reactorTestSuite{
		numStateStores: numStateStores,
		logger:         log.TestingLogger().With("testCase", t.Name()),
		network:        p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}),
		reactors:       make(map[p2p.NodeID]*evidence.Reactor, numStateStores),
		pools:          make(map[p2p.NodeID]*evidence.Pool, numStateStores),
		peerUpdates:    make(map[p2p.NodeID]*p2p.PeerUpdates, numStateStores),
		peerChans:      make(map[p2p.NodeID]chan p2p.PeerUpdate, numStateStores),
	}

	chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)}
	rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t,
		chDesc,
		new(tmproto.EvidenceList),
		int(chBuf))

	require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0)

	idx := 0
	evidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
	for nodeID := range rts.network.Nodes {
		logger := rts.logger.With("validator", idx)
		evidenceDB := dbm.NewMemDB()
		blockStore := &mocks.BlockStore{}
		state, _ := stateStores[idx].Load()
		blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta {
			if h <= state.LastBlockHeight {
				return &types.BlockMeta{Header: types.Header{Time: evidenceTime}}
			}
			return nil
		})

		rts.pools[nodeID], err = evidence.NewPool(logger, evidenceDB, stateStores[idx], blockStore)
		require.NoError(t, err)

		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
		rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])
		rts.nodes = append(rts.nodes, rts.network.Nodes[nodeID])

		rts.reactors[nodeID] = evidence.NewReactor(logger,
			rts.evidenceChannels[nodeID],
			rts.peerUpdates[nodeID],
			rts.pools[nodeID])

		require.NoError(t, rts.reactors[nodeID].Start())
		require.True(t, rts.reactors[nodeID].IsRunning())

		idx++
	}

	t.Cleanup(func() {
		for _, r := range rts.reactors {
			if r.IsRunning() {
				require.NoError(t, r.Stop())
				require.False(t, r.IsRunning())
			}
		}

		leaktest.Check(t)
	})

	return rts
}

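// start starts the test network and verifies that every node sees the
// expected number of peers.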
func (rts *reactorTestSuite) start(t *testing.T) {
	rts.network.Start(t)
	require.Len(t,
		rts.network.RandomNode().PeerManager.Peers(),
		rts.numStateStores-1,
		"network does not have expected number of nodes")
}

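// waitForEvidence blocks until the pools for the given node IDs (or all pools
// when no IDs are given) contain every piece of evidence in evList.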
func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...p2p.NodeID) {
	t.Helper()

	fn := func(pool *evidence.Pool) {
		var (
			localEvList []types.Evidence
			size        int64
			loops       int
		)

		// Wait until we have at least the amount of evidence that we expect.
		// If there is more local evidence, it does not make sense to wait
		// longer; a different assertion should catch the resulting error.
		for len(localEvList) < len(evList) {
			// each piece of evidence should not be more than 500 bytes
			localEvList, size = pool.PendingEvidence(int64(len(evList) * 500))
			if loops == 100 {
				t.Log("current wait status:", "|",
					"local", len(localEvList), "|",
					"waitlist", len(evList), "|",
					"size", size)
			}

			loops++
		}

		// Put the reaped evidence in a map so we can quickly check that we got everything.
		evMap := make(map[string]types.Evidence)
		for _, e := range localEvList {
			evMap[string(e.Hash())] = e
		}

		for i, expectedEv := range evList {
			gotEv := evMap[string(expectedEv.Hash())]
			require.Equalf(
				t,
				expectedEv,
				gotEv,
				"evidence at index %d in pool does not match; got: %v, expected: %v", i, gotEv, expectedEv,
			)
		}
	}

	if len(ids) == 1 {
		// Special case: wait once, just to avoid the extra goroutine. If this
		// hits a timeout, the stack will be clearer.
		fn(rts.pools[ids[0]])
		return
	}

	wg := sync.WaitGroup{}

	for id := range rts.pools {
		if len(ids) > 0 && !p2ptest.NodeInSlice(id, ids) {
			// If an ID list is specified, then we only want to wait for the
			// pools named in that list; otherwise, wait for all pools.
			continue
		}

		wg.Add(1)
		go func(id p2p.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
	}

	wg.Wait()
}

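// assertEvidenceChannelsEmpty stops every reactor and asserts that no outbound
// evidence messages remain queued on any channel.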
func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) {
	t.Helper()

	for id, r := range rts.reactors {
		require.NoError(t, r.Stop(), "stopping reactor #%s", id)
		r.Wait()
		require.False(t, r.IsRunning(), "reactor #%s did not stop", id)
	}

	for id, ech := range rts.evidenceChannels {
		require.Empty(t, ech.Out, "checking channel #%q", id)
	}
}

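// createEvidenceList adds numEvidence pieces of mock duplicate-vote evidence,
// one per height starting at height 1, to the given pool and returns them.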
func createEvidenceList(
	t *testing.T,
	pool *evidence.Pool,
	val types.PrivValidator,
	numEvidence int,
) types.EvidenceList {
	t.Helper()

	evList := make([]types.Evidence, numEvidence)
	for i := 0; i < numEvidence; i++ {
		ev := types.NewMockDuplicateVoteEvidenceWithValidator(
			int64(i+1),
			time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC),
			val,
			evidenceChainID,
		)

		require.NoError(t, pool.AddEvidence(ev),
			"adding evidence #%d of %d to pool with height %d",
			i, numEvidence, pool.State().LastBlockHeight)
		evList[i] = ev
	}

	return evList
}

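// TestReactorMultiDisconnect verifies that disconnecting the same peer more
// than once is handled gracefully by the peer manager.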
func TestReactorMultiDisconnect(t *testing.T) {
	val := types.NewMockPV()
	height := int64(numEvidence) + 10

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	rts := setup(t, []sm.Store{stateDB1, stateDB2}, 20)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	_ = createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence)

	require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown)

	rts.start(t)

	require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusUp)

	// Ensure "disconnecting" the secondary peer from the primary more than once
	// is handled gracefully.
	require.NoError(t, primary.PeerManager.Disconnected(secondary.NodeID))
	require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown)
	_, err := primary.PeerManager.TryEvictNext()
	require.NoError(t, err)
	require.NoError(t, primary.PeerManager.Disconnected(secondary.NodeID))

	require.Equal(t, primary.PeerManager.Status(secondary.NodeID), p2p.PeerStatusDown)
	require.Equal(t, secondary.PeerManager.Status(primary.NodeID), p2p.PeerStatusUp)
}

// TestReactorBroadcastEvidence creates an environment of multiple peers that
// are all at the same height. One peer, designated as a primary, gossips all
// evidence to the remaining peers.
func TestReactorBroadcastEvidence(t *testing.T) {
	numPeers := 7

	// create a stateDB for all test suites (nodes)
	stateDBs := make([]sm.Store, numPeers)
	val := types.NewMockPV()

	// We need all validators saved for heights at least as high as we have
	// evidence for.
	height := int64(numEvidence) + 10
	for i := 0; i < numPeers; i++ {
		stateDBs[i] = initializeValidatorState(t, val, height)
	}

	rts := setup(t, stateDBs, 0)
	rts.start(t)

	// Create a series of fixtures where each suite contains a reactor and
	// evidence pool. In addition, we mark a primary suite and the rest are
	// secondaries, where each secondary is added as a peer via a PeerUpdate to
	// the primary. As a result, the primary will gossip all evidence to each secondary.
	primary := rts.network.RandomNode()
	secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1)
	secondaryIDs := make([]p2p.NodeID, 0, cap(secondaries))
	for id := range rts.network.Nodes {
		if id == primary.NodeID {
			continue
		}

		secondaries = append(secondaries, rts.network.Nodes[id])
		secondaryIDs = append(secondaryIDs, id)
	}

	evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence)

	// Add each secondary suite (node) as a peer to the primary suite (node). This
	// will cause the primary to gossip all evidence to the secondaries.
	for _, suite := range secondaries {
		rts.peerChans[primary.NodeID] <- p2p.PeerUpdate{
			Status: p2p.PeerStatusUp,
			NodeID: suite.NodeID,
		}
	}

	// Wait until all secondary suites (reactors) have received all evidence from
	// the primary suite (node).
	rts.waitForEvidence(t, evList, secondaryIDs...)

	for _, pool := range rts.pools {
		require.Equal(t, numEvidence, int(pool.Size()))
	}

	rts.assertEvidenceChannelsEmpty(t)
}

// TestReactorBroadcastEvidence_Lagging tests a context where we have two
// reactors connected to one another but at different heights. Reactor 1, which
// is ahead, receives a list of evidence.
func TestReactorBroadcastEvidence_Lagging(t *testing.T) {
	val := types.NewMockPV()
	height1 := int64(numEvidence) + 10
	height2 := int64(numEvidence) / 2

	// stateDB1 is ahead of stateDB2, where stateDB1 has all heights (1-20) and
	// stateDB2 only has heights 1-5.
	stateDB1 := initializeValidatorState(t, val, height1)
	stateDB2 := initializeValidatorState(t, val, height2)

	rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100)
	rts.start(t)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	// Send a list of valid evidence to the first reactor's evidence pool, the
	// one that is ahead.
	evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence)

	// Add the secondary node as a peer to the primary node. This will cause
	// the primary to gossip all evidence to the secondary.
	rts.peerChans[primary.NodeID] <- p2p.PeerUpdate{
		Status: p2p.PeerStatusUp,
		NodeID: secondary.NodeID,
	}

	// only evidence below the secondary's height should make it through
	rts.waitForEvidence(t, evList[:height2], secondary.NodeID)

	require.Equal(t, numEvidence, int(rts.pools[primary.NodeID].Size()))
	require.Equal(t, int(height2), int(rts.pools[secondary.NodeID].Size()))

	rts.assertEvidenceChannelsEmpty(t)
}

func TestReactorBroadcastEvidence_Pending(t *testing.T) {
	val := types.NewMockPV()
	height := int64(10)

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	rts := setup(t, []sm.Store{stateDB1, stateDB2}, 100)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence)

	// Manually add half the evidence to the secondary, which will mark it as
	// pending.
	for i := 0; i < numEvidence/2; i++ {
		require.NoError(t, rts.pools[secondary.NodeID].AddEvidence(evList[i]))
	}

	// the secondary should have half the evidence as pending
	require.Equal(t, numEvidence/2, int(rts.pools[secondary.NodeID].Size()))

	rts.start(t)

	// The secondary reactor should have received all the evidence, ignoring the
	// already pending evidence.
	rts.waitForEvidence(t, evList, secondary.NodeID)

	// check to make sure that all of the evidence has propagated
	require.Len(t, rts.pools, 2)
	assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(),
		"primary node should have all the evidence")
	if assert.EqualValues(t, numEvidence, rts.pools[secondary.NodeID].Size(),
		"secondary node should have caught up") {
		rts.assertEvidenceChannelsEmpty(t)
	}
}

func TestReactorBroadcastEvidence_Committed(t *testing.T) {
	val := types.NewMockPV()
	height := int64(10)

	stateDB1 := initializeValidatorState(t, val, height)
	stateDB2 := initializeValidatorState(t, val, height)

	rts := setup(t, []sm.Store{stateDB1, stateDB2}, 0)

	primary := rts.nodes[0]
	secondary := rts.nodes[1]

	// add all evidence to the primary reactor
	evList := createEvidenceList(t, rts.pools[primary.NodeID], val, numEvidence)

	// Manually add half the evidence to the secondary, which will mark it as
	// pending.
	for i := 0; i < numEvidence/2; i++ {
		require.NoError(t, rts.pools[secondary.NodeID].AddEvidence(evList[i]))
	}

	// the secondary should have half the evidence as pending
	require.Equal(t, numEvidence/2, int(rts.pools[secondary.NodeID].Size()))

	state, err := stateDB2.Load()
	require.NoError(t, err)

	// update the secondary's pool such that all pending evidence is committed
	state.LastBlockHeight++
	rts.pools[secondary.NodeID].Update(state, evList[:numEvidence/2])

	// the secondary should now have half the evidence as committed
	require.Equal(t, 0, int(rts.pools[secondary.NodeID].Size()))

	// start the network and ensure it's configured
	rts.start(t)

	// Without the following sleep the test consistently fails; likely because
	// the sleep forces a context switch that lets the router process other
	// operations.
	time.Sleep(2 * time.Millisecond)

	// The secondary reactor should have received all the evidence, ignoring the
	// already committed evidence.
	rts.waitForEvidence(t, evList[numEvidence/2:], secondary.NodeID)

	require.Len(t, rts.pools, 2)
	assert.EqualValues(t, numEvidence, rts.pools[primary.NodeID].Size(),
		"primary node should have all the evidence")
	if assert.EqualValues(t, numEvidence/2, rts.pools[secondary.NodeID].Size(),
		"secondary node should have caught up") {
		rts.assertEvidenceChannelsEmpty(t)
	}
}

func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) {
	numPeers := 7

	// create a stateDB for all test suites (nodes)
	stateDBs := make([]sm.Store, numPeers)
	val := types.NewMockPV()

	// We need all validators saved for heights at least as high as we have
	// evidence for.
	height := int64(numEvidence) + 10
	for i := 0; i < numPeers; i++ {
		stateDBs[i] = initializeValidatorState(t, val, height)
	}

	rts := setup(t, stateDBs, 0)
	rts.start(t)

	evList := createEvidenceList(t, rts.pools[rts.network.RandomNode().NodeID], val, numEvidence)

	// every suite (reactor) connects to every other suite (reactor)
	for outerID, outerChan := range rts.peerChans {
		for innerID := range rts.peerChans {
			if outerID != innerID {
				outerChan <- p2p.PeerUpdate{
					Status: p2p.PeerStatusUp,
					NodeID: innerID,
				}
			}
		}
	}

	// wait until all suites (reactors) have received all evidence from the other suites (reactors)
	rts.waitForEvidence(t, evList)

	for _, pool := range rts.pools {
		require.Equal(t, numEvidence, int(pool.Size()))

		// commit state so we do not continue to repeat gossiping the same evidence
		state := pool.State()
		state.LastBlockHeight++
		pool.Update(state, evList)
	}
}

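// TestEvidenceListSerialization checks that a proto EvidenceList marshals to
// the expected byte representation.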
// nolint:lll
func TestEvidenceListSerialization(t *testing.T) {
	exampleVote := func(msgType byte) *types.Vote {
		var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z")
		require.NoError(t, err)

		return &types.Vote{
			Type:      tmproto.SignedMsgType(msgType),
			Height:    3,
			Round:     2,
			Timestamp: stamp,
			BlockID: types.BlockID{
				Hash: tmhash.Sum([]byte("blockID_hash")),
				PartSetHeader: types.PartSetHeader{
					Total: 1000000,
					Hash:  tmhash.Sum([]byte("blockID_part_set_header_hash")),
				},
			},
			ValidatorAddress: crypto.AddressHash([]byte("validator_address")),
			ValidatorIndex:   56789,
		}
	}

	val := &types.Validator{
		Address:     crypto.AddressHash([]byte("validator_address")),
		VotingPower: 10,
	}

	valSet := types.NewValidatorSet([]*types.Validator{val})

	dupl := types.NewDuplicateVoteEvidence(
		exampleVote(1),
		exampleVote(2),
		defaultEvidenceTime,
		valSet,
	)

	testCases := map[string]struct {
		evidenceList []types.Evidence
		expBytes     string
	}{
		"DuplicateVoteEvidence": {
			[]types.Evidence{dupl},
			"0a85020a82020a79080210031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb031279080110031802224a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a2a0b08b1d381d20510809dca6f32146af1f4111082efb388211bc72c55bcd61e9ac3d538d5bb03180a200a2a060880dbaae105",
		},
	}

	for name, tc := range testCases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			protoEv := make([]tmproto.Evidence, len(tc.evidenceList))
			for i := 0; i < len(tc.evidenceList); i++ {
				ev, err := types.EvidenceToProto(tc.evidenceList[i])
				require.NoError(t, err)
				protoEv[i] = *ev
			}

			epl := tmproto.EvidenceList{
				Evidence: protoEv,
			}

			bz, err := epl.Marshal()
			require.NoError(t, err)

			require.Equal(t, tc.expBytes, hex.EncodeToString(bz))
		})
	}
}