package statesync

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/proxy"
	proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks"
	smmocks "github.com/tendermint/tendermint/internal/state/mocks"
	"github.com/tendermint/tendermint/internal/statesync/mocks"
	"github.com/tendermint/tendermint/internal/store"
	"github.com/tendermint/tendermint/internal/test/factory"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/light/provider"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

var (
	m = PrometheusMetrics(config.TestConfig().Instrumentation.Namespace)
)
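
// reactorTestSuite bundles a state sync Reactor under test together with the
// mocked ABCI connections, the mocked state provider, the raw p2p channels,
// and the stores that the tests use to drive and observe it.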
type reactorTestSuite struct {
	reactor *Reactor
	syncer  *syncer

	conn          *proxymocks.AppConnSnapshot
	connQuery     *proxymocks.AppConnQuery
	stateProvider *mocks.StateProvider

	snapshotChannel   *p2p.Channel
	snapshotInCh      chan p2p.Envelope
	snapshotOutCh     chan p2p.Envelope
	snapshotPeerErrCh chan p2p.PeerError

	chunkChannel   *p2p.Channel
	chunkInCh      chan p2p.Envelope
	chunkOutCh     chan p2p.Envelope
	chunkPeerErrCh chan p2p.PeerError

	blockChannel   *p2p.Channel
	blockInCh      chan p2p.Envelope
	blockOutCh     chan p2p.Envelope
	blockPeerErrCh chan p2p.PeerError

	paramsChannel   *p2p.Channel
	paramsInCh      chan p2p.Envelope
	paramsOutCh     chan p2p.Envelope
	paramsPeerErrCh chan p2p.PeerError

	peerUpdateCh chan p2p.PeerUpdate
	peerUpdates  *p2p.PeerUpdates

	stateStore *smmocks.Store
	blockStore *store.BlockStore
}
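
// setup constructs a reactorTestSuite: it fills in default mocks for any nil
// arguments, wires up buffered p2p channels of size chBuf, starts the reactor,
// and registers a cleanup that stops it when the test finishes.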
func setup(
	t *testing.T,
	conn *proxymocks.AppConnSnapshot,
	connQuery *proxymocks.AppConnQuery,
	stateProvider *mocks.StateProvider,
	chBuf uint,
) *reactorTestSuite {
	t.Helper()

	if conn == nil {
		conn = &proxymocks.AppConnSnapshot{}
	}
	if connQuery == nil {
		connQuery = &proxymocks.AppConnQuery{}
	}
	if stateProvider == nil {
		stateProvider = &mocks.StateProvider{}
	}

	rts := &reactorTestSuite{
		snapshotInCh:      make(chan p2p.Envelope, chBuf),
		snapshotOutCh:     make(chan p2p.Envelope, chBuf),
		snapshotPeerErrCh: make(chan p2p.PeerError, chBuf),
		chunkInCh:         make(chan p2p.Envelope, chBuf),
		chunkOutCh:        make(chan p2p.Envelope, chBuf),
		chunkPeerErrCh:    make(chan p2p.PeerError, chBuf),
		blockInCh:         make(chan p2p.Envelope, chBuf),
		blockOutCh:        make(chan p2p.Envelope, chBuf),
		blockPeerErrCh:    make(chan p2p.PeerError, chBuf),
		paramsInCh:        make(chan p2p.Envelope, chBuf),
		paramsOutCh:       make(chan p2p.Envelope, chBuf),
		paramsPeerErrCh:   make(chan p2p.PeerError, chBuf),
		conn:              conn,
		connQuery:         connQuery,
		stateProvider:     stateProvider,
	}

	rts.peerUpdateCh = make(chan p2p.PeerUpdate, chBuf)
	rts.peerUpdates = p2p.NewPeerUpdates(rts.peerUpdateCh, int(chBuf))

	rts.snapshotChannel = p2p.NewChannel(
		SnapshotChannel,
		new(ssproto.Message),
		rts.snapshotInCh,
		rts.snapshotOutCh,
		rts.snapshotPeerErrCh,
	)

	rts.chunkChannel = p2p.NewChannel(
		ChunkChannel,
		new(ssproto.Message),
		rts.chunkInCh,
		rts.chunkOutCh,
		rts.chunkPeerErrCh,
	)

	rts.blockChannel = p2p.NewChannel(
		LightBlockChannel,
		new(ssproto.Message),
		rts.blockInCh,
		rts.blockOutCh,
		rts.blockPeerErrCh,
	)

	rts.paramsChannel = p2p.NewChannel(
		ParamsChannel,
		new(ssproto.Message),
		rts.paramsInCh,
		rts.paramsOutCh,
		rts.paramsPeerErrCh,
	)

	rts.stateStore = &smmocks.Store{}
	rts.blockStore = store.NewBlockStore(dbm.NewMemDB())

	cfg := config.DefaultStateSyncConfig()

	rts.reactor = NewReactor(
		factory.DefaultTestChainID,
		1,
		*cfg,
		log.TestingLogger(),
		conn,
		connQuery,
		rts.snapshotChannel,
		rts.chunkChannel,
		rts.blockChannel,
		rts.paramsChannel,
		rts.peerUpdates,
		rts.stateStore,
		rts.blockStore,
		"",
		m,
	)

	rts.syncer = newSyncer(
		*cfg,
		log.NewNopLogger(),
		conn,
		connQuery,
		stateProvider,
		rts.snapshotOutCh,
		rts.chunkOutCh,
		rts.snapshotChannel.Done(),
		"",
		rts.reactor.metrics,
	)

	require.NoError(t, rts.reactor.Start())
	require.True(t, rts.reactor.IsRunning())

	t.Cleanup(func() {
		require.NoError(t, rts.reactor.Stop())
		rts.reactor.Wait()
		require.False(t, rts.reactor.IsRunning())
	})

	return rts
}
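
// TestReactor_Sync exercises a full state sync over the p2p state provider:
// the mocked app accepts the offered snapshot and every chunk, while helper
// goroutines answer the light block, snapshot, chunk, and consensus params
// requests until Sync completes.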
func TestReactor_Sync(t *testing.T) {
	const snapshotHeight = 7
	rts := setup(t, nil, nil, nil, 2)
	chain := buildLightBlockChain(t, 1, 10, time.Now())

	// app accepts any snapshot
	rts.conn.On("OfferSnapshotSync", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")).
		Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil)

	// app accepts every chunk
	rts.conn.On("ApplySnapshotChunkSync", ctx, mock.AnythingOfType("types.RequestApplySnapshotChunk")).
		Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)

	// app query returns valid state app hash
	rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{
		AppVersion:       9,
		LastBlockHeight:  snapshotHeight,
		LastBlockAppHash: chain[snapshotHeight+1].AppHash,
	}, nil)

	// store accepts state and validator sets
	rts.stateStore.On("Bootstrap", mock.AnythingOfType("state.State")).Return(nil)
	rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"),
		mock.AnythingOfType("*types.ValidatorSet")).Return(nil)

	closeCh := make(chan struct{})
	defer close(closeCh)

	go handleLightBlockRequests(t, chain, rts.blockOutCh,
		rts.blockInCh, closeCh, 0)
	go graduallyAddPeers(rts.peerUpdateCh, closeCh, 1*time.Second)
	go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{
		{
			Height: uint64(snapshotHeight),
			Format: 1,
			Chunks: 1,
		},
	})
	go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc"))
	go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh)

	// update the config to use the p2p provider
	rts.reactor.cfg.UseP2P = true
	rts.reactor.cfg.TrustHeight = 1
	rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash())
	rts.reactor.cfg.DiscoveryTime = 1 * time.Second

	// run state sync
	_, err := rts.reactor.Sync(context.Background())
	require.NoError(t, err)
}

func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) {
	rts := setup(t, nil, nil, nil, 2)

	rts.chunkInCh <- p2p.Envelope{
		From:    types.NodeID("aa"),
		Message: &ssproto.SnapshotsRequest{},
	}

	response := <-rts.chunkPeerErrCh
	require.Error(t, response.Err)
	require.Empty(t, rts.chunkOutCh)
	require.Contains(t, response.Err.Error(), "received unknown message")
	require.Equal(t, types.NodeID("aa"), response.NodeID)
}

func TestReactor_ChunkRequest(t *testing.T) {
	testcases := map[string]struct {
		request        *ssproto.ChunkRequest
		chunk          []byte
		expectResponse *ssproto.ChunkResponse
	}{
		"chunk is returned": {
			&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1},
			[]byte{1, 2, 3},
			&ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}},
		},
		"empty chunk is returned, as empty": {
			&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1},
			[]byte{},
			&ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Chunk: []byte{}},
		},
		"nil (missing) chunk is returned as missing": {
			&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1},
			nil,
			&ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true},
		},
		"invalid request": {
			&ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1},
			nil,
			&ssproto.ChunkResponse{Height: 1, Format: 1, Index: 1, Missing: true},
		},
	}

	for name, tc := range testcases {
		tc := tc

		t.Run(name, func(t *testing.T) {
			// mock ABCI connection to return local snapshots
			conn := &proxymocks.AppConnSnapshot{}
			conn.On("LoadSnapshotChunkSync", context.Background(), abci.RequestLoadSnapshotChunk{
				Height: tc.request.Height,
				Format: tc.request.Format,
				Chunk:  tc.request.Index,
			}).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil)

			rts := setup(t, conn, nil, nil, 2)

			rts.chunkInCh <- p2p.Envelope{
				From:    types.NodeID("aa"),
				Message: tc.request,
			}

			response := <-rts.chunkOutCh
			require.Equal(t, tc.expectResponse, response.Message)
			require.Empty(t, rts.chunkOutCh)

			conn.AssertExpectations(t)
		})
	}
}

func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) {
	rts := setup(t, nil, nil, nil, 2)

	rts.snapshotInCh <- p2p.Envelope{
		From:    types.NodeID("aa"),
		Message: &ssproto.ChunkRequest{},
	}

	response := <-rts.snapshotPeerErrCh
	require.Error(t, response.Err)
	require.Empty(t, rts.snapshotOutCh)
	require.Contains(t, response.Err.Error(), "received unknown message")
	require.Equal(t, types.NodeID("aa"), response.NodeID)
}

func TestReactor_SnapshotsRequest(t *testing.T) {
	testcases := map[string]struct {
		snapshots       []*abci.Snapshot
		expectResponses []*ssproto.SnapshotsResponse
	}{
		"no snapshots": {nil, []*ssproto.SnapshotsResponse{}},
		">10 unordered snapshots": {
			[]*abci.Snapshot{
				{Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1}},
				{Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}},
				{Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}},
				{Height: 1, Format: 1, Chunks: 7, Hash: []byte{1, 1}, Metadata: []byte{4}},
				{Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}},
				{Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}},
				{Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}},
				{Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}},
				{Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}},
				{Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}},
				{Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}},
				{Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}},
			},
			[]*ssproto.SnapshotsResponse{
				{Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}},
				{Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}},
				{Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}},
				{Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}},
				{Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}},
				{Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}},
				{Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}},
				{Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}},
				{Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}},
				{Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}},
			},
		},
	}

	for name, tc := range testcases {
		tc := tc

		t.Run(name, func(t *testing.T) {
			// mock ABCI connection to return local snapshots
			conn := &proxymocks.AppConnSnapshot{}
			conn.On("ListSnapshotsSync", context.Background(), abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{
				Snapshots: tc.snapshots,
			}, nil)

			rts := setup(t, conn, nil, nil, 100)

			rts.snapshotInCh <- p2p.Envelope{
				From:    types.NodeID("aa"),
				Message: &ssproto.SnapshotsRequest{},
			}

			if len(tc.expectResponses) > 0 {
				retryUntil(t, func() bool { return len(rts.snapshotOutCh) == len(tc.expectResponses) }, time.Second)
			}

			responses := make([]*ssproto.SnapshotsResponse, len(tc.expectResponses))
			for i := 0; i < len(tc.expectResponses); i++ {
				e := <-rts.snapshotOutCh
				responses[i] = e.Message.(*ssproto.SnapshotsResponse)
			}

			require.Equal(t, tc.expectResponses, responses)
			require.Empty(t, rts.snapshotOutCh)
		})
	}
}
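
// TestReactor_LightBlockResponse saves a signed header to the block store,
// stubs validator loading from the state store, and checks that a
// LightBlockRequest from a peer is answered with the matching light block.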
func TestReactor_LightBlockResponse(t *testing.T) {
	rts := setup(t, nil, nil, nil, 2)

	var height int64 = 10
	h := factory.MakeRandomHeader()
	h.Height = height
	blockID := factory.MakeBlockIDWithHash(h.Hash())
	vals, pv := factory.RandValidatorSet(1, 10)
	vote, err := factory.MakeVote(pv[0], h.ChainID, 0, h.Height, 0, 2,
		blockID, factory.DefaultTestTime)
	require.NoError(t, err)

	sh := &types.SignedHeader{
		Header: h,
		Commit: &types.Commit{
			Height:  h.Height,
			BlockID: blockID,
			Signatures: []types.CommitSig{
				vote.CommitSig(),
			},
		},
	}

	lb := &types.LightBlock{
		SignedHeader: sh,
		ValidatorSet: vals,
	}

	require.NoError(t, rts.blockStore.SaveSignedHeader(sh, blockID))

	rts.stateStore.On("LoadValidators", height).Return(vals, nil)

	rts.blockInCh <- p2p.Envelope{
		From: types.NodeID("aa"),
		Message: &ssproto.LightBlockRequest{
			Height: 10,
		},
	}
	require.Empty(t, rts.blockPeerErrCh)

	select {
	case response := <-rts.blockOutCh:
		require.Equal(t, types.NodeID("aa"), response.To)
		res, ok := response.Message.(*ssproto.LightBlockResponse)
		require.True(t, ok)
		receivedLB, err := types.LightBlockFromProto(res.LightBlock)
		require.NoError(t, err)
		require.Equal(t, lb, receivedLB)
	case <-time.After(1 * time.Second):
		t.Fatal("expected light block response")
	}
}

func TestReactor_BlockProviders(t *testing.T) {
	rts := setup(t, nil, nil, nil, 2)
	rts.peerUpdateCh <- p2p.PeerUpdate{
		NodeID: types.NodeID("aa"),
		Status: p2p.PeerStatusUp,
	}
	rts.peerUpdateCh <- p2p.PeerUpdate{
		NodeID: types.NodeID("bb"),
		Status: p2p.PeerStatusUp,
	}

	closeCh := make(chan struct{})
	defer close(closeCh)

	chain := buildLightBlockChain(t, 1, 10, time.Now())
	go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0)

	peers := rts.reactor.peers.All()
	require.Len(t, peers, 2)

	providers := make([]provider.Provider, len(peers))
	for idx, peer := range peers {
		providers[idx] = NewBlockProvider(peer, factory.DefaultTestChainID, rts.reactor.dispatcher)
	}

	wg := sync.WaitGroup{}

	for _, p := range providers {
		wg.Add(1)
		go func(t *testing.T, p provider.Provider) {
			defer wg.Done()
			for height := 2; height < 10; height++ {
				lb, err := p.LightBlock(context.Background(), int64(height))
				require.NoError(t, err)
				require.NotNil(t, lb)
				require.Equal(t, height, int(lb.Height))
			}
		}(t, p)
	}

	ctx, cancel := context.WithCancel(context.Background())
	go func() { wg.Wait(); cancel() }()

	select {
	case <-time.After(time.Second):
		// not all of the requests to the dispatcher were responded to
		// within the timeout
		t.Fail()
	case <-ctx.Done():
	}
}
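
// TestReactor_StateProviderP2P initializes the p2p-backed state provider
// against two fake peers and verifies that the app hash, state, and commit
// can be fetched for a height, and that a snapshot can then be added to the
// syncer.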
func TestReactor_StateProviderP2P(t *testing.T) {
	t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute))
	rts := setup(t, nil, nil, nil, 2)
	// make the syncer non-nil, otherwise the reactor does not consider itself to be state syncing
	rts.reactor.syncer = rts.syncer
	peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength))
	peerB := types.NodeID(strings.Repeat("b", 2*types.NodeIDByteLength))
	rts.peerUpdateCh <- p2p.PeerUpdate{
		NodeID: peerA,
		Status: p2p.PeerStatusUp,
	}
	rts.peerUpdateCh <- p2p.PeerUpdate{
		NodeID: peerB,
		Status: p2p.PeerStatusUp,
	}

	closeCh := make(chan struct{})
	defer close(closeCh)

	chain := buildLightBlockChain(t, 1, 10, time.Now())
	go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0)
	go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh)

	rts.reactor.cfg.UseP2P = true
	rts.reactor.cfg.TrustHeight = 1
	rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash())

	for _, p := range []types.NodeID{peerA, peerB} {
		if !rts.reactor.peers.Contains(p) {
			rts.reactor.peers.Append(p)
		}
	}
	require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured")

	bctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ictx, cancel := context.WithTimeout(bctx, time.Second)
	defer cancel()

	rts.reactor.mtx.Lock()
	err := rts.reactor.initStateProvider(ictx, factory.DefaultTestChainID, 1)
	rts.reactor.mtx.Unlock()
	require.NoError(t, err)
	rts.reactor.syncer.stateProvider = rts.reactor.stateProvider

	actx, cancel := context.WithTimeout(bctx, 10*time.Second)
	defer cancel()

	appHash, err := rts.reactor.stateProvider.AppHash(actx, 5)
	require.NoError(t, err)
	require.Len(t, appHash, 32)

	state, err := rts.reactor.stateProvider.State(actx, 5)
	require.NoError(t, err)
	require.Equal(t, appHash, state.AppHash)
	require.Equal(t, types.DefaultConsensusParams(), &state.ConsensusParams)

	commit, err := rts.reactor.stateProvider.Commit(actx, 5)
	require.NoError(t, err)
	require.Equal(t, commit.BlockID, state.LastBlockID)

	added, err := rts.reactor.syncer.AddSnapshot(peerA, &snapshot{
		Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1},
	})
	require.NoError(t, err)
	require.True(t, added)
}
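
// TestReactor_Backfill runs the backfill algorithm from height 20 down to 10
// against peers whose responses fail at varying rates, asserting that blocks
// and validator sets are stored on success and that an error is returned when
// too many responses are invalid or missing.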
func TestReactor_Backfill(t *testing.T) {
	// test backfill algorithm with varying failure rates [0, 10]
	failureRates := []int{0, 2, 9}
	for _, failureRate := range failureRates {
		failureRate := failureRate
		t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
			t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute))
			rts := setup(t, nil, nil, nil, 21)

			var (
				startHeight int64 = 20
				stopHeight  int64 = 10
				stopTime          = time.Date(2020, 1, 1, 0, 100, 0, 0, time.UTC)
			)

			peers := []string{"a", "b", "c", "d"}
			for _, peer := range peers {
				rts.peerUpdateCh <- p2p.PeerUpdate{
					NodeID: types.NodeID(peer),
					Status: p2p.PeerStatusUp,
				}
			}

			trackingHeight := startHeight
			rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"),
				mock.AnythingOfType("*types.ValidatorSet")).Return(func(lh, uh int64, vals *types.ValidatorSet) error {
				require.Equal(t, trackingHeight, lh)
				require.Equal(t, lh, uh)
				require.GreaterOrEqual(t, lh, stopHeight)

				trackingHeight--
				return nil
			})

			chain := buildLightBlockChain(t, stopHeight-1, startHeight+1, stopTime)

			closeCh := make(chan struct{})
			defer close(closeCh)
			go handleLightBlockRequests(t, chain, rts.blockOutCh,
				rts.blockInCh, closeCh, failureRate)

			err := rts.reactor.backfill(
				context.Background(),
				factory.DefaultTestChainID,
				startHeight,
				stopHeight,
				1,
				factory.MakeBlockIDWithHash(chain[startHeight].Header.Hash()),
				stopTime,
			)
			if failureRate > 3 {
				require.Error(t, err)

				require.NotEqual(t, rts.reactor.backfilledBlocks, rts.reactor.backfillBlockTotal)
				require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal)
			} else {
				require.NoError(t, err)

				// every block in the backfilled range [stopHeight, startHeight]
				// should now be present in the block store
				for height := stopHeight; height <= startHeight; height++ {
					blockMeta := rts.blockStore.LoadBlockMeta(height)
					require.NotNil(t, blockMeta)
				}

				require.Nil(t, rts.blockStore.LoadBlockMeta(stopHeight-1))
				require.Nil(t, rts.blockStore.LoadBlockMeta(startHeight+1))

				require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfilledBlocks)
				require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal)
			}

			require.Equal(t, rts.reactor.backfilledBlocks, rts.reactor.BackFilledBlocks())
			require.Equal(t, rts.reactor.backfillBlockTotal, rts.reactor.BackFillBlocksTotal())
		})
	}
}

// retryUntil will continue to evaluate fn and will return successfully when true
// or fail when the timeout is reached.
func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	for {
		if fn() {
			return
		}

		require.NoError(t, ctx.Err())
	}
}
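
// handleLightBlockRequests answers LightBlockRequest envelopes read from
// receiving with blocks taken from chain, simulating failures for roughly
// failureRate out of every 10 requests by sending a mismatched block, a nil
// block, or no response at all. It runs until close is closed.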
func handleLightBlockRequests(t *testing.T,
	chain map[int64]*types.LightBlock,
	receiving chan p2p.Envelope,
	sending chan p2p.Envelope,
	close chan struct{},
	failureRate int) {
	requests := 0
	errorCount := 0
	for {
		select {
		case envelope := <-receiving:
			if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok {
				if requests%10 >= failureRate {
					lb, err := chain[int64(msg.Height)].ToProto()
					require.NoError(t, err)
					sending <- p2p.Envelope{
						From: envelope.To,
						Message: &ssproto.LightBlockResponse{
							LightBlock: lb,
						},
					}
				} else {
					switch errorCount % 3 {
					case 0: // send a different block
						vals, pv := factory.RandValidatorSet(3, 10)
						_, _, lb := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv)
						differentLB, err := lb.ToProto()
						require.NoError(t, err)
						sending <- p2p.Envelope{
							From: envelope.To,
							Message: &ssproto.LightBlockResponse{
								LightBlock: differentLB,
							},
						}
					case 1: // send nil block i.e. pretend we don't have it
						sending <- p2p.Envelope{
							From: envelope.To,
							Message: &ssproto.LightBlockResponse{
								LightBlock: nil,
							},
						}
					case 2: // don't do anything
					}
					errorCount++
				}
			}
		case <-close:
			return
		}
		requests++
	}
}
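
// handleConsensusParamsRequest replies to every ParamsRequest received on the
// receiving channel with the default consensus params at the requested height,
// until closeCh is closed.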
func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Envelope, closeCh chan struct{}) {
	t.Helper()
	params := types.DefaultConsensusParams()
	paramsProto := params.ToProto()
	for {
		select {
		case envelope := <-receiving:
			t.Log("received consensus params request")
			msg, ok := envelope.Message.(*ssproto.ParamsRequest)
			require.True(t, ok)
			sending <- p2p.Envelope{
				From: envelope.To,
				Message: &ssproto.ParamsResponse{
					Height:          msg.Height,
					ConsensusParams: paramsProto,
				},
			}

		case <-closeCh:
			return
		}
	}
}
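
// buildLightBlockChain constructs a linked chain of light blocks for heights
// [fromHeight, toHeight), spaced one minute apart and keyed by height.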
func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*types.LightBlock {
	chain := make(map[int64]*types.LightBlock, toHeight-fromHeight)
	lastBlockID := factory.MakeBlockID()
	blockTime := startTime.Add(time.Duration(fromHeight-toHeight) * time.Minute)
	vals, pv := factory.RandValidatorSet(3, 10)
	for height := fromHeight; height < toHeight; height++ {
		vals, pv, chain[height] = mockLB(t, height, blockTime, lastBlockID, vals, pv)
		lastBlockID = factory.MakeBlockIDWithHash(chain[height].Header.Hash())
		blockTime = blockTime.Add(1 * time.Minute)
	}
	return chain
}
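
// mockLB builds a light block at the given height, signed by currentPrivVals
// and carrying currentVals as its validator set, and returns the next
// validator set and private validators to use for the following height.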
func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockID,
	currentVals *types.ValidatorSet, currentPrivVals []types.PrivValidator,
) (*types.ValidatorSet, []types.PrivValidator, *types.LightBlock) {
	header, err := factory.MakeHeader(&types.Header{
		Height:      height,
		LastBlockID: lastBlockID,
		Time:        time,
	})
	require.NoError(t, err)
	nextVals, nextPrivVals := factory.RandValidatorSet(3, 10)
	header.ValidatorsHash = currentVals.Hash()
	header.NextValidatorsHash = nextVals.Hash()
	header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams()
	lastBlockID = factory.MakeBlockIDWithHash(header.Hash())
	voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals)
	commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, currentPrivVals, time)
	require.NoError(t, err)
	return nextVals, nextPrivVals, &types.LightBlock{
		SignedHeader: &types.SignedHeader{
			Header: header,
			Commit: commit,
		},
		ValidatorSet: currentVals,
	}
}

// graduallyAddPeers delivers a new randomly-generated peer update on peerUpdateCh once
// per interval, until closeCh is closed. Each peer update is assigned a random node ID.
func graduallyAddPeers(
	peerUpdateCh chan p2p.PeerUpdate,
	closeCh chan struct{},
	interval time.Duration,
) {
	ticker := time.NewTicker(interval)
	for {
		select {
		case <-ticker.C:
			peerUpdateCh <- p2p.PeerUpdate{
				NodeID: factory.RandomNodeID(),
				Status: p2p.PeerStatusUp,
			}
		case <-closeCh:
			return
		}
	}
}
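
// handleSnapshotRequests answers each SnapshotsRequest read from receivingCh
// by sending one SnapshotsResponse per entry in snapshots, until closeCh is
// closed.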
func handleSnapshotRequests(
	t *testing.T,
	receivingCh chan p2p.Envelope,
	sendingCh chan p2p.Envelope,
	closeCh chan struct{},
	snapshots []snapshot,
) {
	t.Helper()
	for {
		select {
		case envelope := <-receivingCh:
			_, ok := envelope.Message.(*ssproto.SnapshotsRequest)
			require.True(t, ok)
			for _, snapshot := range snapshots {
				sendingCh <- p2p.Envelope{
					From: envelope.To,
					Message: &ssproto.SnapshotsResponse{
						Height:   snapshot.Height,
						Format:   snapshot.Format,
						Chunks:   snapshot.Chunks,
						Hash:     snapshot.Hash,
						Metadata: snapshot.Metadata,
					},
				}
			}
		case <-closeCh:
			return
		}
	}
}
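
// handleChunkRequests answers every ChunkRequest read from receivingCh with a
// ChunkResponse carrying the provided chunk bytes, until closeCh is closed.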
func handleChunkRequests(
	t *testing.T,
	receivingCh chan p2p.Envelope,
	sendingCh chan p2p.Envelope,
	closeCh chan struct{},
	chunk []byte,
) {
	t.Helper()
	for {
		select {
		case envelope := <-receivingCh:
			msg, ok := envelope.Message.(*ssproto.ChunkRequest)
			require.True(t, ok)
			sendingCh <- p2p.Envelope{
				From: envelope.To,
				Message: &ssproto.ChunkResponse{
					Height:  msg.Height,
					Format:  msg.Format,
					Index:   msg.Index,
					Chunk:   chunk,
					Missing: false,
				},
			}
		case <-closeCh:
			return
		}
	}
}