package statesync

import (
	"context"
	"errors"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	abci "github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/proxy"
	proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks"
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/internal/statesync/mocks"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	"github.com/tendermint/tendermint/types"
	"github.com/tendermint/tendermint/version"
)

var ctx = context.Background()
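
// NOTE: setup and the test suite value it returns (rts), including the rts.reactor, rts.syncer,
// rts.conn, rts.connQuery, rts.snapshotOutCh, and rts.chunkOutCh fields used below, are assumed
// to be provided by this package's shared reactor test harness (presumably reactor_test.go);
// they are not defined in this file.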
func TestSyncer_SyncAny(t *testing.T) {
	state := sm.State{
		ChainID: "chain",
		Version: sm.Version{
			Consensus: version.Consensus{
				Block: version.BlockProtocol,
				App:   0,
			},
			Software: version.TMVersion,
		},
		LastBlockHeight: 1,
		LastBlockID:     types.BlockID{Hash: []byte("blockhash")},
		LastBlockTime:   time.Now(),
		LastResultsHash: []byte("last_results_hash"),
		AppHash:         []byte("app_hash"),

		LastValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val1")}},
		Validators:     &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val2")}},
		NextValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val3")}},

		ConsensusParams:                  *types.DefaultConsensusParams(),
		LastHeightConsensusParamsChanged: 1,
	}
	commit := &types.Commit{BlockID: types.BlockID{Hash: []byte("blockhash")}}

	chunks := []*chunk{
		{Height: 1, Format: 1, Index: 0, Chunk: []byte{1, 1, 0}},
		{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 1, 1}},
		{Height: 1, Format: 1, Index: 2, Chunk: []byte{1, 1, 2}},
	}
	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}

	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, uint64(1)).Return(state.AppHash, nil)
	stateProvider.On("AppHash", mock.Anything, uint64(2)).Return([]byte("app_hash_2"), nil)
	stateProvider.On("Commit", mock.Anything, uint64(1)).Return(commit, nil)
	stateProvider.On("State", mock.Anything, uint64(1)).Return(state, nil)

	connSnapshot := &proxymocks.AppConnSnapshot{}
	connQuery := &proxymocks.AppConnQuery{}

	peerAID := types.NodeID("aa")
	peerBID := types.NodeID("bb")
	peerCID := types.NodeID("cc")

	rts := setup(t, connSnapshot, connQuery, stateProvider, 3)

	rts.reactor.syncer = rts.syncer

	// Adding a chunk should error when no sync is in progress.
	_, err := rts.syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}})
	require.Error(t, err)

	// Adding a couple of peers should trigger snapshot discovery messages.
	rts.syncer.AddPeer(peerAID)
	e := <-rts.snapshotOutCh
	require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message)
	require.Equal(t, peerAID, e.To)

	rts.syncer.AddPeer(peerBID)
	e = <-rts.snapshotOutCh
	require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message)
	require.Equal(t, peerBID, e.To)

	// Both peers report back with snapshots. One of them also returns a snapshot we don't want,
	// in format 2, which will be rejected by the ABCI application.
	new, err := rts.syncer.AddSnapshot(peerAID, s)
	require.NoError(t, err)
	require.True(t, new)

	new, err = rts.syncer.AddSnapshot(peerBID, s)
	require.NoError(t, err)
	require.False(t, new)

	s2 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}
	new, err = rts.syncer.AddSnapshot(peerBID, s2)
	require.NoError(t, err)
	require.True(t, new)

	new, err = rts.syncer.AddSnapshot(peerCID, s2)
	require.NoError(t, err)
	require.False(t, new)

	// We start a sync, with peers sending back chunks when requested. We first reject the
	// snapshot with height 2 format 2, and accept the snapshot at height 1.
	connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: &abci.Snapshot{
			Height: 2,
			Format: 2,
			Chunks: 3,
			Hash:   []byte{1},
		},
		AppHash: []byte("app_hash_2"),
	}).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil)
	connSnapshot.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: &abci.Snapshot{
			Height:   s.Height,
			Format:   s.Format,
			Chunks:   s.Chunks,
			Hash:     s.Hash,
			Metadata: s.Metadata,
		},
		AppHash: []byte("app_hash"),
	}).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil)

	chunkRequests := make(map[uint32]int)
	chunkRequestsMtx := tmsync.Mutex{}
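
	// Four chunk requests are expected in total: one for each of the three chunks, plus a
	// refetch of chunk 1, which the ApplySnapshotChunkSync mock below discards via
	// RETRY_SNAPSHOT with RefetchChunks. This is also why the request counts asserted after
	// the sync are {0: 1, 1: 2, 2: 1}.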
	var wg sync.WaitGroup
	wg.Add(4)

	go func() {
		for e := range rts.chunkOutCh {
			msg, ok := e.Message.(*ssproto.ChunkRequest)
			assert.True(t, ok)

			assert.EqualValues(t, 1, msg.Height)
			assert.EqualValues(t, 1, msg.Format)
			assert.LessOrEqual(t, msg.Index, uint32(len(chunks)))

			added, err := rts.syncer.AddChunk(chunks[msg.Index])
			assert.NoError(t, err)
			assert.True(t, added)

			chunkRequestsMtx.Lock()
			chunkRequests[msg.Index]++
			chunkRequestsMtx.Unlock()

			wg.Done()
		}
	}()

	// The first time we apply chunk 2 we tell the syncer to retry the snapshot and discard
	// chunk 1, which should cause it to keep the existing chunks 0 and 2 and restart
	// restoration from the beginning. We also wait for a little while, to exercise the retry
	// logic in fetchChunks().
	connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
		Index: 2, Chunk: []byte{1, 1, 2},
	}).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return(
		&abci.ResponseApplySnapshotChunk{
			Result:        abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT,
			RefetchChunks: []uint32{1},
		}, nil)
	connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
		Index: 0, Chunk: []byte{1, 1, 0},
	}).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
	connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
		Index: 1, Chunk: []byte{1, 1, 1},
	}).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
	connSnapshot.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
		Index: 2, Chunk: []byte{1, 1, 2},
	}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
	connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{
		AppVersion:       9,
		LastBlockHeight:  1,
		LastBlockAppHash: []byte("app_hash"),
	}, nil)

	newState, lastCommit, err := rts.syncer.SyncAny(ctx, 0, func() {})
	require.NoError(t, err)

	wg.Wait()

	chunkRequestsMtx.Lock()
	require.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests)
	chunkRequestsMtx.Unlock()

	// The syncer should have updated the state app version from the ABCI info response.
	expectState := state
	expectState.Version.Consensus.App = 9

	require.Equal(t, expectState, newState)
	require.Equal(t, commit, lastCommit)
	require.Equal(t, len(chunks), int(rts.syncer.processingSnapshot.Chunks))
	require.Equal(t, expectState.LastBlockHeight, rts.syncer.lastSyncedSnapshotHeight)
	require.True(t, rts.syncer.avgChunkTime > 0)

	require.Equal(t, int64(rts.syncer.processingSnapshot.Chunks), rts.reactor.SnapshotChunksTotal())
	require.Equal(t, rts.syncer.lastSyncedSnapshotHeight, rts.reactor.SnapshotHeight())
	require.Equal(t, time.Duration(rts.syncer.avgChunkTime), rts.reactor.ChunkProcessAvgTime())
	require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots())
	require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount())

	connSnapshot.AssertExpectations(t)
	connQuery.AssertExpectations(t)
}
func TestSyncer_SyncAny_noSnapshots(t *testing.T) {
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

	rts := setup(t, nil, nil, stateProvider, 2)

	_, _, err := rts.syncer.SyncAny(ctx, 0, func() {})
	require.Equal(t, errNoSnapshots, err)
}
func TestSyncer_SyncAny_abort(t *testing.T) {
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

	rts := setup(t, nil, nil, stateProvider, 2)

	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	peerID := types.NodeID("aa")

	_, err := rts.syncer.AddSnapshot(peerID, s)
	require.NoError(t, err)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)

	_, _, err = rts.syncer.SyncAny(ctx, 0, func() {})
	require.Equal(t, errAbort, err)

	rts.conn.AssertExpectations(t)
}
func TestSyncer_SyncAny_reject(t *testing.T) {
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

	rts := setup(t, nil, nil, stateProvider, 2)

	// s22 is tried first, then s12, then s11, then errNoSnapshots.
	s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}

	peerID := types.NodeID("aa")

	_, err := rts.syncer.AddSnapshot(peerID, s22)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerID, s12)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerID, s11)
	require.NoError(t, err)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s12), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	_, _, err = rts.syncer.SyncAny(ctx, 0, func() {})
	require.Equal(t, errNoSnapshots, err)

	rts.conn.AssertExpectations(t)
}
func TestSyncer_SyncAny_reject_format(t *testing.T) {
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

	rts := setup(t, nil, nil, stateProvider, 2)

	// s22 is tried first. Its format is rejected, which also rejects s12 (same format), so s11
	// is tried next and the app aborts.
	s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}}
	s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}

	peerID := types.NodeID("aa")

	_, err := rts.syncer.AddSnapshot(peerID, s22)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerID, s12)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerID, s11)
	require.NoError(t, err)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s22), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}, nil)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s11), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, nil)

	_, _, err = rts.syncer.SyncAny(ctx, 0, func() {})
	require.Equal(t, errAbort, err)

	rts.conn.AssertExpectations(t)
}
func TestSyncer_SyncAny_reject_sender(t *testing.T) {
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

	rts := setup(t, nil, nil, stateProvider, 2)

	peerAID := types.NodeID("aa")
	peerBID := types.NodeID("bb")
	peerCID := types.NodeID("cc")

	// sbc will be offered first and is rejected with REJECT_SENDER, causing all snapshots
	// submitted by both b and c (i.e. sb, sc, and sbc) to be skipped. Finally, sa is rejected
	// too, and errNoSnapshots is returned.
	sa := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	sb := &snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	sc := &snapshot{Height: 3, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}
	sbc := &snapshot{Height: 4, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}

	_, err := rts.syncer.AddSnapshot(peerAID, sa)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerBID, sb)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerCID, sc)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerBID, sbc)
	require.NoError(t, err)

	_, err = rts.syncer.AddSnapshot(peerCID, sbc)
	require.NoError(t, err)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(sbc), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_SENDER}, nil)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(sa), AppHash: []byte("app_hash"),
	}).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT}, nil)

	_, _, err = rts.syncer.SyncAny(ctx, 0, func() {})
	require.Equal(t, errNoSnapshots, err)

	rts.conn.AssertExpectations(t)
}
func TestSyncer_SyncAny_abciError(t *testing.T) {
	stateProvider := &mocks.StateProvider{}
	stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

	rts := setup(t, nil, nil, stateProvider, 2)

	errBoom := errors.New("boom")
	s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}}

	peerID := types.NodeID("aa")

	_, err := rts.syncer.AddSnapshot(peerID, s)
	require.NoError(t, err)

	rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
		Snapshot: toABCI(s), AppHash: []byte("app_hash"),
	}).Once().Return(nil, errBoom)

	_, _, err = rts.syncer.SyncAny(ctx, 0, func() {})
	require.True(t, errors.Is(err, errBoom))

	rts.conn.AssertExpectations(t)
}
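
// The two table-driven tests below exercise offerSnapshot and applyChunks against each possible
// ABCI result. The syncer may wrap the sentinel errors (errAbort, errRejectSnapshot, and so on),
// so the assertions unwrap the returned error once, when possible, before comparing it against
// the expected value.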
func TestSyncer_offerSnapshot(t *testing.T) {
	unknownErr := errors.New("unknown error")
	boom := errors.New("boom")

	testcases := map[string]struct {
		result    abci.ResponseOfferSnapshot_Result
		err       error
		expectErr error
	}{
		"accept":           {abci.ResponseOfferSnapshot_ACCEPT, nil, nil},
		"abort":            {abci.ResponseOfferSnapshot_ABORT, nil, errAbort},
		"reject":           {abci.ResponseOfferSnapshot_REJECT, nil, errRejectSnapshot},
		"reject_format":    {abci.ResponseOfferSnapshot_REJECT_FORMAT, nil, errRejectFormat},
		"reject_sender":    {abci.ResponseOfferSnapshot_REJECT_SENDER, nil, errRejectSender},
		"unknown":          {abci.ResponseOfferSnapshot_UNKNOWN, nil, unknownErr},
		"error":            {0, boom, boom},
		"unknown non-zero": {9, nil, unknownErr},
	}

	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

			rts := setup(t, nil, nil, stateProvider, 2)

			s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")}
			rts.conn.On("OfferSnapshotSync", ctx, abci.RequestOfferSnapshot{
				Snapshot: toABCI(s),
				AppHash:  []byte("app_hash"),
			}).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err)

			err := rts.syncer.offerSnapshot(ctx, s)
			if tc.expectErr == unknownErr {
				require.Error(t, err)
			} else {
				unwrapped := errors.Unwrap(err)
				if unwrapped != nil {
					err = unwrapped
				}
				require.Equal(t, tc.expectErr, err)
			}
		})
	}
}
func TestSyncer_applyChunks_Results(t *testing.T) {
	unknownErr := errors.New("unknown error")
	boom := errors.New("boom")

	testcases := map[string]struct {
		result    abci.ResponseApplySnapshotChunk_Result
		err       error
		expectErr error
	}{
		"accept":           {abci.ResponseApplySnapshotChunk_ACCEPT, nil, nil},
		"abort":            {abci.ResponseApplySnapshotChunk_ABORT, nil, errAbort},
		"retry":            {abci.ResponseApplySnapshotChunk_RETRY, nil, nil},
		"retry_snapshot":   {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT, nil, errRetrySnapshot},
		"reject_snapshot":  {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT, nil, errRejectSnapshot},
		"unknown":          {abci.ResponseApplySnapshotChunk_UNKNOWN, nil, unknownErr},
		"error":            {0, boom, boom},
		"unknown non-zero": {9, nil, unknownErr},
	}

	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

			rts := setup(t, nil, nil, stateProvider, 2)

			body := []byte{1, 2, 3}
			chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "")
			require.NoError(t, err)

			fetchStartTime := time.Now()

			_, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body})
			require.NoError(t, err)

			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 0, Chunk: body,
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err)
			if tc.result == abci.ResponseApplySnapshotChunk_RETRY {
				rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
					Index: 0, Chunk: body,
				}).Once().Return(&abci.ResponseApplySnapshotChunk{
					Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			}

			err = rts.syncer.applyChunks(ctx, chunks, fetchStartTime)
			if tc.expectErr == unknownErr {
				require.Error(t, err)
			} else {
				unwrapped := errors.Unwrap(err)
				if unwrapped != nil {
					err = unwrapped
				}
				require.Equal(t, tc.expectErr, err)
			}

			rts.conn.AssertExpectations(t)
		})
	}
}
func TestSyncer_applyChunks_RefetchChunks(t *testing.T) {
	// Discarding chunks via refetch_chunks should work the same for all results.
	testcases := map[string]struct {
		result abci.ResponseApplySnapshotChunk_Result
	}{
		"accept":          {abci.ResponseApplySnapshotChunk_ACCEPT},
		"abort":           {abci.ResponseApplySnapshotChunk_ABORT},
		"retry":           {abci.ResponseApplySnapshotChunk_RETRY},
		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT},
		"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT},
	}

	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

			rts := setup(t, nil, nil, stateProvider, 2)

			chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "")
			require.NoError(t, err)

			fetchStartTime := time.Now()

			added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}})
			require.True(t, added)
			require.NoError(t, err)

			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}})
			require.True(t, added)
			require.NoError(t, err)

			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}})
			require.True(t, added)
			require.NoError(t, err)

			// The first two chunks are accepted, before the last one asks for chunk 1 to be refetched.
			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 0, Chunk: []byte{0},
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 1, Chunk: []byte{1},
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 2, Chunk: []byte{2},
			}).Once().Return(&abci.ResponseApplySnapshotChunk{
				Result:        tc.result,
				RefetchChunks: []uint32{1},
			}, nil)

			// Since removing the chunk will cause Next() to block, we spawn a goroutine, then
			// check the queue contents, and finally close the queue to end the goroutine.
			// We don't really care about the result of applyChunks, since it has a separate test.
			go func() {
				rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error
			}()

			time.Sleep(50 * time.Millisecond)

			require.True(t, chunks.Has(0))
			require.False(t, chunks.Has(1))
			require.True(t, chunks.Has(2))

			require.NoError(t, chunks.Close())
		})
	}
}
func TestSyncer_applyChunks_RejectSenders(t *testing.T) {
	// Banning chunk senders via reject_senders should work the same for all results.
	testcases := map[string]struct {
		result abci.ResponseApplySnapshotChunk_Result
	}{
		"accept":          {abci.ResponseApplySnapshotChunk_ACCEPT},
		"abort":           {abci.ResponseApplySnapshotChunk_ABORT},
		"retry":           {abci.ResponseApplySnapshotChunk_RETRY},
		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT},
		"reject_snapshot": {abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT},
	}

	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			stateProvider := &mocks.StateProvider{}
			stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil)

			rts := setup(t, nil, nil, stateProvider, 2)

			// Set up three peers across two snapshots, and ask for one of them to be banned.
			// It should be banned from all snapshots.
			peerAID := types.NodeID("aa")
			peerBID := types.NodeID("bb")
			peerCID := types.NodeID("cc")

			s1 := &snapshot{Height: 1, Format: 1, Chunks: 3}
			s2 := &snapshot{Height: 2, Format: 1, Chunks: 3}

			_, err := rts.syncer.AddSnapshot(peerAID, s1)
			require.NoError(t, err)

			_, err = rts.syncer.AddSnapshot(peerAID, s2)
			require.NoError(t, err)

			_, err = rts.syncer.AddSnapshot(peerBID, s1)
			require.NoError(t, err)

			_, err = rts.syncer.AddSnapshot(peerBID, s2)
			require.NoError(t, err)

			_, err = rts.syncer.AddSnapshot(peerCID, s1)
			require.NoError(t, err)

			_, err = rts.syncer.AddSnapshot(peerCID, s2)
			require.NoError(t, err)

			chunks, err := newChunkQueue(s1, "")
			require.NoError(t, err)

			fetchStartTime := time.Now()

			added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerAID})
			require.True(t, added)
			require.NoError(t, err)

			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerBID})
			require.True(t, added)
			require.NoError(t, err)

			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerCID})
			require.True(t, added)
			require.NoError(t, err)

			// The first two chunks are accepted, before the last one asks for peer b to be rejected.
			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 0, Chunk: []byte{0}, Sender: "aa",
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 1, Chunk: []byte{1}, Sender: "bb",
			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
				Index: 2, Chunk: []byte{2}, Sender: "cc",
			}).Once().Return(&abci.ResponseApplySnapshotChunk{
				Result:        tc.result,
				RejectSenders: []string{string(peerBID)},
			}, nil)

			// On retry, the last chunk will be tried again, so we just accept it then.
			if tc.result == abci.ResponseApplySnapshotChunk_RETRY {
				rts.conn.On("ApplySnapshotChunkSync", ctx, abci.RequestApplySnapshotChunk{
					Index: 2, Chunk: []byte{2}, Sender: "cc",
				}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil)
			}

			// We don't really care about the result of applyChunks, since it has a separate test.
			// However, it will block on e.g. a retry result, so we spawn a goroutine that will
			// be shut down when the chunk queue closes.
			go func() {
				rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error
			}()

			time.Sleep(50 * time.Millisecond)

			// Peer b should have been removed from both snapshots, not just the one being restored.
			s1peers := rts.syncer.snapshots.GetPeers(s1)
			require.Len(t, s1peers, 2)
			require.EqualValues(t, "aa", s1peers[0])
			require.EqualValues(t, "cc", s1peers[1])

			s2peers := rts.syncer.snapshots.GetPeers(s2)
			require.Len(t, s2peers, 2)
			require.EqualValues(t, "aa", s2peers[0])
			require.EqualValues(t, "cc", s2peers[1])

			require.NoError(t, chunks.Close())
		})
	}
}
func TestSyncer_verifyApp(t *testing.T) {
	boom := errors.New("boom")
	s := &snapshot{Height: 3, Format: 1, Chunks: 5, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")}

	testcases := map[string]struct {
		response  *abci.ResponseInfo
		err       error
		expectErr error
	}{
		"verified": {&abci.ResponseInfo{
			LastBlockHeight:  3,
			LastBlockAppHash: []byte("app_hash"),
			AppVersion:       9,
		}, nil, nil},
		"invalid height": {&abci.ResponseInfo{
			LastBlockHeight:  5,
			LastBlockAppHash: []byte("app_hash"),
			AppVersion:       9,
		}, nil, errVerifyFailed},
		"invalid hash": {&abci.ResponseInfo{
			LastBlockHeight:  3,
			LastBlockAppHash: []byte("xxx"),
			AppVersion:       9,
		}, nil, errVerifyFailed},
		"error": {nil, boom, boom},
	}

	for name, tc := range testcases {
		tc := tc
		t.Run(name, func(t *testing.T) {
			rts := setup(t, nil, nil, nil, 2)

			rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(tc.response, tc.err)

			version, err := rts.syncer.verifyApp(s)
			unwrapped := errors.Unwrap(err)
			if unwrapped != nil {
				err = unwrapped
			}

			require.Equal(t, tc.expectErr, err)
			if err == nil {
				require.Equal(t, tc.response.AppVersion, version)
			}
		})
	}
}
func toABCI(s *snapshot) *abci.Snapshot {
	return &abci.Snapshot{
		Height:   s.Height,
		Format:   s.Format,
		Chunks:   s.Chunks,
		Hash:     s.Hash,
		Metadata: s.Metadata,
	}
}
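
// TestToABCI is a minimal sketch verifying that toABCI copies every field of the local snapshot
// type verbatim into its ABCI representation; the field values used here are arbitrary.
func TestToABCI(t *testing.T) {
	s := &snapshot{Height: 7, Format: 2, Chunks: 4, Hash: []byte{0xde, 0xad}, Metadata: []byte("meta")}

	out := toABCI(s)
	require.Equal(t, s.Height, out.Height)
	require.Equal(t, s.Format, out.Format)
	require.Equal(t, s.Chunks, out.Chunks)
	require.Equal(t, s.Hash, out.Hash)
	require.Equal(t, s.Metadata, out.Metadata)
}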