package statesync

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"sync"
	"time"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/proxy"
	sm "github.com/tendermint/tendermint/internal/state"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/light"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	"github.com/tendermint/tendermint/types"
)

const (
	// chunkTimeout is the timeout while waiting for the next chunk from the chunk queue.
	chunkTimeout = 2 * time.Minute

	// minimumDiscoveryTime is the lowest allowable time for a
	// SyncAny discovery time.
	minimumDiscoveryTime = 5 * time.Second
)

var (
	// errAbort is returned by Sync() when snapshot restoration is aborted.
	errAbort = errors.New("state sync aborted")
	// errRetrySnapshot is returned by Sync() when the snapshot should be retried.
	errRetrySnapshot = errors.New("retry snapshot")
	// errRejectSnapshot is returned by Sync() when the snapshot is rejected.
	errRejectSnapshot = errors.New("snapshot was rejected")
	// errRejectFormat is returned by Sync() when the snapshot format is rejected.
	errRejectFormat = errors.New("snapshot format was rejected")
	// errRejectSender is returned by Sync() when the snapshot sender is rejected.
	errRejectSender = errors.New("snapshot sender was rejected")
	// errVerifyFailed is returned by Sync() when app hash or last height
	// verification fails.
	errVerifyFailed = errors.New("verification with app failed")
	// errTimeout is returned by Sync() when we've waited too long to receive a chunk.
	errTimeout = errors.New("timed out waiting for chunk")
	// errNoSnapshots is returned by SyncAny() if no snapshots are found and discovery is disabled.
	errNoSnapshots = errors.New("no suitable snapshots found")
)

// syncer runs a state sync against an ABCI app. Use either SyncAny() to automatically attempt to
// sync all snapshots in the pool (pausing to discover new ones), or Sync() to sync a specific
// snapshot. Snapshots and chunks are fed via AddSnapshot() and AddChunk() as appropriate.
type syncer struct {
	logger        log.Logger
	stateProvider StateProvider
	conn          proxy.AppConnSnapshot
	connQuery     proxy.AppConnQuery
	snapshots     *snapshotPool
	snapshotCh    *p2p.Channel
	chunkCh       *p2p.Channel
	tempDir       string
	fetchers      int32
	retryTimeout  time.Duration

	mtx                      sync.RWMutex
	chunks                   *chunkQueue
	metrics                  *Metrics
	avgChunkTime             int64
	lastSyncedSnapshotHeight int64
	processingSnapshot       *snapshot
}

// newSyncer creates a new syncer.
func newSyncer(
	cfg config.StateSyncConfig,
	logger log.Logger,
	conn proxy.AppConnSnapshot,
	connQuery proxy.AppConnQuery,
	stateProvider StateProvider,
	snapshotCh *p2p.Channel,
	chunkCh *p2p.Channel,
	tempDir string,
	metrics *Metrics,
) *syncer {
	return &syncer{
		logger:        logger,
		stateProvider: stateProvider,
		conn:          conn,
		connQuery:     connQuery,
		snapshots:     newSnapshotPool(),
		snapshotCh:    snapshotCh,
		chunkCh:       chunkCh,
		tempDir:       tempDir,
		fetchers:      cfg.Fetchers,
		retryTimeout:  cfg.ChunkRequestTimeout,
		metrics:       metrics,
	}
}
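
// A minimal usage sketch (assuming a reactor-style caller): the reactor feeds the syncer with
// snapshots and chunks as they arrive from peers, and drives it with SyncAny. The cfg, logger,
// channels, metrics, tempDir, and requestSnapshots callback are assumed to exist in the caller
// and are placeholders here, not identifiers from this package.
//
//	syncer := newSyncer(cfg.StateSync, logger, conn, connQuery, stateProvider,
//		snapshotCh, chunkCh, tempDir, metrics)
//
//	// Fed by the reactor as peer messages arrive:
//	//   _, _ = syncer.AddSnapshot(peerID, snapshot)
//	//   _, _ = syncer.AddChunk(chunk)
//
//	state, commit, err := syncer.SyncAny(ctx, cfg.StateSync.DiscoveryTime, requestSnapshots)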

// AddChunk adds a chunk to the chunk queue, if any. It returns false if the chunk has already
// been added to the queue, or an error if there's no sync in progress.
func (s *syncer) AddChunk(chunk *chunk) (bool, error) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	if s.chunks == nil {
		return false, errors.New("no state sync in progress")
	}
	added, err := s.chunks.Add(chunk)
	if err != nil {
		return false, err
	}
	if added {
		s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format,
			"chunk", chunk.Index)
	} else {
		s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format,
			"chunk", chunk.Index)
	}
	return added, nil
}

// AddSnapshot adds a snapshot to the snapshot pool. It returns true if a new, previously unseen
// snapshot was accepted and added.
func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, error) {
	added, err := s.snapshots.Add(peerID, snapshot)
	if err != nil {
		return false, err
	}
	if added {
		s.metrics.TotalSnapshots.Add(1)
		s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format,
			"hash", snapshot.Hash)
	}
	return added, nil
}

// AddPeer adds a peer to the pool. For now we just keep it simple and send a
// single request to discover snapshots, later we may want to do retries and stuff.
func (s *syncer) AddPeer(ctx context.Context, peerID types.NodeID) error {
	s.logger.Debug("Requesting snapshots from peer", "peer", peerID)
	return s.snapshotCh.Send(ctx, p2p.Envelope{
		To:      peerID,
		Message: &ssproto.SnapshotsRequest{},
	})
}

// RemovePeer removes a peer from the pool.
func (s *syncer) RemovePeer(peerID types.NodeID) {
	s.logger.Debug("Removing peer from sync", "peer", peerID)
	s.snapshots.RemovePeer(peerID)
}

// SyncAny tries to sync any of the snapshots in the snapshot pool, waiting to discover further
// snapshots if none were found and discoveryTime > 0. It returns the latest state and block commit
// which the caller must use to bootstrap the node.
func (s *syncer) SyncAny(
	ctx context.Context,
	discoveryTime time.Duration,
	requestSnapshots func() error,
) (sm.State, *types.Commit, error) {
	if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime {
		discoveryTime = minimumDiscoveryTime
	}

	if discoveryTime > 0 {
		if err := requestSnapshots(); err != nil {
			return sm.State{}, nil, err
		}
		s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime))
		time.Sleep(discoveryTime)
	}

	// The app may ask us to retry a snapshot restoration, in which case we need to reuse
	// the snapshot and chunk queue from the previous loop iteration.
	var (
		snapshot *snapshot
		chunks   *chunkQueue
		err      error
	)

	for {
		// If not nil, we're going to retry restoration of the same snapshot.
		if snapshot == nil {
			snapshot = s.snapshots.Best()
			chunks = nil
		}
		if snapshot == nil {
			if discoveryTime == 0 {
				return sm.State{}, nil, errNoSnapshots
			}
			s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime))
			time.Sleep(discoveryTime)
			continue
		}
		if chunks == nil {
			chunks, err = newChunkQueue(snapshot, s.tempDir)
			if err != nil {
				return sm.State{}, nil, fmt.Errorf("failed to create chunk queue: %w", err)
			}
			defer chunks.Close() // in case we forget to close it elsewhere
		}

		s.processingSnapshot = snapshot
		s.metrics.SnapshotChunkTotal.Set(float64(snapshot.Chunks))

		newState, commit, err := s.Sync(ctx, snapshot, chunks)
		switch {
		case err == nil:
			s.metrics.SnapshotHeight.Set(float64(snapshot.Height))
			s.lastSyncedSnapshotHeight = int64(snapshot.Height)
			return newState, commit, nil

		case errors.Is(err, errAbort):
			return sm.State{}, nil, err

		case errors.Is(err, errRetrySnapshot):
			chunks.RetryAll()
			s.logger.Info("Retrying snapshot", "height", snapshot.Height, "format", snapshot.Format,
				"hash", snapshot.Hash)
			continue

		case errors.Is(err, errTimeout):
			s.snapshots.Reject(snapshot)
			s.logger.Error("Timed out waiting for snapshot chunks, rejected snapshot",
				"height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash)

		case errors.Is(err, errRejectSnapshot):
			s.snapshots.Reject(snapshot)
			s.logger.Info("Snapshot rejected", "height", snapshot.Height, "format", snapshot.Format,
				"hash", snapshot.Hash)

		case errors.Is(err, errRejectFormat):
			s.snapshots.RejectFormat(snapshot.Format)
			s.logger.Info("Snapshot format rejected", "format", snapshot.Format)

		case errors.Is(err, errRejectSender):
			s.logger.Info("Snapshot senders rejected", "height", snapshot.Height, "format", snapshot.Format,
				"hash", snapshot.Hash)
			for _, peer := range s.snapshots.GetPeers(snapshot) {
				s.snapshots.RejectPeer(peer)
				s.logger.Info("Snapshot sender rejected", "peer", peer)
			}

		default:
			return sm.State{}, nil, fmt.Errorf("snapshot restoration failed: %w", err)
		}

		// Discard snapshot and chunks for next iteration
		err = chunks.Close()
		if err != nil {
			s.logger.Error("Failed to clean up chunk queue", "err", err)
		}
		snapshot = nil
		chunks = nil
		s.processingSnapshot = nil
	}
}
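
// A rough sketch of what a caller does with SyncAny's result: the returned state and commit are
// persisted so that consensus or block sync can start from the restored height. stateStore and
// blockStore are assumed stores held by the caller, and the exact persistence calls may differ
// between Tendermint versions.
//
//	state, commit, err := syncer.SyncAny(ctx, discoveryTime, requestSnapshots)
//	if err != nil {
//		return err
//	}
//	if err := stateStore.Bootstrap(state); err != nil {
//		return fmt.Errorf("failed to bootstrap node with new state: %w", err)
//	}
//	if err := blockStore.SaveSeenCommit(state.LastBlockHeight, commit); err != nil {
//		return fmt.Errorf("failed to store last seen commit: %w", err)
//	}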

// Sync executes a sync for a specific snapshot, returning the latest state and block commit which
// the caller must use to bootstrap the node.
func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.Commit, error) {
	s.mtx.Lock()
	if s.chunks != nil {
		s.mtx.Unlock()
		return sm.State{}, nil, errors.New("a state sync is already in progress")
	}
	s.chunks = chunks
	s.mtx.Unlock()
	defer func() {
		s.mtx.Lock()
		s.chunks = nil
		s.mtx.Unlock()
	}()

	hctx, hcancel := context.WithTimeout(ctx, 30*time.Second)
	defer hcancel()

	// Fetch the app hash corresponding to the snapshot
	appHash, err := s.stateProvider.AppHash(hctx, snapshot.Height)
	if err != nil {
		// check if the main context was triggered
		if ctx.Err() != nil {
			return sm.State{}, nil, ctx.Err()
		}
		// catch the case where all the light client providers have been exhausted
		if err == light.ErrNoWitnesses {
			return sm.State{}, nil,
				fmt.Errorf("failed to get app hash at height %d. No witnesses remaining", snapshot.Height)
		}
		s.logger.Info("failed to get and verify tendermint state. Dropping snapshot and trying again",
			"err", err, "height", snapshot.Height)
		return sm.State{}, nil, errRejectSnapshot
	}
	snapshot.trustedAppHash = appHash

	// Offer snapshot to ABCI app.
	err = s.offerSnapshot(ctx, snapshot)
	if err != nil {
		return sm.State{}, nil, err
	}

	// Spawn chunk fetchers. They will terminate when the chunk queue is closed or context canceled.
	fetchCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	fetchStartTime := time.Now()
	for i := int32(0); i < s.fetchers; i++ {
		go s.fetchChunks(fetchCtx, snapshot, chunks)
	}

	pctx, pcancel := context.WithTimeout(ctx, 1*time.Minute)
	defer pcancel()

	// Optimistically build new state, so we don't discover any light client failures at the end.
	state, err := s.stateProvider.State(pctx, snapshot.Height)
	if err != nil {
		// check if the main context was triggered
		if ctx.Err() != nil {
			return sm.State{}, nil, ctx.Err()
		}
		if err == light.ErrNoWitnesses {
			return sm.State{}, nil,
				fmt.Errorf("failed to get tendermint state at height %d. No witnesses remaining", snapshot.Height)
		}
		s.logger.Info("failed to get and verify tendermint state. Dropping snapshot and trying again",
			"err", err, "height", snapshot.Height)
		return sm.State{}, nil, errRejectSnapshot
	}

	commit, err := s.stateProvider.Commit(pctx, snapshot.Height)
	if err != nil {
		// check if the main context was triggered
		if ctx.Err() != nil {
			return sm.State{}, nil, ctx.Err()
		}
		if err == light.ErrNoWitnesses {
			return sm.State{}, nil,
				fmt.Errorf("failed to get commit at height %d. No witnesses remaining", snapshot.Height)
		}
		s.logger.Info("failed to get and verify commit. Dropping snapshot and trying again",
			"err", err, "height", snapshot.Height)
		return sm.State{}, nil, errRejectSnapshot
	}

	// Restore snapshot
	err = s.applyChunks(ctx, chunks, fetchStartTime)
	if err != nil {
		return sm.State{}, nil, err
	}

	// Verify app and update app version
	appVersion, err := s.verifyApp(ctx, snapshot)
	if err != nil {
		return sm.State{}, nil, err
	}
	state.Version.Consensus.App = appVersion

	// Done! 🎉
	s.logger.Info("Snapshot restored", "height", snapshot.Height, "format", snapshot.Format,
		"hash", snapshot.Hash)

	return state, commit, nil
}

// offerSnapshot offers a snapshot to the app. It returns various errors depending on the app's
// response, or nil if the snapshot was accepted.
func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error {
	s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height,
		"format", snapshot.Format, "hash", snapshot.Hash)

	resp, err := s.conn.OfferSnapshotSync(ctx, abci.RequestOfferSnapshot{
		Snapshot: &abci.Snapshot{
			Height:   snapshot.Height,
			Format:   snapshot.Format,
			Chunks:   snapshot.Chunks,
			Hash:     snapshot.Hash,
			Metadata: snapshot.Metadata,
		},
		AppHash: snapshot.trustedAppHash,
	})
	if err != nil {
		return fmt.Errorf("failed to offer snapshot: %w", err)
	}

	switch resp.Result {
	case abci.ResponseOfferSnapshot_ACCEPT:
		s.logger.Info("Snapshot accepted, restoring", "height", snapshot.Height,
			"format", snapshot.Format, "hash", snapshot.Hash)
		return nil
	case abci.ResponseOfferSnapshot_ABORT:
		return errAbort
	case abci.ResponseOfferSnapshot_REJECT:
		return errRejectSnapshot
	case abci.ResponseOfferSnapshot_REJECT_FORMAT:
		return errRejectFormat
	case abci.ResponseOfferSnapshot_REJECT_SENDER:
		return errRejectSender
	default:
		return fmt.Errorf("unknown ResponseOfferSnapshot result %v", resp.Result)
	}
}
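
// For context, a hedged sketch of the app-side counterpart: the ABCI application's OfferSnapshot
// handler chooses one of the Result codes handled above and thereby steers the syncer. The
// Application type and its fields are illustrative assumptions, not part of this package.
//
//	func (app *Application) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
//		if req.Snapshot == nil || req.Snapshot.Format != app.supportedFormat {
//			return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_REJECT_FORMAT}
//		}
//		// Remember the offer so ApplySnapshotChunk can validate chunks against it.
//		app.offeredSnapshot = req.Snapshot
//		app.trustedAppHash = req.AppHash
//		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}
//	}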

// applyChunks applies chunks to the app. It returns various errors depending on the app's
// response, or nil once the snapshot is fully restored.
func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time.Time) error {
	for {
		chunk, err := chunks.Next()
		if err == errDone {
			return nil
		} else if err != nil {
			return fmt.Errorf("failed to fetch chunk: %w", err)
		}

		resp, err := s.conn.ApplySnapshotChunkSync(ctx, abci.RequestApplySnapshotChunk{
			Index:  chunk.Index,
			Chunk:  chunk.Chunk,
			Sender: string(chunk.Sender),
		})
		if err != nil {
			return fmt.Errorf("failed to apply chunk %v: %w", chunk.Index, err)
		}
		s.logger.Info("Applied snapshot chunk to ABCI app", "height", chunk.Height,
			"format", chunk.Format, "chunk", chunk.Index, "total", chunks.Size())

		// Discard and refetch any chunks as requested by the app
		for _, index := range resp.RefetchChunks {
			err := chunks.Discard(index)
			if err != nil {
				return fmt.Errorf("failed to discard chunk %v: %w", index, err)
			}
		}

		// Reject any senders as requested by the app
		for _, sender := range resp.RejectSenders {
			if sender != "" {
				peerID := types.NodeID(sender)
				s.snapshots.RejectPeer(peerID)
				if err := chunks.DiscardSender(peerID); err != nil {
					return fmt.Errorf("failed to reject sender: %w", err)
				}
			}
		}

		switch resp.Result {
		case abci.ResponseApplySnapshotChunk_ACCEPT:
			s.metrics.SnapshotChunk.Add(1)
			s.avgChunkTime = time.Since(start).Nanoseconds() / int64(chunks.numChunksReturned())
			s.metrics.ChunkProcessAvgTime.Set(float64(s.avgChunkTime))
		case abci.ResponseApplySnapshotChunk_ABORT:
			return errAbort
		case abci.ResponseApplySnapshotChunk_RETRY:
			chunks.Retry(chunk.Index)
		case abci.ResponseApplySnapshotChunk_RETRY_SNAPSHOT:
			return errRetrySnapshot
		case abci.ResponseApplySnapshotChunk_REJECT_SNAPSHOT:
			return errRejectSnapshot
		default:
			return fmt.Errorf("unknown ResponseApplySnapshotChunk result %v", resp.Result)
		}
	}
}
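
// A hedged sketch of the app side of this loop: the application applies each chunk and can steer
// the syncer through RefetchChunks and RejectSenders in its response, which map to the Discard
// and RejectPeer calls above. The restore and validation logic shown is a placeholder.
//
//	func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
//		if err := app.writeChunk(req.Index, req.Chunk); err != nil {
//			// Ask the syncer to refetch this chunk and stop using the peer that sent it.
//			return abci.ResponseApplySnapshotChunk{
//				Result:        abci.ResponseApplySnapshotChunk_RETRY,
//				RefetchChunks: []uint32{req.Index},
//				RejectSenders: []string{req.Sender},
//			}
//		}
//		return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}
//	}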

// fetchChunks requests chunks from peers, receiving allocations from the chunk queue. Chunks
// will be received from the reactor via syncer.AddChunk() and passed on to chunkQueue.Add().
func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) {
	var (
		next  = true
		index uint32
		err   error
	)

	for {
		if next {
			index, err = chunks.Allocate()
			if errors.Is(err, errDone) {
				// Keep checking until the context is canceled (restore is done), in case any
				// chunks need to be refetched.
				select {
				case <-ctx.Done():
					return
				case <-time.After(2 * time.Second):
					continue
				}
			}
			if err != nil {
				s.logger.Error("Failed to allocate chunk from queue", "err", err)
				return
			}
		}
		s.logger.Info("Fetching snapshot chunk", "height", snapshot.Height,
			"format", snapshot.Format, "chunk", index, "total", chunks.Size())

		ticker := time.NewTicker(s.retryTimeout)
		defer ticker.Stop()

		if err := s.requestChunk(ctx, snapshot, index); err != nil {
			return
		}

		select {
		case <-chunks.WaitFor(index):
			next = true

		case <-ticker.C:
			next = false

		case <-ctx.Done():
			return
		}

		ticker.Stop()
	}
}

// requestChunk requests a chunk from a peer.
//
// It returns nil if there are no peers for the given snapshot or if the
// request was made successfully, and an error if the request could not
// be completed.
func (s *syncer) requestChunk(ctx context.Context, snapshot *snapshot, chunk uint32) error {
	peer := s.snapshots.GetPeer(snapshot)
	if peer == "" {
		s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height,
			"format", snapshot.Format, "hash", snapshot.Hash)
		return nil
	}

	s.logger.Debug(
		"Requesting snapshot chunk",
		"height", snapshot.Height,
		"format", snapshot.Format,
		"chunk", chunk,
		"peer", peer,
	)

	msg := p2p.Envelope{
		To: peer,
		Message: &ssproto.ChunkRequest{
			Height: snapshot.Height,
			Format: snapshot.Format,
			Index:  chunk,
		},
	}

	if err := s.chunkCh.Send(ctx, msg); err != nil {
		return err
	}

	return nil
}

// verifyApp verifies the sync, checking the app hash and last block height. It returns the
// app version, which should be returned as part of the initial state.
func (s *syncer) verifyApp(ctx context.Context, snapshot *snapshot) (uint64, error) {
	resp, err := s.connQuery.InfoSync(ctx, proxy.RequestInfo)
	if err != nil {
		return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err)
	}

	if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) {
		s.logger.Error("appHash verification failed",
			"expected", snapshot.trustedAppHash,
			"actual", resp.LastBlockAppHash)
		return 0, errVerifyFailed
	}
	if uint64(resp.LastBlockHeight) != snapshot.Height {
		s.logger.Error(
			"ABCI app reported unexpected last block height",
			"expected", snapshot.Height,
			"actual", resp.LastBlockHeight,
		)
		return 0, errVerifyFailed
	}

	s.logger.Info("Verified ABCI app", "height", snapshot.Height, "appHash", snapshot.trustedAppHash)
	return resp.AppVersion, nil
}
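
// For verifyApp to pass, the application's Info response (served over the query connection) has
// to report the restored snapshot's height and app hash. A minimal sketch, with the app fields
// as illustrative assumptions:
//
//	func (app *Application) Info(req abci.RequestInfo) abci.ResponseInfo {
//		return abci.ResponseInfo{
//			AppVersion:       app.appVersion,
//			LastBlockHeight:  app.restoredHeight,  // must equal snapshot.Height
//			LastBlockAppHash: app.restoredAppHash, // must equal the light-client-verified app hash
//		}
//	}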