cleanup: Reduce and normalize import path aliasing. (#6975)

The code in the Tendermint repository makes heavy use of import aliasing. This is made necessary by our extensive reuse of common base package names, and by repetition of similar names across different subdirectories. Unfortunately we have not been very consistent about which packages we alias in various circumstances, and the aliases we use vary. In the spirit of the advice in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports, this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended to help make the code more readable to developers (including myself) trying to understand what is being imported where. Only unexported names have been modified, and the changes were generated and applied mechanically with gofmt -r and comby (an illustrative invocation follows the list below), respecting the lexical and syntactic rules of Go. Even so, I did not fix every inconsistency. Where the changes would be too disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
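
For instance (an illustrative invocation, not a command recorded in the change; oldAlias, pkgname, and Foo are placeholders), once an alias is renamed or removed in an import declaration, the usage sites can be rewritten mechanically with gofmt's expression rewrite rule, with comby presumably covering the patterns gofmt -r cannot express:

	gofmt -r 'oldAlias.Foo -> pkgname.Foo' -w .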

package statesync

import (
	"context"
	"errors"
	"fmt"
	"sync"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/light/provider"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

var (
	errNoConnectedPeers    = errors.New("no available peers to dispatch request to")
	errUnsolicitedResponse = errors.New("unsolicited light block response")
	errPeerAlreadyBusy     = errors.New("peer is already processing a request")
	errDisconnected        = errors.New("dispatcher disconnected")
)

// A Dispatcher multiplexes concurrent requests by multiple peers for light blocks.
// Only one request per peer can be sent at a time. Subsequent concurrent requests will
// report an error from the LightBlock method.
//
// NOTE: It is not the responsibility of the dispatcher to verify the light blocks.
type Dispatcher struct {
	// the channel on which to send light block requests
	requestCh *p2p.Channel

	mtx sync.Mutex
	// all pending calls that have been dispatched and are awaiting an answer
	calls map[types.NodeID]chan *types.LightBlock
}

func NewDispatcher(requestChannel *p2p.Channel) *Dispatcher {
	return &Dispatcher{
		requestCh: requestChannel,
		calls:     make(map[types.NodeID]chan *types.LightBlock),
	}
}
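
// Illustrative usage sketch (not part of the original file; the channel and
// peer values are assumed to come from the reactor):
//
//	d := NewDispatcher(lightBlockRequestCh)
//	lb, err := d.LightBlock(ctx, 100, peerID)
//	switch {
//	case err != nil:
//		// peer busy, dispatcher closed, or context canceled
//	case lb == nil:
//		// the peer does not have a block at height 100
//	}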

// LightBlock uses the request channel to fetch a light block from a given peer,
// tracking the call and waiting for the reactor to pass back the response. A nil
// LightBlock response is used to signal that the peer doesn't have the requested LightBlock.
func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) {
	// dispatch the request to the peer
	callCh, err := d.dispatch(ctx, peer, height)
	if err != nil {
		return nil, err
	}

	// clean up the call after a response is returned
	defer func() {
		d.mtx.Lock()
		defer d.mtx.Unlock()
		if call, ok := d.calls[peer]; ok {
			delete(d.calls, peer)
			close(call)
		}
	}()

	// wait for a response, cancellation, or timeout
	select {
	case resp := <-callCh:
		return resp, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// dispatch takes a peer and allocates it a channel so long as it's not already
// busy and the receiving channel is still running. It then dispatches the message.
func (d *Dispatcher) dispatch(ctx context.Context, peer types.NodeID, height int64) (chan *types.LightBlock, error) {
	d.mtx.Lock()
	defer d.mtx.Unlock()
	select {
	case <-ctx.Done():
		return nil, errDisconnected
	default:
	}

	ch := make(chan *types.LightBlock, 1)

	// check if a request for the same peer has already been made
	if _, ok := d.calls[peer]; ok {
		close(ch)
		return ch, errPeerAlreadyBusy
	}
	d.calls[peer] = ch

	// send request
	if err := d.requestCh.Send(ctx, p2p.Envelope{
		To: peer,
		Message: &ssproto.LightBlockRequest{
			Height: uint64(height),
		},
	}); err != nil {
		close(ch)
		return ch, err
	}

	return ch, nil
}

// Respond allows the underlying process which receives requests on the
// requestCh to respond with the respective light block. A nil response is used to
// represent that the receiver of the request does not have a light block at that height.
func (d *Dispatcher) Respond(ctx context.Context, lb *tmproto.LightBlock, peer types.NodeID) error {
	d.mtx.Lock()
	defer d.mtx.Unlock()

	// check that the response came from a request
	answerCh, ok := d.calls[peer]
	if !ok {
		// this can also happen if the response came in after the timeout
		return errUnsolicitedResponse
	}

	// If lb is nil we take that to mean that the peer didn't have the requested light
	// block and thus pass on the nil to the caller.
	if lb == nil {
		select {
		case answerCh <- nil:
			return nil
		case <-ctx.Done():
			return ctx.Err()
		}
	}

	block, err := types.LightBlockFromProto(lb)
	if err != nil {
		return err
	}

	select {
	case <-ctx.Done():
		return ctx.Err()
	case answerCh <- block:
		return nil
	}
}
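
// Illustrative sketch (not part of the original file): the reactor that owns
// requestCh is expected to feed responses back through Respond when it
// receives a light block message from a peer. The handler shape below is
// hypothetical.
//
//	case *ssproto.LightBlockResponse:
//		if err := d.Respond(ctx, msg.LightBlock, envelope.From); err != nil {
//			logger.Error("error processing light block response", "err", err)
//		}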

// Close shuts down the dispatcher and cancels any pending calls awaiting responses.
// Peers awaiting responses that have not arrived are delivered a nil block.
func (d *Dispatcher) Close() {
	d.mtx.Lock()
	defer d.mtx.Unlock()
	for peer := range d.calls {
		delete(d.calls, peer)
		// don't close the channel here: it's closed in other handlers,
		// and an unreferenced channel is garbage collected anyway
	}
}

//----------------------------------------------------------------

// BlockProvider is a p2p-based light provider which uses a dispatcher connected
// to the state sync reactor to serve light blocks to the light client.
//
// TODO: This should probably be moved over to the light package, but as we're
// not yet officially supporting p2p light clients we'll leave this here for now.
//
// NOTE: BlockProvider will return an error on concurrent calls. However, we don't
// need a mutex because a light client (and the backfill process) will never call a
// method more than once at the same time.
type BlockProvider struct {
	peer       types.NodeID
	chainID    string
	dispatcher *Dispatcher
}

// NewBlockProvider creates a block provider which implements the light client
// Provider interface.
func NewBlockProvider(peer types.NodeID, chainID string, dispatcher *Dispatcher) *BlockProvider {
	return &BlockProvider{
		peer:       peer,
		chainID:    chainID,
		dispatcher: dispatcher,
	}
}
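
// Illustrative sketch (not part of the original file): the backfill process
// can give each connected peer its own provider, all sharing one dispatcher,
// so that provider.Provider consumers spread load across peers. The peers
// slice below is hypothetical.
//
//	providers := make([]provider.Provider, 0, len(peers))
//	for _, peer := range peers {
//		providers = append(providers, NewBlockProvider(peer, chainID, dispatcher))
//	}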

// LightBlock fetches a light block from the peer at a specified height, returning
// either a light block or an appropriate error.
func (p *BlockProvider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) {
	lb, err := p.dispatcher.LightBlock(ctx, height, p.peer)
	switch err {
	case nil:
		if lb == nil {
			return nil, provider.ErrLightBlockNotFound
		}
	case context.DeadlineExceeded, context.Canceled:
		return nil, err
	case errPeerAlreadyBusy:
		return nil, provider.ErrLightBlockNotFound
	default:
		return nil, provider.ErrUnreliableProvider{Reason: err}
	}

	// check that the height requested is the same one returned
	if lb.Height != height {
		return nil, provider.ErrBadLightBlock{
			Reason: fmt.Errorf("expected height %d, got height %d", height, lb.Height),
		}
	}

	// perform basic validation
	if err := lb.ValidateBasic(p.chainID); err != nil {
		return nil, provider.ErrBadLightBlock{Reason: err}
	}

	return lb, nil
}

// ReportEvidence should allow for the light client to report any light client
// attacks. This is a no-op, as there currently isn't a way to wire this up to
// the evidence reactor (we should endeavor to do this in the future, but for now
// it's not critical for backwards verification).
func (p *BlockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) error {
	return nil
}

// String implements the Stringer interface.
func (p *BlockProvider) String() string { return string(p.peer) }

// ID returns the ID address of the provider (the NodeID of the peer).
func (p *BlockProvider) ID() string { return string(p.peer) }

//----------------------------------------------------------------

// peerList is a rolling list of peers. This is used to distribute the load of
// retrieving blocks over all the peers the reactor is connected to.
type peerList struct {
	mtx     sync.Mutex
	peers   []types.NodeID
	waiting []chan types.NodeID
}

func newPeerList() *peerList {
	return &peerList{
		peers:   make([]types.NodeID, 0),
		waiting: make([]chan types.NodeID, 0),
	}
}

// Len returns the number of peers currently in the list.
func (l *peerList) Len() int {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	return len(l.peers)
}

// Pop removes and returns the peer at the front of the list. If the list is
// empty, Pop blocks until a peer is appended or the context is canceled, in
// which case it returns the empty NodeID.
func (l *peerList) Pop(ctx context.Context) types.NodeID {
	l.mtx.Lock()
	if len(l.peers) == 0 {
		// if we don't have any peers in the list we block until a peer is
		// appended
		wait := make(chan types.NodeID, 1)
		l.waiting = append(l.waiting, wait)
		// unlock whilst waiting so that the list can be appended to
		l.mtx.Unlock()
		select {
		case peer := <-wait:
			return peer
		case <-ctx.Done():
			return ""
		}
	}

	peer := l.peers[0]
	l.peers = l.peers[1:]
	l.mtx.Unlock()
	return peer
}

// Append adds a peer to the list, handing it directly to the first blocked
// Pop call if one is waiting.
func (l *peerList) Append(peer types.NodeID) {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	if len(l.waiting) > 0 {
		wait := l.waiting[0]
		l.waiting = l.waiting[1:]
		wait <- peer
		close(wait)
	} else {
		l.peers = append(l.peers, peer)
	}
}

// Remove deletes the first occurrence of the given peer from the list.
func (l *peerList) Remove(peer types.NodeID) {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	for i, p := range l.peers {
		if p == peer {
			l.peers = append(l.peers[:i], l.peers[i+1:]...)
			return
		}
	}
}

// All returns the current slice of peers.
func (l *peerList) All() []types.NodeID {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	return l.peers
}

// Contains reports whether the given peer is in the list.
func (l *peerList) Contains(id types.NodeID) bool {
	l.mtx.Lock()
	defer l.mtx.Unlock()
	for _, p := range l.peers {
		if id == p {
			return true
		}
	}
	return false
}
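
// fetchFromAnyPeer is an illustrative helper (not part of the original file)
// showing how the pieces above compose: pop the next available peer from the
// rolling list, dispatch a light block request to it, and rotate the peer to
// the back of the list once the call completes.
func fetchFromAnyPeer(ctx context.Context, d *Dispatcher, peers *peerList, height int64) (*types.LightBlock, error) {
	peer := peers.Pop(ctx)
	if peer == "" {
		// Pop only returns the empty NodeID when the context is canceled
		return nil, ctx.Err()
	}
	// return the peer to the list so subsequent requests rotate across peers
	defer peers.Append(peer)
	return d.LightBlock(ctx, height, peer)
}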