package statesync

import (
	"context"
	"fmt"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/test/factory"
	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
	"github.com/tendermint/tendermint/types"
)
// channelInternal holds the raw channels backing a p2p.Channel so tests can
// inject envelopes and observe outbound traffic directly.
type channelInternal struct {
	In    chan p2p.Envelope
	Out   chan p2p.Envelope
	Error chan p2p.PeerError
}

// testChannel returns the internal channels together with the p2p.Channel
// built on top of them.
func testChannel(size int) (*channelInternal, *p2p.Channel) {
	in := &channelInternal{
		In:    make(chan p2p.Envelope, size),
		Out:   make(chan p2p.Envelope, size),
		Error: make(chan p2p.PeerError, size),
	}
	return in, p2p.NewChannel(0, nil, in.In, in.Out, in.Error)
}
func TestDispatcherBasic(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	const numPeers = 5
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chans, ch := testChannel(100)
	d := NewDispatcher(ch)
	go handleRequests(ctx, t, d, chans.Out)

	peers := createPeerSet(numPeers)
	wg := sync.WaitGroup{}

	// make a bunch of async requests and require that the correct responses are
	// given
	for i := 0; i < numPeers; i++ {
		wg.Add(1)
		go func(height int64) {
			defer wg.Done()
			lb, err := d.LightBlock(ctx, height, peers[height-1])
			require.NoError(t, err)
			require.NotNil(t, lb)
			require.Equal(t, lb.Height, height)
		}(int64(i + 1))
	}
	wg.Wait()

	// assert that all calls were responded to
	assert.Empty(t, d.calls)
}
func TestDispatcherReturnsNoBlock(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chans, ch := testChannel(100)
	d := NewDispatcher(ch)
	peer := factory.NodeID("a")

	go func() {
		<-chans.Out
		require.NoError(t, d.Respond(nil, peer))
		cancel()
	}()

	lb, err := d.LightBlock(ctx, 1, peer)
	<-ctx.Done()

	require.Nil(t, lb)
	require.NoError(t, err)
}
func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	_, ch := testChannel(100)
	d := NewDispatcher(ch)
	peer := factory.NodeID("a")

	ctx, cancelFunc := context.WithTimeout(ctx, 10*time.Millisecond)
	defer cancelFunc()

	lb, err := d.LightBlock(ctx, 1, peer)

	require.Error(t, err)
	require.Equal(t, context.DeadlineExceeded, err)
	require.Nil(t, lb)
}
func TestDispatcherProviders(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	chainID := "test-chain"

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	chans, ch := testChannel(100)
	d := NewDispatcher(ch)
	go handleRequests(ctx, t, d, chans.Out)

	peers := createPeerSet(5)
	providers := make([]*BlockProvider, len(peers))
	for idx, peer := range peers {
		providers[idx] = NewBlockProvider(peer, chainID, d)
	}
	require.Len(t, providers, 5)

	for i, p := range providers {
		assert.Equal(t, string(peers[i]), p.String(), i)
		lb, err := p.LightBlock(ctx, 10)
		assert.NoError(t, err)
		assert.NotNil(t, lb)
	}
}
func TestPeerListBasic(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	peerList := newPeerList()
	assert.Zero(t, peerList.Len())
	numPeers := 10
	peerSet := createPeerSet(numPeers)

	for _, peer := range peerSet {
		peerList.Append(peer)
	}

	for idx, peer := range peerList.All() {
		assert.Equal(t, peer, peerSet[idx])
	}
	assert.Equal(t, numPeers, peerList.Len())

	half := numPeers / 2
	for i := 0; i < half; i++ {
		assert.Equal(t, peerSet[i], peerList.Pop(ctx))
	}
	assert.Equal(t, half, peerList.Len())

	// removing a peer that doesn't exist should not change the list
	peerList.Remove(types.NodeID("lp"))
	assert.Equal(t, half, peerList.Len())

	// removing a peer that exists should decrease the list size by one
	peerList.Remove(peerSet[half])
	assert.Equal(t, numPeers-half-1, peerList.Len())

	// popping the next peer should work as expected
	assert.Equal(t, peerSet[half+1], peerList.Pop(ctx))
	assert.Equal(t, numPeers-half-2, peerList.Len())

	// append the two peers back
	peerList.Append(peerSet[half])
	peerList.Append(peerSet[half+1])
	assert.Equal(t, half, peerList.Len())
}
func TestPeerListBlocksWhenEmpty(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	peerList := newPeerList()
	require.Zero(t, peerList.Len())
	doneCh := make(chan struct{})
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	go func() {
		peerList.Pop(ctx)
		close(doneCh)
	}()

	select {
	case <-doneCh:
		t.Error("empty peer list should not have returned result")
	case <-time.After(100 * time.Millisecond):
	}
}
func TestEmptyPeerListReturnsWhenContextCanceled(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	peerList := newPeerList()
	require.Zero(t, peerList.Len())

	doneCh := make(chan struct{})

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	wrapped, cancel := context.WithCancel(ctx)
	go func() {
		peerList.Pop(wrapped)
		close(doneCh)
	}()

	select {
	case <-doneCh:
		t.Error("empty peer list should not have returned result")
	case <-time.After(100 * time.Millisecond):
	}

	cancel()

	select {
	case <-doneCh:
	case <-time.After(100 * time.Millisecond):
		t.Error("peer list should have returned after context canceled")
	}
}
func TestPeerListConcurrent(t *testing.T) {
	t.Cleanup(leaktest.Check(t))

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	peerList := newPeerList()
	numPeers := 10

	wg := sync.WaitGroup{}

	// we run a set of goroutines requesting the next peer in the list. As the
	// peer list hasn't been populated, each of these goroutines should block
	for i := 0; i < numPeers/2; i++ {
		go func() {
			_ = peerList.Pop(ctx)
			wg.Done()
		}()
	}

	// now we add the peers to the list; this should allow the previously
	// blocked goroutines to unblock
	for _, peer := range createPeerSet(numPeers) {
		wg.Add(1)
		peerList.Append(peer)
	}

	// we request the second half of the peer set
	for i := 0; i < numPeers/2; i++ {
		go func() {
			_ = peerList.Pop(ctx)
			wg.Done()
		}()
	}

	// we use a context with cancel and a separate goroutine to wait for all
	// the other goroutines to close.
	go func() { wg.Wait(); cancel() }()

	select {
	case <-time.After(time.Second):
		// not all of the blocked goroutines waiting on peers have closed after
		// one second. This likely means the list got blocked.
		t.Fatal("timed out waiting for blocked goroutines to receive peers")
	case <-ctx.Done():
		// there should be no peers remaining
		require.Equal(t, 0, peerList.Len())
	}
}
func TestPeerListRemove(t *testing.T) {
	peerList := newPeerList()
	numPeers := 10

	peerSet := createPeerSet(numPeers)
	for _, peer := range peerSet {
		peerList.Append(peer)
	}

	for _, peer := range peerSet {
		peerList.Remove(peer)
		for _, p := range peerList.All() {
			require.NotEqual(t, p, peer)
		}
		numPeers--
		require.Equal(t, numPeers, peerList.Len())
	}
}
// handleRequests is a helper function usually run in a separate goroutine to
// imitate the expected responses of the reactor wired to the dispatcher
func handleRequests(ctx context.Context, t *testing.T, d *Dispatcher, ch chan p2p.Envelope) {
	t.Helper()
	for {
		select {
		case request := <-ch:
			height := request.Message.(*ssproto.LightBlockRequest).Height
			peer := request.To
			resp := mockLBResp(ctx, t, peer, int64(height), time.Now())
			block, _ := resp.block.ToProto()
			require.NoError(t, d.Respond(block, resp.peer))
		case <-ctx.Done():
			return
		}
	}
}
// createPeerSet generates num deterministic node IDs, each built by repeating
// a single digit to the required hex length.
func createPeerSet(num int) []types.NodeID {
	peers := make([]types.NodeID, num)
	for i := 0; i < num; i++ {
		peers[i], _ = types.NewNodeID(strings.Repeat(fmt.Sprintf("%d", i), 2*types.NodeIDByteLength))
	}

	return peers
}