cleanup: Reduce and normalize import path aliasing. (#6975)

The code in the Tendermint repository makes heavy use of import aliasing. This is made necessary by our extensive reuse of common base package names, and by repetition of similar names across different subdirectories. Unfortunately we have not been very consistent about which packages we alias in various circumstances, and the aliases we use vary.

In the spirit of the advice in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports, this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended to help make the code more readable to developers (including myself) trying to understand what is being imported where. Only unexported names have been modified, and the changes were generated and applied mechanically with gofmt -r and comby, respecting the lexical and syntactic rules of Go. Even so, I did not fix every inconsistency. Where the changes would be too disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
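
As a rough illustration of those principles, here is a sketch of a normalized import block in the spirit of this test file. It is not an excerpt from the change itself: the package name pexcleanupexample and the pprof import are hypothetical, and the internal/p2p path only compiles from inside the Tendermint module.

// Sketch only: an import block normalized per the principles above.
package pexcleanupexample

import (
	// Registers pprof handlers on the default HTTP mux (side effect only);
	// blank imports are grouped and commented.
	_ "net/http/pprof"

	"github.com/tendermint/tendermint/internal/p2p"                  // alias "p2p" removed: it restated the package name
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" // aliased: base name collides with internal/p2p
)

// Usage sites now spell out which "p2p" package they mean.
var (
	_ p2p.Envelope
	_ p2pproto.PexRequest
)

The test file below follows the same style: the only aliased imports are dbm (whose import path base does not match a usable identifier) and p2pproto (which would otherwise collide with internal/p2p).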
package pex_test

import (
	"context"
	"errors"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
	"github.com/tendermint/tendermint/internal/p2p/pex"
	"github.com/tendermint/tendermint/libs/log"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

const (
	checkFrequency    = 500 * time.Millisecond
	defaultBufferSize = 2
	shortWait         = 10 * time.Second
	longWait          = 60 * time.Second

	firstNode  = 0
	secondNode = 1
	thirdNode  = 2
)

func TestReactorBasic(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// start a network with one mock reactor and one "real" reactor
	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 2,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// assert that the mock node receives a request from the real node
	testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait)

	// assert that when a mock node sends a request it receives a response (and
	// the correct one)
	testNet.sendRequest(ctx, t, firstNode, secondNode)
	testNet.listenForResponse(ctx, t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil))
}

func TestReactorConnectFullNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes: 4,
	})

	// connect every node to only one other node (each ends up with two
	// connections because connections are two-way)
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

func TestReactorSendsRequestsTooOften(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := setupSingle(ctx, t)

	badNode := newNodeID(t, "b")

	r.pexInCh <- p2p.Envelope{
		From:    badNode,
		Message: &p2pproto.PexRequest{},
	}

	resp := <-r.pexOutCh
	msg, ok := resp.Message.(*p2pproto.PexResponse)
	require.True(t, ok)
	require.Empty(t, msg.Addresses)

	r.pexInCh <- p2p.Envelope{
		From:    badNode,
		Message: &p2pproto.PexRequest{},
	}

	peerErr := <-r.pexErrCh
	require.Error(t, peerErr.Err)
	require.Empty(t, r.pexOutCh)
	require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
	require.Equal(t, badNode, peerErr.NodeID)
}

func TestReactorSendsResponseWithoutRequest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 3,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// firstNode sends the secondNode an unrequested response
	// NOTE: secondNode will send a request by default during startup so we send
	// two responses to counter that.
	testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode})
	testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode})

	// secondNode should evict the firstNode
	testNet.listenForPeerUpdate(ctx, t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
}

func TestReactorNeverSendsTooManyPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 2,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	testNet.addNodes(ctx, t, 110)
	nodes := make([]int, 110)
	for i := 0; i < len(nodes); i++ {
		nodes[i] = i + 2
	}
	testNet.addAddresses(t, secondNode, nodes)

	// first we check that even though we have 110 peers, honest pex reactors
	// only send 100 (test that secondNode sends firstNode 100 addresses)
	testNet.pingAndlistenForNAddresses(ctx, t, secondNode, firstNode, shortWait, 100)
}

func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := setupSingle(ctx, t)
	peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
	added, err := r.manager.Add(peer)
	require.NoError(t, err)
	require.True(t, added)

	addresses := make([]p2pproto.PexAddress, 101)
	for i := 0; i < len(addresses); i++ {
		nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
		addresses[i] = p2pproto.PexAddress{
			URL: nodeAddress.String(),
		}
	}

	r.peerCh <- p2p.PeerUpdate{
		NodeID: peer.NodeID,
		Status: p2p.PeerStatusUp,
	}

	select {
	// wait for a request and then send a response with too many addresses
	case req := <-r.pexOutCh:
		if _, ok := req.Message.(*p2pproto.PexRequest); !ok {
			t.Fatal("expected v2 pex request")
		}
		r.pexInCh <- p2p.Envelope{
			From: peer.NodeID,
			Message: &p2pproto.PexResponse{
				Addresses: addresses,
			},
		}
	case <-time.After(10 * time.Second):
		t.Fatal("pex failed to send a request within 10 seconds")
	}

	peerErr := <-r.pexErrCh
	require.Error(t, peerErr.Err)
	require.Empty(t, r.pexOutCh)
	require.Contains(t, peerErr.Err.Error(), "peer sent too many addresses")
	require.Equal(t, peer.NodeID, peerErr.NodeID)
}

func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes:   8,
		MaxPeers:     4,
		MaxConnected: 3,
		BufferSize:   8,
	})
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// test that all nodes reach full capacity
	for _, nodeID := range testNet.nodes {
		require.Eventually(t, func() bool {
			// nolint:scopelint
			return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9
		}, longWait, checkFrequency)
	}
}

func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes:   3,
		MaxPeers:     25,
		MaxConnected: 25,
		BufferSize:   5,
	})
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

func TestReactorWithNetworkGrowth(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes: 5,
		BufferSize: 5,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, shortWait)
	}

	// now we inject 10 more nodes
	testNet.addNodes(ctx, t, 10)
	for i := 5; i < testNet.total; i++ {
		node := testNet.nodes[i]
		require.NoError(t, testNet.reactors[node].Start(ctx))
		require.True(t, testNet.reactors[node].IsRunning())
		// we connect all new nodes to a single entry point and check that the
		// node can distribute the addresses to all the others
		testNet.connectPeers(ctx, t, 0, i)
	}
	require.Len(t, testNet.reactors, 15)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

type singleTestReactor struct {
	reactor  *pex.Reactor
	pexInCh  chan p2p.Envelope
	pexOutCh chan p2p.Envelope
	pexErrCh chan p2p.PeerError
	pexCh    *p2p.Channel
	peerCh   chan p2p.PeerUpdate
	manager  *p2p.PeerManager
}

// setupSingle creates a single pex reactor wired to in-memory channels so the
// test can feed it envelopes and peer updates directly and observe its output.
func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
	t.Helper()
	nodeID := newNodeID(t, "a")
	chBuf := 2
	pexInCh := make(chan p2p.Envelope, chBuf)
	pexOutCh := make(chan p2p.Envelope, chBuf)
	pexErrCh := make(chan p2p.PeerError, chBuf)
	pexCh := p2p.NewChannel(
		p2p.ChannelID(pex.PexChannel),
		new(p2pproto.PexMessage),
		pexInCh,
		pexOutCh,
		pexErrCh,
	)

	peerCh := make(chan p2p.PeerUpdate, chBuf)
	peerUpdates := p2p.NewPeerUpdates(peerCh, chBuf)
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
	require.NoError(t, err)

	chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
		return pexCh, nil
	}

	reactor, err := pex.NewReactor(ctx, log.TestingLogger(), peerManager, chCreator, peerUpdates)
	require.NoError(t, err)

	require.NoError(t, reactor.Start(ctx))
	t.Cleanup(reactor.Wait)

	return &singleTestReactor{
		reactor:  reactor,
		pexInCh:  pexInCh,
		pexOutCh: pexOutCh,
		pexErrCh: pexErrCh,
		pexCh:    pexCh,
		peerCh:   peerCh,
		manager:  peerManager,
	}
}

type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger

	reactors    map[types.NodeID]*pex.Reactor
	pexChannels map[types.NodeID]*p2p.Channel

	peerChans   map[types.NodeID]chan p2p.PeerUpdate
	peerUpdates map[types.NodeID]*p2p.PeerUpdates

	nodes []types.NodeID
	mocks []types.NodeID
	total int
	opts  testOptions
}

type testOptions struct {
	MockNodes    int
	TotalNodes   int
	BufferSize   int
	MaxPeers     uint16
	MaxConnected uint16
}

// setupNetwork sets up a test suite with a network of nodes. MockNodes represent
// the hollow nodes that the test can listen and send on directly.
func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite {
	t.Helper()

	require.Greater(t, opts.TotalNodes, opts.MockNodes)
	if opts.BufferSize == 0 {
		opts.BufferSize = defaultBufferSize
	}
	networkOpts := p2ptest.NetworkOptions{
		NumNodes:   opts.TotalNodes,
		BufferSize: opts.BufferSize,
		NodeOpts: p2ptest.NodeOptions{
			MaxPeers:     opts.MaxPeers,
			MaxConnected: opts.MaxConnected,
		},
	}
	chBuf := opts.BufferSize
	realNodes := opts.TotalNodes - opts.MockNodes

	rts := &reactorTestSuite{
		logger:      log.TestingLogger().With("testCase", t.Name()),
		network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
		peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
		total:       opts.TotalNodes,
		opts:        opts,
	}

	// NOTE: we don't assert that the channels get drained after stopping the
	// reactor
	rts.pexChannels = rts.network.MakeChannelsNoCleanup(ctx, t, pex.ChannelDescriptor())

	idx := 0
	for nodeID := range rts.network.Nodes {
		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
		rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])

		chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
			return rts.pexChannels[nodeID], nil
		}

		// the first nodes in the array are always mock nodes
		if idx < opts.MockNodes {
			rts.mocks = append(rts.mocks, nodeID)
		} else {
			var err error
			rts.reactors[nodeID], err = pex.NewReactor(
				ctx,
				rts.logger.With("nodeID", nodeID),
				rts.network.Nodes[nodeID].PeerManager,
				chCreator,
				rts.peerUpdates[nodeID],
			)
			require.NoError(t, err)
		}
		rts.nodes = append(rts.nodes, nodeID)

		idx++
	}

	require.Len(t, rts.reactors, realNodes)

	t.Cleanup(func() {
		for _, reactor := range rts.reactors {
			if reactor.IsRunning() {
				reactor.Wait()
				require.False(t, reactor.IsRunning())
			}
		}
	})

	return rts
}

// starts up the pex reactors for each node
func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) {
	t.Helper()

	for _, reactor := range r.reactors {
		require.NoError(t, reactor.Start(ctx))
		require.True(t, reactor.IsRunning())
	}
}

// addNodes adds the given number of new nodes to the test network, each with
// its own pex reactor. The new reactors are not started and the new nodes are
// not connected to anyone yet.
func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) {
	t.Helper()

	for i := 0; i < nodes; i++ {
		node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{
			MaxPeers:     r.opts.MaxPeers,
			MaxConnected: r.opts.MaxConnected,
		})
		r.network.Nodes[node.NodeID] = node
		nodeID := node.NodeID
		r.pexChannels[nodeID] = node.MakeChannelNoCleanup(ctx, t, pex.ChannelDescriptor())
		r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
		r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
		r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID])

		chCreator := func(context.Context, *p2p.ChannelDescriptor) (*p2p.Channel, error) {
			return r.pexChannels[nodeID], nil
		}

		var err error
		r.reactors[nodeID], err = pex.NewReactor(
			ctx,
			r.logger.With("nodeID", nodeID),
			r.network.Nodes[nodeID].PeerManager,
			chCreator,
			r.peerUpdates[nodeID],
		)
		require.NoError(t, err)
		r.nodes = append(r.nodes, nodeID)
		r.total++
	}
}

// listenFor reads envelopes from a node's pex channel until one matches the
// conditional and passes the assertion, or until the wait period expires.
func (r *reactorTestSuite) listenFor(
	ctx context.Context,
	t *testing.T,
	node types.NodeID,
	conditional func(msg *p2p.Envelope) bool,
	assertion func(t *testing.T, msg *p2p.Envelope) bool,
	waitPeriod time.Duration,
) {
	ctx, cancel := context.WithTimeout(ctx, waitPeriod)
	defer cancel()
	iter := r.pexChannels[node].Receive(ctx)
	for iter.Next(ctx) {
		envelope := iter.Envelope()
		if conditional(envelope) && assertion(t, envelope) {
			return
		}
	}

	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		require.Fail(t, "timed out waiting for message",
			"node=%v, waitPeriod=%s", node, waitPeriod)
	}
}

func (r *reactorTestSuite) listenForRequest(ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration) {
	r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexRequest)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		require.Equal(t, &p2pproto.PexRequest{}, msg.Message)
		return true
	}
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) pingAndlistenForNAddresses(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses int,
) {
	t.Helper()
	r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexResponse)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		m, ok := msg.Message.(*p2pproto.PexResponse)
		if !ok {
			require.Fail(t, "expected pex response v2")
			return true
		}
		// assert the same amount of addresses
		if len(m.Addresses) == addresses {
			return true
		}
		// if we didn't get the right length, we wait and send the
		// request again
		time.Sleep(300 * time.Millisecond)
		r.sendRequest(ctx, t, toNode, fromNode)
		return false
	}
	r.sendRequest(ctx, t, toNode, fromNode)
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) listenForResponse(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses []p2pproto.PexAddress,
) {
	r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexResponse)
		r.logger.Info("received message", "msg", msg, "ok", ok)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
		return true
	}
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) listenForPeerUpdate(
	ctx context.Context,
	t *testing.T,
	onNode, withNode int,
	status p2p.PeerStatus,
	waitPeriod time.Duration,
) {
	on, with := r.checkNodePair(t, onNode, withNode)
	sub := r.network.Nodes[on].PeerManager.Subscribe(ctx)
	timesUp := time.After(waitPeriod)
	for {
		select {
		case <-ctx.Done():
			require.Fail(t, "operation canceled")
			return
		case peerUpdate := <-sub.Updates():
			if peerUpdate.NodeID == with {
				require.Equal(t, status, peerUpdate.Status)
				return
			}
		case <-timesUp:
			require.Fail(t, "timed out waiting for peer status", "%v with status %v",
				with, status)
			return
		}
	}
}

// getAddressesFor returns the pex addresses of the nodes at the given indices.
func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress {
	addresses := make([]p2pproto.PexAddress, len(nodes))
	for idx, node := range nodes {
		nodeID := r.nodes[node]
		addresses[idx] = p2pproto.PexAddress{
			URL: r.network.Nodes[nodeID].NodeAddress.String(),
		}
	}
	return addresses
}

func (r *reactorTestSuite) sendRequest(ctx context.Context, t *testing.T, fromNode, toNode int) {
	t.Helper()
	to, from := r.checkNodePair(t, toNode, fromNode)
	require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{
		To:      to,
		Message: &p2pproto.PexRequest{},
	}))
}

func (r *reactorTestSuite) sendResponse(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	withNodes []int,
) {
	t.Helper()
	from, to := r.checkNodePair(t, fromNode, toNode)
	addrs := r.getAddressesFor(withNodes)
	require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{
		To: to,
		Message: &p2pproto.PexResponse{
			Addresses: addrs,
		},
	}))
}

func (r *reactorTestSuite) requireNumberOfPeers(
	t *testing.T,
	nodeIndex, numPeers int,
	waitPeriod time.Duration,
) {
	t.Helper()
	require.Eventuallyf(t, func() bool {
		actualNumPeers := len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers())
		return actualNumPeers >= numPeers
	}, waitPeriod, checkFrequency, "peer failed to connect with the asserted amount of peers "+
		"index=%d, node=%q, waitPeriod=%s expected=%d actual=%d",
		nodeIndex, r.nodes[nodeIndex], waitPeriod, numPeers,
		len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers()),
	)
}

func (r *reactorTestSuite) connectAll(ctx context.Context, t *testing.T) {
	r.connectN(ctx, t, r.total-1)
}

// connects all nodes with n other nodes
func (r *reactorTestSuite) connectN(ctx context.Context, t *testing.T, n int) {
	if n >= r.total {
		require.Fail(t, "connectN: n must be less than the size of the network - 1")
	}

	for i := 0; i < r.total; i++ {
		for j := 0; j < n; j++ {
			r.connectPeers(ctx, t, i, (i+j+1)%r.total)
		}
	}
}

// connects node1 to node2
func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourceNode, targetNode int) {
	t.Helper()
	node1, node2 := r.checkNodePair(t, sourceNode, targetNode)
	r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode)

	n1 := r.network.Nodes[node1]
	if n1 == nil {
		require.Fail(t, "connectPeers: source node %v is not part of the testnet", node1)
		return
	}

	n2 := r.network.Nodes[node2]
	if n2 == nil {
		require.Fail(t, "connectPeers: target node %v is not part of the testnet", node2)
		return
	}

	sourceSub := n1.PeerManager.Subscribe(ctx)
	targetSub := n2.PeerManager.Subscribe(ctx)

	sourceAddress := n1.NodeAddress
	r.logger.Debug("source address", "address", sourceAddress)
	targetAddress := n2.NodeAddress
	r.logger.Debug("target address", "address", targetAddress)

	added, err := n1.PeerManager.Add(targetAddress)
	require.NoError(t, err)

	if !added {
		r.logger.Debug("nodes already know about one another",
			"sourceNode", sourceNode, "targetNode", targetNode)
		return
	}

	select {
	case peerUpdate := <-targetSub.Updates():
		require.Equal(t, p2p.PeerUpdate{
			NodeID: node1,
			Status: p2p.PeerStatusUp,
		}, peerUpdate)
		r.logger.Debug("target connected with source")
	case <-time.After(2 * time.Second):
		require.Fail(t, "timed out waiting for peer", "%v accepting %v",
			targetNode, sourceNode)
	}

	select {
	case peerUpdate := <-sourceSub.Updates():
		require.Equal(t, p2p.PeerUpdate{
			NodeID: node2,
			Status: p2p.PeerStatusUp,
		}, peerUpdate)
		r.logger.Debug("source connected with target")
	case <-time.After(2 * time.Second):
		require.Fail(t, "timed out waiting for peer", "%v dialing %v",
			sourceNode, targetNode)
	}

	added, err = n2.PeerManager.Add(sourceAddress)
	require.NoError(t, err)
	require.True(t, added)
}

// checkNodePair verifies that the two node indices are distinct and within
// range, and returns the corresponding node IDs.
func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) {
	require.NotEqual(t, first, second)
	require.Less(t, first, r.total)
	require.Less(t, second, r.total)
	return r.nodes[first], r.nodes[second]
}

func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
	peerManager := r.network.Nodes[r.nodes[node]].PeerManager
	for _, addr := range addrs {
		require.Less(t, addr, r.total)
		address := r.network.Nodes[r.nodes[addr]].NodeAddress
		added, err := peerManager.Add(address)
		require.NoError(t, err)
		require.True(t, added)
	}
}

func newNodeID(t *testing.T, id string) types.NodeID {
	nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength))
	require.NoError(t, err)
	return nodeID
}

func randomNodeID(t *testing.T) types.NodeID {
	return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
}