

cleanup: Reduce and normalize import path aliasing. (#6975)

The code in the Tendermint repository makes heavy use of import aliasing. This is made necessary by our extensive reuse of common base package names, and by repetition of similar names across different subdirectories. Unfortunately we have not been very consistent about which packages we alias in various circumstances, and the aliases we use vary.

In the spirit of the advice in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports, this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended to help make the code more readable to developers (including myself) trying to understand what is being imported where. Only unexported names have been modified, and the changes were generated and applied mechanically with gofmt -r and comby, respecting the lexical and syntactic rules of Go. Even so, I did not fix every inconsistency. Where the changes would be too disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
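As an illustration of these principles, here is a minimal, hypothetical sketch of an import block that follows them. It is not part of the change: the package name aliasdemo, the use function, and the net/http/pprof blank import are invented for the example, and the Tendermint paths are simply borrowed from the test file below (the internal/... packages can only be imported from inside the Tendermint module).

// Package aliasdemo is a hypothetical example illustrating the import
// aliasing and grouping rules described in the commit message above.
package aliasdemo

import (
	// Standard library imports come first, unaliased.
	"fmt"

	// Third-party imports keep their base name when it is unambiguous;
	// no alias is added just to restate the package name.
	"github.com/stretchr/testify/require"

	// An alias is used only where two imports share the base name "p2p",
	// so that both can be referenced in the same file.
	"github.com/tendermint/tendermint/internal/p2p"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"

	// Blank (side-effecting) imports are grouped together and commented.
	_ "net/http/pprof" // registers pprof handlers on http.DefaultServeMux
)

// use references each import so the example compiles on its own.
func use() {
	_ = fmt.Sprint(p2p.PeerStatusUp, &p2pproto.PexRequest{})
	_ = require.New
}

The contents of the test file itself (reactor_test.go for the pex reactor) follow.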
package pex_test

import (
	"context"
	"errors"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
	"github.com/tendermint/tendermint/internal/p2p/pex"
	"github.com/tendermint/tendermint/libs/log"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

const (
	checkFrequency    = 500 * time.Millisecond
	defaultBufferSize = 2
	shortWait         = 10 * time.Second
	longWait          = 60 * time.Second

	firstNode  = 0
	secondNode = 1
	thirdNode  = 2
)
func TestReactorBasic(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// start a network with one mock reactor and one "real" reactor
	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 2,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// assert that the mock node receives a request from the real node
	testNet.listenForRequest(ctx, t, secondNode, firstNode, shortWait)

	// assert that when a mock node sends a request it receives a response (and
	// the correct one)
	testNet.sendRequest(ctx, t, firstNode, secondNode)
	testNet.listenForResponse(ctx, t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil))
}
func TestReactorConnectFullNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes: 4,
	})

	// connect each node to only one other node (in practice each node ends up
	// with two connections, because connections are bidirectional)
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}
func TestReactorSendsRequestsTooOften(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := setupSingle(ctx, t)

	badNode := newNodeID(t, "b")

	r.pexInCh <- p2p.Envelope{
		From:    badNode,
		Message: &p2pproto.PexRequest{},
	}

	resp := <-r.pexOutCh
	msg, ok := resp.Message.(*p2pproto.PexResponse)
	require.True(t, ok)
	require.Empty(t, msg.Addresses)

	r.pexInCh <- p2p.Envelope{
		From:    badNode,
		Message: &p2pproto.PexRequest{},
	}

	peerErr := <-r.pexErrCh
	require.Error(t, peerErr.Err)
	require.Empty(t, r.pexOutCh)
	require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
	require.Equal(t, badNode, peerErr.NodeID)
}

func TestReactorSendsResponseWithoutRequest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 3,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// firstNode sends the secondNode an unrequested response
	// NOTE: secondNode will send a request by default during startup so we send
	// two responses to counter that.
	testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode})
	testNet.sendResponse(ctx, t, firstNode, secondNode, []int{thirdNode})

	// secondNode should evict the firstNode
	testNet.listenForPeerUpdate(ctx, t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
}
func TestReactorNeverSendsTooManyPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		MockNodes:  1,
		TotalNodes: 2,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	testNet.addNodes(ctx, t, 110)
	nodes := make([]int, 110)
	for i := 0; i < len(nodes); i++ {
		nodes[i] = i + 2
	}
	testNet.addAddresses(t, secondNode, nodes)

	// first we check that even though we have 110 peers, honest pex reactors
	// only send 100 (test if secondNode sends firstNode 100 addresses)
	testNet.pingAndlistenForNAddresses(ctx, t, secondNode, firstNode, shortWait, 100)
}
func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	r := setupSingle(ctx, t)

	peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
	added, err := r.manager.Add(peer)
	require.NoError(t, err)
	require.True(t, added)

	addresses := make([]p2pproto.PexAddress, 101)
	for i := 0; i < len(addresses); i++ {
		nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
		addresses[i] = p2pproto.PexAddress{
			URL: nodeAddress.String(),
		}
	}

	r.peerCh <- p2p.PeerUpdate{
		NodeID: peer.NodeID,
		Status: p2p.PeerStatusUp,
	}

	select {
	// wait for a request and then send a response with too many addresses
	case req := <-r.pexOutCh:
		if _, ok := req.Message.(*p2pproto.PexRequest); !ok {
			t.Fatal("expected v2 pex request")
		}
		r.pexInCh <- p2p.Envelope{
			From: peer.NodeID,
			Message: &p2pproto.PexResponse{
				Addresses: addresses,
			},
		}
	case <-time.After(10 * time.Second):
		t.Fatal("pex failed to send a request within 10 seconds")
	}

	peerErr := <-r.pexErrCh
	require.Error(t, peerErr.Err)
	require.Empty(t, r.pexOutCh)
	require.Contains(t, peerErr.Err.Error(), "peer sent too many addresses")
	require.Equal(t, peer.NodeID, peerErr.NodeID)
}

func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes:   8,
		MaxPeers:     4,
		MaxConnected: 3,
		BufferSize:   8,
	})
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// test that all nodes reach full capacity
	for _, nodeID := range testNet.nodes {
		require.Eventually(t, func() bool {
			// nolint:scopelint
			return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9
		}, longWait, checkFrequency)
	}
}

func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes:   3,
		MaxPeers:     25,
		MaxConnected: 25,
		BufferSize:   5,
	})
	testNet.connectN(ctx, t, 1)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}

func TestReactorWithNetworkGrowth(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	testNet := setupNetwork(ctx, t, testOptions{
		TotalNodes: 5,
		BufferSize: 5,
	})
	testNet.connectAll(ctx, t)
	testNet.start(ctx, t)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, shortWait)
	}

	// now we inject 10 more nodes
	testNet.addNodes(ctx, t, 10)
	for i := 5; i < testNet.total; i++ {
		node := testNet.nodes[i]
		require.NoError(t, testNet.reactors[node].Start(ctx))
		require.True(t, testNet.reactors[node].IsRunning())
		// we connect all new nodes to a single entry point and check that the
		// node can distribute the addresses to all the others
		testNet.connectPeers(ctx, t, 0, i)
	}
	require.Len(t, testNet.reactors, 15)

	// assert that all nodes add each other in the network
	for idx := 0; idx < len(testNet.nodes); idx++ {
		testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
	}
}
type singleTestReactor struct {
	reactor  *pex.Reactor
	pexInCh  chan p2p.Envelope
	pexOutCh chan p2p.Envelope
	pexErrCh chan p2p.PeerError
	pexCh    *p2p.Channel
	peerCh   chan p2p.PeerUpdate
	manager  *p2p.PeerManager
}

func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
	t.Helper()
	nodeID := newNodeID(t, "a")
	chBuf := 2
	pexInCh := make(chan p2p.Envelope, chBuf)
	pexOutCh := make(chan p2p.Envelope, chBuf)
	pexErrCh := make(chan p2p.PeerError, chBuf)
	pexCh := p2p.NewChannel(
		p2p.ChannelID(pex.PexChannel),
		new(p2pproto.PexMessage),
		pexInCh,
		pexOutCh,
		pexErrCh,
	)

	peerCh := make(chan p2p.PeerUpdate, chBuf)
	peerUpdates := p2p.NewPeerUpdates(peerCh, chBuf)
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
	require.NoError(t, err)

	reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
	require.NoError(t, reactor.Start(ctx))
	t.Cleanup(reactor.Wait)

	return &singleTestReactor{
		reactor:  reactor,
		pexInCh:  pexInCh,
		pexOutCh: pexOutCh,
		pexErrCh: pexErrCh,
		pexCh:    pexCh,
		peerCh:   peerCh,
		manager:  peerManager,
	}
}

type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger

	reactors    map[types.NodeID]*pex.Reactor
	pexChannels map[types.NodeID]*p2p.Channel

	peerChans   map[types.NodeID]chan p2p.PeerUpdate
	peerUpdates map[types.NodeID]*p2p.PeerUpdates

	nodes []types.NodeID
	mocks []types.NodeID
	total int
	opts  testOptions
}

type testOptions struct {
	MockNodes    int
	TotalNodes   int
	BufferSize   int
	MaxPeers     uint16
	MaxConnected uint16
}
// setupNetwork sets up a test suite with a network of nodes. MockNodes are the
// hollow nodes that the test can listen and send on.
func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite {
	t.Helper()

	require.Greater(t, opts.TotalNodes, opts.MockNodes)
	if opts.BufferSize == 0 {
		opts.BufferSize = defaultBufferSize
	}
	networkOpts := p2ptest.NetworkOptions{
		NumNodes:   opts.TotalNodes,
		BufferSize: opts.BufferSize,
		NodeOpts: p2ptest.NodeOptions{
			MaxPeers:     opts.MaxPeers,
			MaxConnected: opts.MaxConnected,
		},
	}
	chBuf := opts.BufferSize
	realNodes := opts.TotalNodes - opts.MockNodes

	rts := &reactorTestSuite{
		logger:      log.TestingLogger().With("testCase", t.Name()),
		network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
		peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
		total:       opts.TotalNodes,
		opts:        opts,
	}

	// NOTE: we don't assert that the channels get drained after stopping the
	// reactor
	rts.pexChannels = rts.network.MakeChannelsNoCleanup(ctx, t, pex.ChannelDescriptor())

	idx := 0
	for nodeID := range rts.network.Nodes {
		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
		rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])

		// the first nodes in the array are always mock nodes
		if idx < opts.MockNodes {
			rts.mocks = append(rts.mocks, nodeID)
		} else {
			rts.reactors[nodeID] = pex.NewReactor(
				rts.logger.With("nodeID", nodeID),
				rts.network.Nodes[nodeID].PeerManager,
				rts.pexChannels[nodeID],
				rts.peerUpdates[nodeID],
			)
		}
		rts.nodes = append(rts.nodes, nodeID)

		idx++
	}

	require.Len(t, rts.reactors, realNodes)

	t.Cleanup(func() {
		for _, reactor := range rts.reactors {
			if reactor.IsRunning() {
				reactor.Wait()
				require.False(t, reactor.IsRunning())
			}
		}
	})

	return rts
}
// starts up the pex reactors for each node
func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) {
	t.Helper()

	for _, reactor := range r.reactors {
		require.NoError(t, reactor.Start(ctx))
		require.True(t, reactor.IsRunning())
	}
}

func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) {
	t.Helper()

	for i := 0; i < nodes; i++ {
		node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{
			MaxPeers:     r.opts.MaxPeers,
			MaxConnected: r.opts.MaxConnected,
		})
		r.network.Nodes[node.NodeID] = node
		nodeID := node.NodeID
		r.pexChannels[nodeID] = node.MakeChannelNoCleanup(ctx, t, pex.ChannelDescriptor())
		r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
		r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
		r.network.Nodes[nodeID].PeerManager.Register(ctx, r.peerUpdates[nodeID])
		r.reactors[nodeID] = pex.NewReactor(
			r.logger.With("nodeID", nodeID),
			r.network.Nodes[nodeID].PeerManager,
			r.pexChannels[nodeID],
			r.peerUpdates[nodeID],
		)
		r.nodes = append(r.nodes, nodeID)
		r.total++
	}
}

func (r *reactorTestSuite) listenFor(
	ctx context.Context,
	t *testing.T,
	node types.NodeID,
	conditional func(msg *p2p.Envelope) bool,
	assertion func(t *testing.T, msg *p2p.Envelope) bool,
	waitPeriod time.Duration,
) {
	ctx, cancel := context.WithTimeout(ctx, waitPeriod)
	defer cancel()
	iter := r.pexChannels[node].Receive(ctx)
	for iter.Next(ctx) {
		envelope := iter.Envelope()
		if conditional(envelope) && assertion(t, envelope) {
			return
		}
	}

	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		require.Fail(t, "timed out waiting for message",
			"node=%v, waitPeriod=%s", node, waitPeriod)
	}
}

func (r *reactorTestSuite) listenForRequest(ctx context.Context, t *testing.T, fromNode, toNode int, waitPeriod time.Duration) {
	r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexRequest)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		require.Equal(t, &p2pproto.PexRequest{}, msg.Message)
		return true
	}
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) pingAndlistenForNAddresses(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses int,
) {
	t.Helper()

	r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexResponse)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		m, ok := msg.Message.(*p2pproto.PexResponse)
		if !ok {
			require.Fail(t, "expected pex response v2")
			return true
		}
		// assert the same amount of addresses
		if len(m.Addresses) == addresses {
			return true
		}
		// if we didn't get the right length, we wait and send the
		// request again
		time.Sleep(300 * time.Millisecond)
		r.sendRequest(ctx, t, toNode, fromNode)
		return false
	}
	r.sendRequest(ctx, t, toNode, fromNode)
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}
func (r *reactorTestSuite) listenForResponse(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses []p2pproto.PexAddress,
) {
	r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg *p2p.Envelope) bool {
		_, ok := msg.Message.(*p2pproto.PexResponse)
		r.logger.Info("message", msg, "ok", ok)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg *p2p.Envelope) bool {
		require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
		return true
	}
	r.listenFor(ctx, t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) listenForPeerUpdate(
	ctx context.Context,
	t *testing.T,
	onNode, withNode int,
	status p2p.PeerStatus,
	waitPeriod time.Duration,
) {
	on, with := r.checkNodePair(t, onNode, withNode)
	sub := r.network.Nodes[on].PeerManager.Subscribe(ctx)
	timesUp := time.After(waitPeriod)
	for {
		select {
		case <-ctx.Done():
			require.Fail(t, "operation canceled")
			return
		case peerUpdate := <-sub.Updates():
			if peerUpdate.NodeID == with {
				require.Equal(t, status, peerUpdate.Status)
				return
			}
		case <-timesUp:
			require.Fail(t, "timed out waiting for peer status", "%v with status %v",
				with, status)
			return
		}
	}
}

func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress {
	addresses := make([]p2pproto.PexAddress, len(nodes))
	for idx, node := range nodes {
		nodeID := r.nodes[node]
		addresses[idx] = p2pproto.PexAddress{
			URL: r.network.Nodes[nodeID].NodeAddress.String(),
		}
	}
	return addresses
}

func (r *reactorTestSuite) sendRequest(ctx context.Context, t *testing.T, fromNode, toNode int) {
	t.Helper()
	to, from := r.checkNodePair(t, toNode, fromNode)
	require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{
		To:      to,
		Message: &p2pproto.PexRequest{},
	}))
}

func (r *reactorTestSuite) sendResponse(
	ctx context.Context,
	t *testing.T,
	fromNode, toNode int,
	withNodes []int,
) {
	t.Helper()
	from, to := r.checkNodePair(t, fromNode, toNode)
	addrs := r.getAddressesFor(withNodes)
	require.NoError(t, r.pexChannels[from].Send(ctx, p2p.Envelope{
		To: to,
		Message: &p2pproto.PexResponse{
			Addresses: addrs,
		},
	}))
}

func (r *reactorTestSuite) requireNumberOfPeers(
	t *testing.T,
	nodeIndex, numPeers int,
	waitPeriod time.Duration,
) {
	t.Helper()
	require.Eventuallyf(t, func() bool {
		actualNumPeers := len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers())
		return actualNumPeers >= numPeers
	}, waitPeriod, checkFrequency, "peer failed to connect with the asserted amount of peers "+
		"index=%d, node=%q, waitPeriod=%s expected=%d actual=%d",
		nodeIndex, r.nodes[nodeIndex], waitPeriod, numPeers,
		len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers()),
	)
}
func (r *reactorTestSuite) connectAll(ctx context.Context, t *testing.T) {
	r.connectN(ctx, t, r.total-1)
}

// connects all nodes with n other nodes
func (r *reactorTestSuite) connectN(ctx context.Context, t *testing.T, n int) {
	if n >= r.total {
		require.Fail(t, "connectN: n must be less than the size of the network - 1")
	}

	for i := 0; i < r.total; i++ {
		for j := 0; j < n; j++ {
			r.connectPeers(ctx, t, i, (i+j+1)%r.total)
		}
	}
}

// connects node1 to node2
func (r *reactorTestSuite) connectPeers(ctx context.Context, t *testing.T, sourceNode, targetNode int) {
	t.Helper()

	node1, node2 := r.checkNodePair(t, sourceNode, targetNode)
	r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode)

	n1 := r.network.Nodes[node1]
	if n1 == nil {
		require.Fail(t, "connectPeers: source node %v is not part of the testnet", node1)
		return
	}

	n2 := r.network.Nodes[node2]
	if n2 == nil {
		require.Fail(t, "connectPeers: target node %v is not part of the testnet", node2)
		return
	}

	sourceSub := n1.PeerManager.Subscribe(ctx)
	targetSub := n2.PeerManager.Subscribe(ctx)

	sourceAddress := n1.NodeAddress
	r.logger.Debug("source address", "address", sourceAddress)
	targetAddress := n2.NodeAddress
	r.logger.Debug("target address", "address", targetAddress)

	added, err := n1.PeerManager.Add(targetAddress)
	require.NoError(t, err)

	if !added {
		r.logger.Debug("nodes already know about one another",
			"sourceNode", sourceNode, "targetNode", targetNode)
		return
	}

	select {
	case peerUpdate := <-targetSub.Updates():
		require.Equal(t, p2p.PeerUpdate{
			NodeID: node1,
			Status: p2p.PeerStatusUp,
		}, peerUpdate)
		r.logger.Debug("target connected with source")
	case <-time.After(2 * time.Second):
		require.Fail(t, "timed out waiting for peer", "%v accepting %v",
			targetNode, sourceNode)
	}

	select {
	case peerUpdate := <-sourceSub.Updates():
		require.Equal(t, p2p.PeerUpdate{
			NodeID: node2,
			Status: p2p.PeerStatusUp,
		}, peerUpdate)
		r.logger.Debug("source connected with target")
	case <-time.After(2 * time.Second):
		require.Fail(t, "timed out waiting for peer", "%v dialing %v",
			sourceNode, targetNode)
	}

	added, err = n2.PeerManager.Add(sourceAddress)
	require.NoError(t, err)
	require.True(t, added)
}

func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) {
	require.NotEqual(t, first, second)
	require.Less(t, first, r.total)
	require.Less(t, second, r.total)
	return r.nodes[first], r.nodes[second]
}

func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
	peerManager := r.network.Nodes[r.nodes[node]].PeerManager
	for _, addr := range addrs {
		require.Less(t, addr, r.total)
		address := r.network.Nodes[r.nodes[addr]].NodeAddress
		added, err := peerManager.Add(address)
		require.NoError(t, err)
		require.True(t, added)
	}
}

func newNodeID(t *testing.T, id string) types.NodeID {
	nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength))
	require.NoError(t, err)
	return nodeID
}

func randomNodeID(t *testing.T) types.NodeID {
	return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
}