cleanup: Reduce and normalize import path aliasing. (#6975)

The code in the Tendermint repository makes heavy use of import aliasing. This is made necessary by our extensive reuse of common base package names, and by repetition of similar names across different subdirectories. Unfortunately we have not been very consistent about which packages we alias in various circumstances, and the aliases we use vary. In the spirit of the advice in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports, this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended to help make the code more readable to developers (including myself) trying to understand what is being imported where. Only unexported names have been modified, and the changes were generated and applied mechanically with gofmt -r and comby, respecting the lexical and syntactic rules of Go. Even so, I did not fix every inconsistency. Where the changes would be too disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
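As a small illustration of these principles (a sketch only, not a diff from this commit; the specific package paths are chosen for the example), an import block written to these rules might look like:

import (
    "context"
    "fmt"

    // No alias: the base names "log" and "service" are unambiguous here.
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"

    // Aliases only where two imports share the same base package name.
    abciclient "github.com/tendermint/tendermint/abci/client"
    rpcclient "github.com/tendermint/tendermint/rpc/client"

    // Blank (side-effecting) imports grouped together and commented.
    _ "net/http/pprof" // registers pprof HTTP handlers
)
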
package pex_test

import (
    "context"
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/p2ptest"
    "github.com/tendermint/tendermint/internal/p2p/pex"
    "github.com/tendermint/tendermint/libs/log"
    p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

const (
    checkFrequency    = 500 * time.Millisecond
    defaultBufferSize = 2
    shortWait         = 10 * time.Second
    longWait          = 60 * time.Second

    firstNode  = 0
    secondNode = 1
    thirdNode  = 2
)

func TestReactorBasic(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    // start a network with one mock reactor and one "real" reactor
    testNet := setupNetwork(ctx, t, testOptions{
        MockNodes:  1,
        TotalNodes: 2,
    })
    testNet.connectAll(t)
    testNet.start(ctx, t)
    // assert that the mock node receives a request from the real node
    testNet.listenForRequest(t, secondNode, firstNode, shortWait)
    // assert that when a mock node sends a request it receives a response (and
    // the correct one)
    testNet.sendRequest(t, firstNode, secondNode)
    testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil))
}

func TestReactorConnectFullNetwork(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    testNet := setupNetwork(ctx, t, testOptions{
        TotalNodes: 4,
    })
    // connect each node to only one other node (in practice each ends up with
    // two connections because connections are bidirectional)
    testNet.connectN(t, 1)
    testNet.start(ctx, t)
    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
    }
}

func TestReactorSendsRequestsTooOften(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    r := setupSingle(ctx, t)
    badNode := newNodeID(t, "b")

    r.pexInCh <- p2p.Envelope{
        From:    badNode,
        Message: &p2pproto.PexRequest{},
    }

    resp := <-r.pexOutCh
    msg, ok := resp.Message.(*p2pproto.PexResponse)
    require.True(t, ok)
    require.Empty(t, msg.Addresses)

    r.pexInCh <- p2p.Envelope{
        From:    badNode,
        Message: &p2pproto.PexRequest{},
    }

    peerErr := <-r.pexErrCh
    require.Error(t, peerErr.Err)
    require.Empty(t, r.pexOutCh)
    require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
    require.Equal(t, badNode, peerErr.NodeID)
}

func TestReactorSendsResponseWithoutRequest(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    testNet := setupNetwork(ctx, t, testOptions{
        MockNodes:  1,
        TotalNodes: 3,
    })
    testNet.connectAll(t)
    testNet.start(ctx, t)
    // firstNode sends the secondNode an unrequested response
    // NOTE: secondNode will send a request by default during startup so we send
    // two responses to counter that.
    testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode})
    testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode})
    // secondNode should evict the firstNode
    testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
}

func TestReactorNeverSendsTooManyPeers(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    testNet := setupNetwork(ctx, t, testOptions{
        MockNodes:  1,
        TotalNodes: 2,
    })
    testNet.connectAll(t)
    testNet.start(ctx, t)
    testNet.addNodes(ctx, t, 110)
    nodes := make([]int, 110)
    for i := 0; i < len(nodes); i++ {
        nodes[i] = i + 2
    }
    testNet.addAddresses(t, secondNode, nodes)
    // check that even though the node knows about 110 peers, an honest pex
    // reactor only sends 100 (i.e. secondNode sends firstNode 100 addresses)
    testNet.pingAndlistenForNAddresses(t, secondNode, firstNode, shortWait, 100)
}

func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    r := setupSingle(ctx, t)
    peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
    added, err := r.manager.Add(peer)
    require.NoError(t, err)
    require.True(t, added)

    addresses := make([]p2pproto.PexAddress, 101)
    for i := 0; i < len(addresses); i++ {
        nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
        addresses[i] = p2pproto.PexAddress{
            URL: nodeAddress.String(),
        }
    }

    r.peerCh <- p2p.PeerUpdate{
        NodeID: peer.NodeID,
        Status: p2p.PeerStatusUp,
    }

    select {
    // wait for a request and then send a response with too many addresses
    case req := <-r.pexOutCh:
        if _, ok := req.Message.(*p2pproto.PexRequest); !ok {
            t.Fatal("expected v2 pex request")
        }
        r.pexInCh <- p2p.Envelope{
            From: peer.NodeID,
            Message: &p2pproto.PexResponse{
                Addresses: addresses,
            },
        }
    case <-time.After(10 * time.Second):
        t.Fatal("pex failed to send a request within 10 seconds")
    }

    peerErr := <-r.pexErrCh
    require.Error(t, peerErr.Err)
    require.Empty(t, r.pexOutCh)
    require.Contains(t, peerErr.Err.Error(), "peer sent too many addresses")
    require.Equal(t, peer.NodeID, peerErr.NodeID)
}

func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    testNet := setupNetwork(ctx, t, testOptions{
        TotalNodes:   8,
        MaxPeers:     4,
        MaxConnected: 3,
        BufferSize:   8,
    })
    testNet.connectN(t, 1)
    testNet.start(ctx, t)

    // test that all nodes reach full capacity
    for _, nodeID := range testNet.nodes {
        require.Eventually(t, func() bool {
            // nolint:scopelint
            return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9
        }, longWait, checkFrequency)
    }
}

func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    testNet := setupNetwork(ctx, t, testOptions{
        TotalNodes:   3,
        MaxPeers:     25,
        MaxConnected: 25,
        BufferSize:   5,
    })
    testNet.connectN(t, 1)
    testNet.start(ctx, t)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
    }
}

func TestReactorWithNetworkGrowth(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    testNet := setupNetwork(ctx, t, testOptions{
        TotalNodes: 5,
        BufferSize: 5,
    })
    testNet.connectAll(t)
    testNet.start(ctx, t)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, shortWait)
    }

    // now we inject 10 more nodes
    testNet.addNodes(ctx, t, 10)
    for i := 5; i < testNet.total; i++ {
        node := testNet.nodes[i]
        require.NoError(t, testNet.reactors[node].Start(ctx))
        require.True(t, testNet.reactors[node].IsRunning())
        // we connect all new nodes to a single entry point and check that the
        // node can distribute the addresses to all the others
        testNet.connectPeers(t, 0, i)
    }
    require.Len(t, testNet.reactors, 15)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
    }
}
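
// singleTestReactor bundles one pex reactor with the raw channels the tests
// use to drive it directly.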
type singleTestReactor struct {
    reactor  *pex.Reactor
    pexInCh  chan p2p.Envelope
    pexOutCh chan p2p.Envelope
    pexErrCh chan p2p.PeerError
    pexCh    *p2p.Channel
    peerCh   chan p2p.PeerUpdate
    manager  *p2p.PeerManager
}
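
// setupSingle creates and starts a single pex reactor wired to in-memory
// channels and a fresh peer manager, registering cleanup with the test.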
func setupSingle(ctx context.Context, t *testing.T) *singleTestReactor {
    t.Helper()
    nodeID := newNodeID(t, "a")
    chBuf := 2
    pexInCh := make(chan p2p.Envelope, chBuf)
    pexOutCh := make(chan p2p.Envelope, chBuf)
    pexErrCh := make(chan p2p.PeerError, chBuf)
    pexCh := p2p.NewChannel(
        p2p.ChannelID(pex.PexChannel),
        new(p2pproto.PexMessage),
        pexInCh,
        pexOutCh,
        pexErrCh,
    )

    peerCh := make(chan p2p.PeerUpdate, chBuf)
    peerUpdates := p2p.NewPeerUpdates(peerCh, chBuf)
    peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
    require.NoError(t, err)

    reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
    require.NoError(t, reactor.Start(ctx))
    t.Cleanup(func() {
        pexCh.Close()
        peerUpdates.Close()
        reactor.Wait()
    })

    return &singleTestReactor{
        reactor:  reactor,
        pexInCh:  pexInCh,
        pexOutCh: pexOutCh,
        pexErrCh: pexErrCh,
        pexCh:    pexCh,
        peerCh:   peerCh,
        manager:  peerManager,
    }
}
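
// reactorTestSuite holds a test network of real and mock pex reactors together
// with their channels, peer-update subscriptions, and the options used to
// build the network.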
type reactorTestSuite struct {
    network     *p2ptest.Network
    logger      log.Logger
    reactors    map[types.NodeID]*pex.Reactor
    pexChannels map[types.NodeID]*p2p.Channel
    peerChans   map[types.NodeID]chan p2p.PeerUpdate
    peerUpdates map[types.NodeID]*p2p.PeerUpdates
    nodes       []types.NodeID
    mocks       []types.NodeID
    total       int
    opts        testOptions
}
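
// testOptions configures the size, peer limits, and channel buffer size of the
// test network.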
type testOptions struct {
    MockNodes    int
    TotalNodes   int
    BufferSize   int
    MaxPeers     uint16
    MaxConnected uint16
}

// setupNetwork sets up a test suite with a network of nodes. MockNodes are the
// hollow nodes that the test can listen and send on.
func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorTestSuite {
    t.Helper()

    require.Greater(t, opts.TotalNodes, opts.MockNodes)
    if opts.BufferSize == 0 {
        opts.BufferSize = defaultBufferSize
    }
    networkOpts := p2ptest.NetworkOptions{
        NumNodes:   opts.TotalNodes,
        BufferSize: opts.BufferSize,
        NodeOpts: p2ptest.NodeOptions{
            MaxPeers:     opts.MaxPeers,
            MaxConnected: opts.MaxConnected,
        },
    }
    chBuf := opts.BufferSize
    realNodes := opts.TotalNodes - opts.MockNodes

    rts := &reactorTestSuite{
        logger:      log.TestingLogger().With("testCase", t.Name()),
        network:     p2ptest.MakeNetwork(ctx, t, networkOpts),
        reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
        pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
        peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
        peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
        total:       opts.TotalNodes,
        opts:        opts,
    }

    // NOTE: we don't assert that the channels get drained after stopping the
    // reactor
    rts.pexChannels = rts.network.MakeChannelsNoCleanup(t, pex.ChannelDescriptor())

    idx := 0
    for nodeID := range rts.network.Nodes {
        rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
        rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
        rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])

        // the first nodes in the array are always mock nodes
        if idx < opts.MockNodes {
            rts.mocks = append(rts.mocks, nodeID)
        } else {
            rts.reactors[nodeID] = pex.NewReactor(
                rts.logger.With("nodeID", nodeID),
                rts.network.Nodes[nodeID].PeerManager,
                rts.pexChannels[nodeID],
                rts.peerUpdates[nodeID],
            )
        }
        rts.nodes = append(rts.nodes, nodeID)
        idx++
    }

    require.Len(t, rts.reactors, realNodes)

    t.Cleanup(func() {
        for nodeID, reactor := range rts.reactors {
            if reactor.IsRunning() {
                reactor.Wait()
                require.False(t, reactor.IsRunning())
            }
            rts.pexChannels[nodeID].Close()
            rts.peerUpdates[nodeID].Close()
        }
        for _, nodeID := range rts.mocks {
            rts.pexChannels[nodeID].Close()
            rts.peerUpdates[nodeID].Close()
        }
    })

    return rts
}

// start starts the pex reactor on every real node.
func (r *reactorTestSuite) start(ctx context.Context, t *testing.T) {
    t.Helper()

    for _, reactor := range r.reactors {
        require.NoError(t, reactor.Start(ctx))
        require.True(t, reactor.IsRunning())
    }
}
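
// addNodes adds the given number of new real nodes (with reactors created but
// not yet started) to the test network.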
func (r *reactorTestSuite) addNodes(ctx context.Context, t *testing.T, nodes int) {
    t.Helper()

    for i := 0; i < nodes; i++ {
        node := r.network.MakeNode(ctx, t, p2ptest.NodeOptions{
            MaxPeers:     r.opts.MaxPeers,
            MaxConnected: r.opts.MaxConnected,
        })
        r.network.Nodes[node.NodeID] = node
        nodeID := node.NodeID
        r.pexChannels[nodeID] = node.MakeChannelNoCleanup(t, pex.ChannelDescriptor())
        r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
        r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
        r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID])
        r.reactors[nodeID] = pex.NewReactor(
            r.logger.With("nodeID", nodeID),
            r.network.Nodes[nodeID].PeerManager,
            r.pexChannels[nodeID],
            r.peerUpdates[nodeID],
        )
        r.nodes = append(r.nodes, nodeID)
        r.total++
    }
}
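
// listenFor reads from node's pex channel until a message satisfies both
// conditional and assertion, or fails the test when waitPeriod expires.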
func (r *reactorTestSuite) listenFor(
    t *testing.T,
    node types.NodeID,
    conditional func(msg p2p.Envelope) bool,
    assertion func(t *testing.T, msg p2p.Envelope) bool,
    waitPeriod time.Duration,
) {
    timesUp := time.After(waitPeriod)
    for {
        select {
        case envelope := <-r.pexChannels[node].In:
            if conditional(envelope) && assertion(t, envelope) {
                return
            }
        case <-timesUp:
            require.Fail(t, "timed out waiting for message",
                "node=%v, waitPeriod=%s", node, waitPeriod)
        }
    }
}
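
// listenForRequest waits until toNode receives a PexRequest from fromNode.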
func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, waitPeriod time.Duration) {
    r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
    to, from := r.checkNodePair(t, toNode, fromNode)
    conditional := func(msg p2p.Envelope) bool {
        _, ok := msg.Message.(*p2pproto.PexRequest)
        return ok && msg.From == from
    }
    assertion := func(t *testing.T, msg p2p.Envelope) bool {
        require.Equal(t, &p2pproto.PexRequest{}, msg.Message)
        return true
    }
    r.listenFor(t, to, conditional, assertion, waitPeriod)
}
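
// pingAndlistenForNAddresses has toNode request addresses from fromNode and
// waits until a response carrying exactly the expected number of addresses
// arrives, re-sending the request whenever a response of a different size
// shows up.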
func (r *reactorTestSuite) pingAndlistenForNAddresses(
    t *testing.T,
    fromNode, toNode int,
    waitPeriod time.Duration,
    addresses int,
) {
    r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
    to, from := r.checkNodePair(t, toNode, fromNode)
    conditional := func(msg p2p.Envelope) bool {
        _, ok := msg.Message.(*p2pproto.PexResponse)
        return ok && msg.From == from
    }
    assertion := func(t *testing.T, msg p2p.Envelope) bool {
        m, ok := msg.Message.(*p2pproto.PexResponse)
        if !ok {
            require.Fail(t, "expected pex response v2")
            return true
        }
        // assert the same amount of addresses
        if len(m.Addresses) == addresses {
            return true
        }
        // if we didn't get the right length, we wait and send the
        // request again
        time.Sleep(300 * time.Millisecond)
        r.sendRequest(t, toNode, fromNode)
        return false
    }
    r.sendRequest(t, toNode, fromNode)
    r.listenFor(t, to, conditional, assertion, waitPeriod)
}
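
// listenForResponse waits until toNode receives a PexResponse from fromNode
// containing exactly the given addresses.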
func (r *reactorTestSuite) listenForResponse(
    t *testing.T,
    fromNode, toNode int,
    waitPeriod time.Duration,
    addresses []p2pproto.PexAddress,
) {
    r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
    to, from := r.checkNodePair(t, toNode, fromNode)
    conditional := func(msg p2p.Envelope) bool {
        _, ok := msg.Message.(*p2pproto.PexResponse)
        r.logger.Info("received message", "msg", msg, "ok", ok)
        return ok && msg.From == from
    }
    assertion := func(t *testing.T, msg p2p.Envelope) bool {
        require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
        return true
    }
    r.listenFor(t, to, conditional, assertion, waitPeriod)
}
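
// listenForPeerUpdate waits until onNode reports a peer update for withNode
// with the expected status.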
func (r *reactorTestSuite) listenForPeerUpdate(
    t *testing.T,
    onNode, withNode int,
    status p2p.PeerStatus,
    waitPeriod time.Duration,
) {
    on, with := r.checkNodePair(t, onNode, withNode)
    sub := r.network.Nodes[on].PeerManager.Subscribe()
    defer sub.Close()
    timesUp := time.After(waitPeriod)
    for {
        select {
        case peerUpdate := <-sub.Updates():
            if peerUpdate.NodeID == with {
                require.Equal(t, status, peerUpdate.Status)
                return
            }
        case <-timesUp:
            require.Fail(t, "timed out waiting for peer status", "%v with status %v",
                with, status)
            return
        }
    }
}
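
// getAddressesFor returns the PexAddress representation of the nodes at the
// given indices.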
func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress {
    addresses := make([]p2pproto.PexAddress, len(nodes))
    for idx, node := range nodes {
        nodeID := r.nodes[node]
        addresses[idx] = p2pproto.PexAddress{
            URL: r.network.Nodes[nodeID].NodeAddress.String(),
        }
    }
    return addresses
}
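
// sendRequest sends a PexRequest from fromNode to toNode.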
func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int) {
    to, from := r.checkNodePair(t, toNode, fromNode)
    r.pexChannels[from].Out <- p2p.Envelope{
        To:      to,
        Message: &p2pproto.PexRequest{},
    }
}
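
// sendResponse sends a PexResponse from fromNode to toNode containing the
// addresses of withNodes.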
func (r *reactorTestSuite) sendResponse(
    t *testing.T,
    fromNode, toNode int,
    withNodes []int,
) {
    from, to := r.checkNodePair(t, fromNode, toNode)
    addrs := r.getAddressesFor(withNodes)
    r.pexChannels[from].Out <- p2p.Envelope{
        To: to,
        Message: &p2pproto.PexResponse{
            Addresses: addrs,
        },
    }
}
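
// requireNumberOfPeers waits until the peer manager of the node at nodeIndex
// knows about at least numPeers peers.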
func (r *reactorTestSuite) requireNumberOfPeers(
    t *testing.T,
    nodeIndex, numPeers int,
    waitPeriod time.Duration,
) {
    t.Helper()
    require.Eventuallyf(t, func() bool {
        actualNumPeers := len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers())
        return actualNumPeers >= numPeers
    }, waitPeriod, checkFrequency, "peer failed to connect with the asserted amount of peers "+
        "index=%d, node=%q, waitPeriod=%s expected=%d actual=%d",
        nodeIndex, r.nodes[nodeIndex], waitPeriod, numPeers,
        len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers()),
    )
}
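
// connectAll connects every node in the network to every other node.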
func (r *reactorTestSuite) connectAll(t *testing.T) {
    r.connectN(t, r.total-1)
}

// connectN connects each node with n other nodes
func (r *reactorTestSuite) connectN(t *testing.T, n int) {
    if n >= r.total {
        require.Fail(t, "connectN: n must be less than the total size of the network")
    }

    for i := 0; i < r.total; i++ {
        for j := 0; j < n; j++ {
            r.connectPeers(t, i, (i+j+1)%r.total)
        }
    }
}

// connectPeers connects sourceNode to targetNode
func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int) {
    t.Helper()
    node1, node2 := r.checkNodePair(t, sourceNode, targetNode)
    r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode)

    n1 := r.network.Nodes[node1]
    if n1 == nil {
        require.Fail(t, "connectPeers: source node %v is not part of the testnet", node1)
        return
    }

    n2 := r.network.Nodes[node2]
    if n2 == nil {
        require.Fail(t, "connectPeers: target node %v is not part of the testnet", node2)
        return
    }

    sourceSub := n1.PeerManager.Subscribe()
    defer sourceSub.Close()
    targetSub := n2.PeerManager.Subscribe()
    defer targetSub.Close()

    sourceAddress := n1.NodeAddress
    r.logger.Debug("source address", "address", sourceAddress)
    targetAddress := n2.NodeAddress
    r.logger.Debug("target address", "address", targetAddress)

    added, err := n1.PeerManager.Add(targetAddress)
    require.NoError(t, err)
    if !added {
        r.logger.Debug("nodes already know about one another",
            "sourceNode", sourceNode, "targetNode", targetNode)
        return
    }

    select {
    case peerUpdate := <-targetSub.Updates():
        require.Equal(t, p2p.PeerUpdate{
            NodeID: node1,
            Status: p2p.PeerStatusUp,
        }, peerUpdate)
        r.logger.Debug("target connected with source")
    case <-time.After(2 * time.Second):
        require.Fail(t, "timed out waiting for peer", "%v accepting %v",
            targetNode, sourceNode)
    }

    select {
    case peerUpdate := <-sourceSub.Updates():
        require.Equal(t, p2p.PeerUpdate{
            NodeID: node2,
            Status: p2p.PeerStatusUp,
        }, peerUpdate)
        r.logger.Debug("source connected with target")
    case <-time.After(2 * time.Second):
        require.Fail(t, "timed out waiting for peer", "%v dialing %v",
            sourceNode, targetNode)
    }

    added, err = n2.PeerManager.Add(sourceAddress)
    require.NoError(t, err)
    require.True(t, added)
}
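
// checkNodePair validates that the two node indices are distinct and in range,
// and returns their node IDs.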
func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) {
    require.NotEqual(t, first, second)
    require.Less(t, first, r.total)
    require.Less(t, second, r.total)
    return r.nodes[first], r.nodes[second]
}
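
// addAddresses adds the addresses of the nodes at the given indices to node's
// peer manager.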
func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
    peerManager := r.network.Nodes[r.nodes[node]].PeerManager
    for _, addr := range addrs {
        require.Less(t, addr, r.total)
        address := r.network.Nodes[r.nodes[addr]].NodeAddress
        added, err := peerManager.Add(address)
        require.NoError(t, err)
        require.True(t, added)
    }
}
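
// newNodeID constructs a deterministic node ID by repeating the given
// character.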
func newNodeID(t *testing.T, id string) types.NodeID {
    nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength))
    require.NoError(t, err)
    return nodeID
}
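
// randomNodeID derives a node ID from a freshly generated ed25519 key.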
func randomNodeID(t *testing.T) types.NodeID {
    return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
}