You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

782 lines
21 KiB

  1. package pex_test
  2. import (
  3. "context"
  4. "strings"
  5. "testing"
  6. "time"
  7. "github.com/stretchr/testify/require"
  8. dbm "github.com/tendermint/tm-db"
  9. "github.com/tendermint/tendermint/libs/log"
  10. "github.com/tendermint/tendermint/p2p"
  11. "github.com/tendermint/tendermint/p2p/p2ptest"
  12. "github.com/tendermint/tendermint/p2p/pex"
  13. proto "github.com/tendermint/tendermint/proto/tendermint/p2p"
  14. )
// Timing and node-index constants shared by all reactor tests.
const (
	// checkFrequency is the polling interval for Eventually-style assertions.
	checkFrequency = 500 * time.Millisecond
	// defaultBufferSize is used when testOptions.BufferSize is zero.
	defaultBufferSize = 2
	// shortWait/longWait bound how long tests wait for network events.
	shortWait = 10 * time.Second
	longWait  = 60 * time.Second

	// Readable aliases for indices into reactorTestSuite.nodes.
	firstNode  = 0
	secondNode = 1
	thirdNode  = 2
	fourthNode = 3
)
  25. func TestReactorBasic(t *testing.T) {
  26. // start a network with one mock reactor and one "real" reactor
  27. testNet := setupNetwork(t, testOptions{
  28. MockNodes: 1,
  29. TotalNodes: 2,
  30. })
  31. testNet.connectAll(t)
  32. testNet.start(t)
  33. // assert that the mock node receives a request from the real node
  34. testNet.listenForRequest(t, secondNode, firstNode, shortWait)
  35. // assert that when a mock node sends a request it receives a response (and
  36. // the correct one)
  37. testNet.sendRequest(t, firstNode, secondNode, true)
  38. testNet.listenForResponse(t, secondNode, firstNode, shortWait, []proto.PexAddressV2(nil))
  39. }
  40. func TestReactorConnectFullNetwork(t *testing.T) {
  41. testNet := setupNetwork(t, testOptions{
  42. TotalNodes: 8,
  43. })
  44. // make every node be only connected with one other node (it actually ends up
  45. // being two because of two way connections but oh well)
  46. testNet.connectN(t, 1)
  47. testNet.start(t)
  48. // assert that all nodes add each other in the network
  49. for idx := 0; idx < len(testNet.nodes); idx++ {
  50. testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
  51. }
  52. }
  53. func TestReactorSendsRequestsTooOften(t *testing.T) {
  54. r := setupSingle(t)
  55. badNode := newNodeID(t, "b")
  56. r.pexInCh <- p2p.Envelope{
  57. From: badNode,
  58. Message: &proto.PexRequestV2{},
  59. }
  60. resp := <-r.pexOutCh
  61. msg, ok := resp.Message.(*proto.PexResponseV2)
  62. require.True(t, ok)
  63. require.Empty(t, msg.Addresses)
  64. r.pexInCh <- p2p.Envelope{
  65. From: badNode,
  66. Message: &proto.PexRequestV2{},
  67. }
  68. peerErr := <-r.pexErrCh
  69. require.Error(t, peerErr.Err)
  70. require.Empty(t, r.pexOutCh)
  71. require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
  72. require.Equal(t, badNode, peerErr.NodeID)
  73. }
  74. func TestReactorSendsResponseWithoutRequest(t *testing.T) {
  75. testNet := setupNetwork(t, testOptions{
  76. MockNodes: 1,
  77. TotalNodes: 3,
  78. })
  79. testNet.connectAll(t)
  80. testNet.start(t)
  81. // firstNode sends the secondNode an unrequested response
  82. // NOTE: secondNode will send a request by default during startup so we send
  83. // two responses to counter that.
  84. testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true)
  85. testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode}, true)
  86. // secondNode should evict the firstNode
  87. testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
  88. }
  89. func TestReactorNeverSendsTooManyPeers(t *testing.T) {
  90. testNet := setupNetwork(t, testOptions{
  91. MockNodes: 1,
  92. TotalNodes: 2,
  93. })
  94. testNet.connectAll(t)
  95. testNet.start(t)
  96. testNet.addNodes(t, 110)
  97. nodes := make([]int, 110)
  98. for i := 0; i < len(nodes); i++ {
  99. nodes[i] = i + 2
  100. }
  101. testNet.addAddresses(t, secondNode, nodes)
  102. // first we check that even although we have 110 peers, honest pex reactors
  103. // only send 100 (test if secondNode sends firstNode 100 addresses)
  104. testNet.pingAndlistenForNAddresses(t, secondNode, firstNode, shortWait, 100)
  105. }
  106. func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
  107. testNet := setupNetwork(t, testOptions{
  108. MockNodes: 1,
  109. TotalNodes: 2,
  110. })
  111. testNet.connectAll(t)
  112. testNet.start(t)
  113. testNet.addNodes(t, 110)
  114. nodes := make([]int, 110)
  115. for i := 0; i < len(nodes); i++ {
  116. nodes[i] = i + 2
  117. }
  118. // now we send a response with more than 100 peers
  119. testNet.sendResponse(t, firstNode, secondNode, nodes, true)
  120. // secondNode should evict the firstNode
  121. testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
  122. }
  123. func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) {
  124. testNet := setupNetwork(t, testOptions{
  125. TotalNodes: 16,
  126. MaxPeers: 8,
  127. MaxConnected: 6,
  128. BufferSize: 8,
  129. })
  130. testNet.connectN(t, 1)
  131. testNet.start(t)
  132. // test that all nodes reach full capacity
  133. for _, nodeID := range testNet.nodes {
  134. require.Eventually(t, func() bool {
  135. // nolint:scopelint
  136. return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9
  137. }, longWait, checkFrequency)
  138. }
  139. }
  140. func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
  141. testNet := setupNetwork(t, testOptions{
  142. TotalNodes: 10,
  143. MaxPeers: 100,
  144. MaxConnected: 100,
  145. BufferSize: 10,
  146. })
  147. testNet.connectN(t, 1)
  148. testNet.start(t)
  149. // assert that all nodes add each other in the network
  150. for idx := 0; idx < len(testNet.nodes); idx++ {
  151. testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
  152. }
  153. }
  154. func TestReactorWithNetworkGrowth(t *testing.T) {
  155. testNet := setupNetwork(t, testOptions{
  156. TotalNodes: 5,
  157. BufferSize: 5,
  158. })
  159. testNet.connectAll(t)
  160. testNet.start(t)
  161. // assert that all nodes add each other in the network
  162. for idx := 0; idx < len(testNet.nodes); idx++ {
  163. testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, shortWait)
  164. }
  165. // now we inject 10 more nodes
  166. testNet.addNodes(t, 10)
  167. for i := 5; i < testNet.total; i++ {
  168. node := testNet.nodes[i]
  169. require.NoError(t, testNet.reactors[node].Start())
  170. require.True(t, testNet.reactors[node].IsRunning())
  171. // we connect all new nodes to a single entry point and check that the
  172. // node can distribute the addresses to all the others
  173. testNet.connectPeers(t, 0, i)
  174. }
  175. require.Len(t, testNet.reactors, 15)
  176. // assert that all nodes add each other in the network
  177. for idx := 0; idx < len(testNet.nodes); idx++ {
  178. testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
  179. }
  180. }
  181. func TestReactorIntegrationWithLegacyHandleRequest(t *testing.T) {
  182. testNet := setupNetwork(t, testOptions{
  183. MockNodes: 1,
  184. TotalNodes: 3,
  185. })
  186. testNet.connectAll(t)
  187. testNet.start(t)
  188. t.Log(testNet.nodes)
  189. // mock node sends a V1 Pex message to the second node
  190. testNet.sendRequest(t, firstNode, secondNode, false)
  191. addrs := testNet.getAddressesFor(t, []int{thirdNode})
  192. testNet.listenForLegacyResponse(t, secondNode, firstNode, shortWait, addrs)
  193. }
  194. func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) {
  195. testNet := setupNetwork(t, testOptions{
  196. MockNodes: 1,
  197. TotalNodes: 4,
  198. BufferSize: 4,
  199. })
  200. testNet.connectPeers(t, firstNode, secondNode)
  201. testNet.connectPeers(t, firstNode, thirdNode)
  202. testNet.connectPeers(t, firstNode, fourthNode)
  203. testNet.start(t)
  204. testNet.listenForRequest(t, secondNode, firstNode, shortWait)
  205. // send a v1 response instead
  206. testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode, fourthNode}, false)
  207. testNet.requireNumberOfPeers(t, secondNode, len(testNet.nodes)-1, shortWait)
  208. }
// singleTestReactor bundles a lone pex reactor with direct handles to the
// raw channels it reads from and writes to, letting a test impersonate the
// peer on the other end without a full test network.
type singleTestReactor struct {
	reactor  *pex.ReactorV2
	pexInCh  chan p2p.Envelope  // envelopes delivered to the reactor
	pexOutCh chan p2p.Envelope  // envelopes sent by the reactor
	pexErrCh chan p2p.PeerError // peer errors raised by the reactor
	pexCh    *p2p.Channel
}
// setupSingle creates a started pex reactor backed by an in-memory peer
// store and raw in/out/error channels. The reactor is stopped and its
// channels closed via t.Cleanup.
func setupSingle(t *testing.T) *singleTestReactor {
	t.Helper()
	nodeID := newNodeID(t, "a")
	chBuf := 2
	pexInCh := make(chan p2p.Envelope, chBuf)
	pexOutCh := make(chan p2p.Envelope, chBuf)
	pexErrCh := make(chan p2p.PeerError, chBuf)
	pexCh := p2p.NewChannel(
		p2p.ChannelID(pex.PexChannel),
		new(proto.PexMessage),
		pexInCh,
		pexOutCh,
		pexErrCh,
	)
	peerUpdates := p2p.NewPeerUpdates(make(chan p2p.PeerUpdate), chBuf)
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
	require.NoError(t, err)
	reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates)
	require.NoError(t, reactor.Start())
	t.Cleanup(func() {
		// Stop the reactor before closing the channels it uses.
		err := reactor.Stop()
		if err != nil {
			t.Fatal(err)
		}
		pexCh.Close()
		peerUpdates.Close()
	})
	return &singleTestReactor{
		reactor:  reactor,
		pexInCh:  pexInCh,
		pexOutCh: pexOutCh,
		pexErrCh: pexErrCh,
		pexCh:    pexCh,
	}
}
// reactorTestSuite is a test network for exercising the pex reactor. The
// first entries of nodes are "hollow" mock nodes the test drives directly;
// the remaining nodes run real reactors.
type reactorTestSuite struct {
	network *p2ptest.Network
	logger  log.Logger
	// reactors holds the real pex reactors keyed by node ID; mock nodes
	// have no entry here.
	reactors    map[p2p.NodeID]*pex.ReactorV2
	pexChannels map[p2p.NodeID]*p2p.Channel

	peerChans   map[p2p.NodeID]chan p2p.PeerUpdate
	peerUpdates map[p2p.NodeID]*p2p.PeerUpdates

	nodes []p2p.NodeID // all node IDs, mocks first
	mocks []p2p.NodeID // IDs of the mock nodes only
	total int          // total node count, kept in sync with len(nodes)
	opts  testOptions  // options the suite was created with
}
// testOptions configures setupNetwork.
type testOptions struct {
	MockNodes    int    // number of hollow nodes the test drives directly
	TotalNodes   int    // total nodes including mocks; must exceed MockNodes
	BufferSize   int    // channel buffer size (0 means defaultBufferSize)
	MaxPeers     uint16 // peer-store capacity per node (0 = library default)
	MaxConnected uint16 // connection cap per node (0 = library default)
}
// setupNetwork sets up a test suite with a network of nodes. Mock nodes
// represent the hollow nodes that the test can listen and send on; the
// remaining nodes get real pex reactors (created here but not started —
// call start for that). All reactors and channels are torn down via
// t.Cleanup.
func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
	t.Helper()
	require.Greater(t, opts.TotalNodes, opts.MockNodes)
	if opts.BufferSize == 0 {
		opts.BufferSize = defaultBufferSize
	}
	networkOpts := p2ptest.NetworkOptions{
		NumNodes:   opts.TotalNodes,
		BufferSize: opts.BufferSize,
		NodeOpts: p2ptest.NodeOptions{
			MaxPeers:     opts.MaxPeers,
			MaxConnected: opts.MaxConnected,
		},
	}
	chBuf := opts.BufferSize
	realNodes := opts.TotalNodes - opts.MockNodes
	rts := &reactorTestSuite{
		logger:      log.TestingLogger().With("testCase", t.Name()),
		network:     p2ptest.MakeNetwork(t, networkOpts),
		reactors:    make(map[p2p.NodeID]*pex.ReactorV2, realNodes),
		pexChannels: make(map[p2p.NodeID]*p2p.Channel, opts.TotalNodes),
		peerChans:   make(map[p2p.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
		peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
		total:       opts.TotalNodes,
		opts:        opts,
	}
	// NOTE: we don't assert that the channels get drained after stopping the
	// reactor
	rts.pexChannels = rts.network.MakeChannelsNoCleanup(
		t, pex.ChannelDescriptor(), new(proto.PexMessage), chBuf,
	)
	idx := 0
	for nodeID := range rts.network.Nodes {
		rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
		rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
		rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])
		// the first nodes in the array are always mock nodes
		// NOTE(review): map iteration order is random, so WHICH nodes become
		// mocks is arbitrary per run; only the invariant "mocks == first
		// MockNodes entries of rts.nodes" holds.
		if idx < opts.MockNodes {
			rts.mocks = append(rts.mocks, nodeID)
		} else {
			rts.reactors[nodeID] = pex.NewReactorV2(
				rts.logger.With("nodeID", nodeID),
				rts.network.Nodes[nodeID].PeerManager,
				rts.pexChannels[nodeID],
				rts.peerUpdates[nodeID],
			)
		}
		rts.nodes = append(rts.nodes, nodeID)
		idx++
	}
	require.Len(t, rts.reactors, realNodes)
	t.Cleanup(func() {
		// Stop real reactors first, then close everyone's channels
		// (mock nodes have channels but no reactor).
		for nodeID, reactor := range rts.reactors {
			if reactor.IsRunning() {
				require.NoError(t, reactor.Stop())
				require.False(t, reactor.IsRunning())
			}
			rts.pexChannels[nodeID].Close()
			rts.peerUpdates[nodeID].Close()
		}
		for _, nodeID := range rts.mocks {
			rts.pexChannels[nodeID].Close()
			rts.peerUpdates[nodeID].Close()
		}
	})
	return rts
}
// start starts the pex reactor on every real (non-mock) node and asserts
// each one is running.
func (r *reactorTestSuite) start(t *testing.T) {
	t.Helper()
	for _, reactor := range r.reactors {
		require.NoError(t, reactor.Start())
		require.True(t, reactor.IsRunning())
	}
}
// addNodes appends the given number of fresh nodes to the network, each
// with its own (not yet started) pex reactor. The new nodes are not
// connected to anyone; use connectPeers for that.
func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
	t.Helper()
	for i := 0; i < nodes; i++ {
		node := r.network.MakeNode(t, p2ptest.NodeOptions{
			MaxPeers:     r.opts.MaxPeers,
			MaxConnected: r.opts.MaxConnected,
		})
		r.network.Nodes[node.NodeID] = node
		nodeID := node.NodeID
		r.pexChannels[nodeID] = node.MakeChannelNoCleanup(
			t, pex.ChannelDescriptor(), new(proto.PexMessage), r.opts.BufferSize,
		)
		r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
		r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
		// Register for peer updates before creating the reactor that uses them.
		r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID])
		r.reactors[nodeID] = pex.NewReactorV2(
			r.logger.With("nodeID", nodeID),
			r.network.Nodes[nodeID].PeerManager,
			r.pexChannels[nodeID],
			r.peerUpdates[nodeID],
		)
		r.nodes = append(r.nodes, nodeID)
		r.total++
	}
}
// listenFor consumes envelopes from node's pex channel until one both
// matches conditional and passes assertion, failing the test if that does
// not happen within waitPeriod. Envelopes rejected by conditional are
// silently discarded; an assertion returning false keeps listening.
func (r *reactorTestSuite) listenFor(
	t *testing.T,
	node p2p.NodeID,
	conditional func(msg p2p.Envelope) bool,
	assertion func(t *testing.T, msg p2p.Envelope) bool,
	waitPeriod time.Duration,
) {
	timesUp := time.After(waitPeriod)
	for {
		select {
		case envelope := <-r.pexChannels[node].In:
			if conditional(envelope) && assertion(t, envelope) {
				return
			}
		case <-timesUp:
			// require.Fail calls t.FailNow, which exits this goroutine.
			require.Fail(t, "timed out waiting for message",
				"node=%v, waitPeriod=%s", node, waitPeriod)
		}
	}
}
  392. func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, waitPeriod time.Duration) {
  393. r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
  394. to, from := r.checkNodePair(t, toNode, fromNode)
  395. conditional := func(msg p2p.Envelope) bool {
  396. _, ok := msg.Message.(*proto.PexRequestV2)
  397. return ok && msg.From == from
  398. }
  399. assertion := func(t *testing.T, msg p2p.Envelope) bool {
  400. require.Equal(t, &proto.PexRequestV2{}, msg.Message)
  401. return true
  402. }
  403. r.listenFor(t, to, conditional, assertion, waitPeriod)
  404. }
// pingAndlistenForNAddresses sends a pex request from toNode to fromNode
// and waits until fromNode replies with exactly `addresses` addresses,
// re-sending the request after a short pause whenever the reply has the
// wrong length. Fails the test if no right-sized reply arrives within
// waitPeriod.
func (r *reactorTestSuite) pingAndlistenForNAddresses(
	t *testing.T,
	fromNode, toNode int,
	waitPeriod time.Duration,
	addresses int,
) {
	r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
	to, from := r.checkNodePair(t, toNode, fromNode)
	conditional := func(msg p2p.Envelope) bool {
		_, ok := msg.Message.(*proto.PexResponseV2)
		return ok && msg.From == from
	}
	assertion := func(t *testing.T, msg p2p.Envelope) bool {
		m, ok := msg.Message.(*proto.PexResponseV2)
		if !ok {
			require.Fail(t, "expected pex response v2")
			return true
		}
		// assert the same amount of addresses
		if len(m.Addresses) == addresses {
			return true
		}
		// if we didn't get the right length, we wait and send the
		// request again
		time.Sleep(300 * time.Millisecond)
		r.sendRequest(t, toNode, fromNode, true)
		return false
	}
	r.sendRequest(t, toNode, fromNode, true)
	r.listenFor(t, to, conditional, assertion, waitPeriod)
}
  436. func (r *reactorTestSuite) listenForResponse(
  437. t *testing.T,
  438. fromNode, toNode int,
  439. waitPeriod time.Duration,
  440. addresses []proto.PexAddressV2,
  441. ) {
  442. r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
  443. to, from := r.checkNodePair(t, toNode, fromNode)
  444. conditional := func(msg p2p.Envelope) bool {
  445. _, ok := msg.Message.(*proto.PexResponseV2)
  446. r.logger.Info("message", msg, "ok", ok)
  447. return ok && msg.From == from
  448. }
  449. assertion := func(t *testing.T, msg p2p.Envelope) bool {
  450. require.Equal(t, &proto.PexResponseV2{Addresses: addresses}, msg.Message)
  451. return true
  452. }
  453. r.listenFor(t, to, conditional, assertion, waitPeriod)
  454. }
  455. func (r *reactorTestSuite) listenForLegacyResponse(
  456. t *testing.T,
  457. fromNode, toNode int,
  458. waitPeriod time.Duration,
  459. addresses []proto.PexAddress,
  460. ) {
  461. r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
  462. to, from := r.checkNodePair(t, toNode, fromNode)
  463. conditional := func(msg p2p.Envelope) bool {
  464. _, ok := msg.Message.(*proto.PexResponse)
  465. return ok && msg.From == from
  466. }
  467. assertion := func(t *testing.T, msg p2p.Envelope) bool {
  468. require.Equal(t, &proto.PexResponse{Addresses: addresses}, msg.Message)
  469. return true
  470. }
  471. r.listenFor(t, to, conditional, assertion, waitPeriod)
  472. }
// listenForPeerUpdate subscribes to onNode's peer manager and waits for a
// status update concerning withNode, asserting that it equals status.
// Fails the test if no such update arrives within waitPeriod.
func (r *reactorTestSuite) listenForPeerUpdate(
	t *testing.T,
	onNode, withNode int,
	status p2p.PeerStatus,
	waitPeriod time.Duration,
) {
	on, with := r.checkNodePair(t, onNode, withNode)
	sub := r.network.Nodes[on].PeerManager.Subscribe()
	defer sub.Close()
	timesUp := time.After(waitPeriod)
	for {
		select {
		case peerUpdate := <-sub.Updates():
			// Ignore updates about peers other than withNode.
			if peerUpdate.NodeID == with {
				require.Equal(t, status, peerUpdate.Status)
				return
			}
		case <-timesUp:
			require.Fail(t, "timed out waiting for peer status", "%v with status %v",
				with, status)
			return
		}
	}
}
  497. func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []proto.PexAddressV2 {
  498. addresses := make([]proto.PexAddressV2, len(nodes))
  499. for idx, node := range nodes {
  500. nodeID := r.nodes[node]
  501. addresses[idx] = proto.PexAddressV2{
  502. URL: r.network.Nodes[nodeID].NodeAddress.String(),
  503. }
  504. }
  505. return addresses
  506. }
  507. func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []proto.PexAddress {
  508. addresses := make([]proto.PexAddress, len(nodes))
  509. for idx, node := range nodes {
  510. nodeID := r.nodes[node]
  511. nodeAddrs := r.network.Nodes[nodeID].NodeAddress
  512. endpoints, err := nodeAddrs.Resolve(context.Background())
  513. require.NoError(t, err)
  514. require.Len(t, endpoints, 1)
  515. addresses[idx] = proto.PexAddress{
  516. ID: string(nodeAddrs.NodeID),
  517. IP: endpoints[0].IP.String(),
  518. Port: uint32(endpoints[0].Port),
  519. }
  520. }
  521. return addresses
  522. }
  523. func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bool) {
  524. to, from := r.checkNodePair(t, toNode, fromNode)
  525. if v2 {
  526. r.pexChannels[from].Out <- p2p.Envelope{
  527. To: to,
  528. Message: &proto.PexRequestV2{},
  529. }
  530. } else {
  531. r.pexChannels[from].Out <- p2p.Envelope{
  532. To: to,
  533. Message: &proto.PexRequest{},
  534. }
  535. }
  536. }
  537. func (r *reactorTestSuite) sendResponse(
  538. t *testing.T,
  539. fromNode, toNode int,
  540. withNodes []int,
  541. v2 bool,
  542. ) {
  543. from, to := r.checkNodePair(t, fromNode, toNode)
  544. if v2 {
  545. addrs := r.getV2AddressesFor(withNodes)
  546. r.pexChannels[from].Out <- p2p.Envelope{
  547. To: to,
  548. Message: &proto.PexResponseV2{
  549. Addresses: addrs,
  550. },
  551. }
  552. } else {
  553. addrs := r.getAddressesFor(t, withNodes)
  554. r.pexChannels[from].Out <- p2p.Envelope{
  555. To: to,
  556. Message: &proto.PexResponse{
  557. Addresses: addrs,
  558. },
  559. }
  560. }
  561. }
// requireNumberOfPeers waits until the node at nodeIndex knows about at
// least numPeers peers, failing the test after waitPeriod.
func (r *reactorTestSuite) requireNumberOfPeers(
	t *testing.T,
	nodeIndex, numPeers int,
	waitPeriod time.Duration,
) {
	require.Eventuallyf(t, func() bool {
		actualNumPeers := len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers())
		return actualNumPeers >= numPeers
	}, waitPeriod, checkFrequency, "peer failed to connect with the asserted amount of peers "+
		"index=%d, node=%q, waitPeriod=%s expected=%d actual=%d",
		nodeIndex, r.nodes[nodeIndex], waitPeriod, numPeers,
		// NOTE: this "actual" count is evaluated once, up front — it reports
		// the peer count at call time, not at the moment of failure.
		len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers()),
	)
}
// connectAll wires every node to every other node in the network.
func (r *reactorTestSuite) connectAll(t *testing.T) {
	r.connectN(t, r.total-1)
}
  579. // connects all nodes with n other nodes
  580. func (r *reactorTestSuite) connectN(t *testing.T, n int) {
  581. if n >= r.total {
  582. require.Fail(t, "connectN: n must be less than the size of the network - 1")
  583. }
  584. for i := 0; i < r.total; i++ {
  585. for j := 0; j < n; j++ {
  586. r.connectPeers(t, i, (i+j+1)%r.total)
  587. }
  588. }
  589. }
// connectPeers connects the node at sourceNode to the node at targetNode by
// adding the target's address to the source's peer manager, then waits for
// both sides to report the connection as up. If the source already knew the
// target's address it returns immediately without waiting.
func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int) {
	t.Helper()
	node1, node2 := r.checkNodePair(t, sourceNode, targetNode)
	r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode)
	n1 := r.network.Nodes[node1]
	if n1 == nil {
		require.Fail(t, "connectPeers: source node %v is not part of the testnet", node1)
		return
	}
	n2 := r.network.Nodes[node2]
	if n2 == nil {
		require.Fail(t, "connectPeers: target node %v is not part of the testnet", node2)
		return
	}
	// Subscribe on both sides before dialing so no PeerStatusUp update is missed.
	sourceSub := n1.PeerManager.Subscribe()
	defer sourceSub.Close()
	targetSub := n2.PeerManager.Subscribe()
	defer targetSub.Close()
	sourceAddress := n1.NodeAddress
	r.logger.Debug("source address", "address", sourceAddress)
	targetAddress := n2.NodeAddress
	r.logger.Debug("target address", "address", targetAddress)
	added, err := n1.PeerManager.Add(targetAddress)
	require.NoError(t, err)
	if !added {
		// The source already had this address, so no new dial will happen.
		r.logger.Debug("nodes already know about one another",
			"sourceNode", sourceNode, "targetNode", targetNode)
		return
	}
	// Wait for the target to see the source come up.
	select {
	case peerUpdate := <-targetSub.Updates():
		require.Equal(t, p2p.PeerUpdate{
			NodeID: node1,
			Status: p2p.PeerStatusUp,
		}, peerUpdate)
		r.logger.Debug("target connected with source")
	case <-time.After(time.Second):
		require.Fail(t, "timed out waiting for peer", "%v accepting %v",
			targetNode, sourceNode)
	}
	// Wait for the source to see the target come up.
	select {
	case peerUpdate := <-sourceSub.Updates():
		require.Equal(t, p2p.PeerUpdate{
			NodeID: node2,
			Status: p2p.PeerStatusUp,
		}, peerUpdate)
		r.logger.Debug("source connected with target")
	case <-time.After(time.Second):
		require.Fail(t, "timed out waiting for peer", "%v dialing %v",
			sourceNode, targetNode)
	}
	// Record the reverse address too, so the target can redial the source.
	added, err = n2.PeerManager.Add(sourceAddress)
	require.NoError(t, err)
	require.True(t, added)
}
  646. // nolint: unused
  647. func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto.PexAddress {
  648. var addresses []proto.PexAddress
  649. for _, i := range nodeIndices {
  650. if i < len(r.nodes) {
  651. require.Fail(t, "index for pex address is greater than number of nodes")
  652. }
  653. nodeAddrs := r.network.Nodes[r.nodes[i]].NodeAddress
  654. ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
  655. endpoints, err := nodeAddrs.Resolve(ctx)
  656. cancel()
  657. require.NoError(t, err)
  658. for _, endpoint := range endpoints {
  659. if endpoint.IP != nil {
  660. addresses = append(addresses, proto.PexAddress{
  661. ID: string(nodeAddrs.NodeID),
  662. IP: endpoint.IP.String(),
  663. Port: uint32(endpoint.Port),
  664. })
  665. }
  666. }
  667. }
  668. return addresses
  669. }
// checkNodePair asserts that first and second are distinct, in-range node
// indices and returns the corresponding node IDs.
func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (p2p.NodeID, p2p.NodeID) {
	require.NotEqual(t, first, second)
	require.Less(t, first, r.total)
	require.Less(t, second, r.total)
	return r.nodes[first], r.nodes[second]
}
  676. func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
  677. peerManager := r.network.Nodes[r.nodes[node]].PeerManager
  678. for _, addr := range addrs {
  679. require.Less(t, addr, r.total)
  680. address := r.network.Nodes[r.nodes[addr]].NodeAddress
  681. added, err := peerManager.Add(address)
  682. require.NoError(t, err)
  683. require.True(t, added)
  684. }
  685. }
  686. func newNodeID(t *testing.T, id string) p2p.NodeID {
  687. nodeID, err := p2p.NewNodeID(strings.Repeat(id, 2*p2p.NodeIDByteLength))
  688. require.NoError(t, err)
  689. return nodeID
  690. }