
700 lines · 19 KiB

cleanup: Reduce and normalize import path aliasing. (#6975)

The code in the Tendermint repository makes heavy use of import aliasing. This is made necessary by our extensive reuse of common base package names, and by repetition of similar names across different subdirectories. Unfortunately we have not been very consistent about which packages we alias in various circumstances, and the aliases we use vary.

In the spirit of the advice in the style guide and https://github.com/golang/go/wiki/CodeReviewComments#imports, this change makes an effort to clean up and normalize import aliasing.

This change makes no API or behavioral changes. It is a pure cleanup intended to help make the code more readable to developers (including myself) trying to understand what is being imported where. Only unexported names have been modified, and the changes were generated and applied mechanically with gofmt -r and comby, respecting the lexical and syntactic rules of Go. Even so, I did not fix every inconsistency. Where the changes would be too disruptive, I left it alone.

The principles I followed in this cleanup are:

- Remove aliases that restate the package name.
- Remove aliases where the base package name is unambiguous.
- Move overly-terse abbreviations from the import to the usage site.
- Fix lexical issues (remove underscores, remove capitalization).
- Fix import groupings to more closely match the style guide.
- Group blank (side-effecting) imports and ensure they are commented.
- Add aliases to multiple imports with the same base package name.
3 years ago
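As a hedged illustration (not taken from the commit itself), the principles read roughly as follows when applied to an import block like the one in this file. The grouping, comments, and the blank-identifier references are assumptions added so the fragment compiles on its own.

package pex_test

import (
    // Standard library first; the base names are unambiguous, so no aliases.
    "testing"

    // Third-party: "require" is unambiguous, so no alias is added;
    // "tm-db" is not a usable identifier, so the dbm alias stays.
    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"

    // Project packages: proto/tendermint/p2p shares its base name with
    // internal/p2p, so it keeps an explicit alias.
    "github.com/tendermint/tendermint/internal/p2p"
    p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

// Blank-identifier uses so this illustrative fragment compiles in isolation.
var (
    _ = testing.Short
    _ = require.True
    _ = dbm.NewMemDB
    _ = p2p.PeerStatusUp
    _ = p2pproto.PexRequest{}
)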
package pex_test

import (
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    dbm "github.com/tendermint/tm-db"

    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/p2ptest"
    "github.com/tendermint/tendermint/internal/p2p/pex"
    "github.com/tendermint/tendermint/libs/log"
    p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

const (
    checkFrequency    = 500 * time.Millisecond
    defaultBufferSize = 2
    shortWait         = 10 * time.Second
    longWait          = 60 * time.Second

    firstNode  = 0
    secondNode = 1
    thirdNode  = 2
)
func TestReactorBasic(t *testing.T) {
    // start a network with one mock reactor and one "real" reactor
    testNet := setupNetwork(t, testOptions{
        MockNodes:  1,
        TotalNodes: 2,
    })
    testNet.connectAll(t)
    testNet.start(t)

    // assert that the mock node receives a request from the real node
    testNet.listenForRequest(t, secondNode, firstNode, shortWait)

    // assert that when a mock node sends a request it receives a response (and
    // the correct one)
    testNet.sendRequest(t, firstNode, secondNode)
    testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddress(nil))
}

func TestReactorConnectFullNetwork(t *testing.T) {
    testNet := setupNetwork(t, testOptions{
        TotalNodes: 4,
    })

    // make every node be only connected with one other node (it actually ends up
    // being two because of two-way connections but oh well)
    testNet.connectN(t, 1)
    testNet.start(t)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
    }
}

func TestReactorSendsRequestsTooOften(t *testing.T) {
    r := setupSingle(t)

    badNode := newNodeID(t, "b")

    r.pexInCh <- p2p.Envelope{
        From:    badNode,
        Message: &p2pproto.PexRequest{},
    }

    resp := <-r.pexOutCh
    msg, ok := resp.Message.(*p2pproto.PexResponse)
    require.True(t, ok)
    require.Empty(t, msg.Addresses)

    r.pexInCh <- p2p.Envelope{
        From:    badNode,
        Message: &p2pproto.PexRequest{},
    }

    peerErr := <-r.pexErrCh
    require.Error(t, peerErr.Err)
    require.Empty(t, r.pexOutCh)
    require.Contains(t, peerErr.Err.Error(), "peer sent a request too close after a prior one")
    require.Equal(t, badNode, peerErr.NodeID)
}

func TestReactorSendsResponseWithoutRequest(t *testing.T) {
    testNet := setupNetwork(t, testOptions{
        MockNodes:  1,
        TotalNodes: 3,
    })
    testNet.connectAll(t)
    testNet.start(t)

    // firstNode sends the secondNode an unrequested response
    // NOTE: secondNode will send a request by default during startup so we send
    // two responses to counter that.
    testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode})
    testNet.sendResponse(t, firstNode, secondNode, []int{thirdNode})

    // secondNode should evict the firstNode
    testNet.listenForPeerUpdate(t, secondNode, firstNode, p2p.PeerStatusDown, shortWait)
}
func TestReactorNeverSendsTooManyPeers(t *testing.T) {
    testNet := setupNetwork(t, testOptions{
        MockNodes:  1,
        TotalNodes: 2,
    })
    testNet.connectAll(t)
    testNet.start(t)

    testNet.addNodes(t, 110)
    nodes := make([]int, 110)
    for i := 0; i < len(nodes); i++ {
        nodes[i] = i + 2
    }
    testNet.addAddresses(t, secondNode, nodes)

    // first we check that even though we have 110 peers, honest pex reactors
    // only send 100 (test if secondNode sends firstNode 100 addresses)
    testNet.pingAndlistenForNAddresses(t, secondNode, firstNode, shortWait, 100)
}
func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
    r := setupSingle(t)
    peer := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
    added, err := r.manager.Add(peer)
    require.NoError(t, err)
    require.True(t, added)

    addresses := make([]p2pproto.PexAddress, 101)
    for i := 0; i < len(addresses); i++ {
        nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
        addresses[i] = p2pproto.PexAddress{
            URL: nodeAddress.String(),
        }
    }

    r.peerCh <- p2p.PeerUpdate{
        NodeID: peer.NodeID,
        Status: p2p.PeerStatusUp,
    }

    select {
    // wait for a request and then send a response with too many addresses
    case req := <-r.pexOutCh:
        if _, ok := req.Message.(*p2pproto.PexRequest); !ok {
            t.Fatal("expected v2 pex request")
        }
        r.pexInCh <- p2p.Envelope{
            From: peer.NodeID,
            Message: &p2pproto.PexResponse{
                Addresses: addresses,
            },
        }

    case <-time.After(10 * time.Second):
        t.Fatal("pex failed to send a request within 10 seconds")
    }

    peerErr := <-r.pexErrCh
    require.Error(t, peerErr.Err)
    require.Empty(t, r.pexOutCh)
    require.Contains(t, peerErr.Err.Error(), "peer sent too many addresses")
    require.Equal(t, peer.NodeID, peerErr.NodeID)
}

func TestReactorSmallPeerStoreInALargeNetwork(t *testing.T) {
    testNet := setupNetwork(t, testOptions{
        TotalNodes:   8,
        MaxPeers:     4,
        MaxConnected: 3,
        BufferSize:   8,
    })
    testNet.connectN(t, 1)
    testNet.start(t)

    // test that all nodes reach full capacity
    for _, nodeID := range testNet.nodes {
        require.Eventually(t, func() bool {
            // nolint:scopelint
            return testNet.network.Nodes[nodeID].PeerManager.PeerRatio() >= 0.9
        }, longWait, checkFrequency)
    }
}

func TestReactorLargePeerStoreInASmallNetwork(t *testing.T) {
    testNet := setupNetwork(t, testOptions{
        TotalNodes:   3,
        MaxPeers:     25,
        MaxConnected: 25,
        BufferSize:   5,
    })
    testNet.connectN(t, 1)
    testNet.start(t)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
    }
}

func TestReactorWithNetworkGrowth(t *testing.T) {
    testNet := setupNetwork(t, testOptions{
        TotalNodes: 5,
        BufferSize: 5,
    })
    testNet.connectAll(t)
    testNet.start(t)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, shortWait)
    }

    // now we inject 10 more nodes
    testNet.addNodes(t, 10)
    for i := 5; i < testNet.total; i++ {
        node := testNet.nodes[i]
        require.NoError(t, testNet.reactors[node].Start())
        require.True(t, testNet.reactors[node].IsRunning())
        // we connect all new nodes to a single entry point and check that the
        // node can distribute the addresses to all the others
        testNet.connectPeers(t, 0, i)
    }
    require.Len(t, testNet.reactors, 15)

    // assert that all nodes add each other in the network
    for idx := 0; idx < len(testNet.nodes); idx++ {
        testNet.requireNumberOfPeers(t, idx, len(testNet.nodes)-1, longWait)
    }
}
type singleTestReactor struct {
    reactor  *pex.Reactor
    pexInCh  chan p2p.Envelope
    pexOutCh chan p2p.Envelope
    pexErrCh chan p2p.PeerError
    pexCh    *p2p.Channel
    peerCh   chan p2p.PeerUpdate
    manager  *p2p.PeerManager
}

func setupSingle(t *testing.T) *singleTestReactor {
    t.Helper()
    nodeID := newNodeID(t, "a")
    chBuf := 2
    pexInCh := make(chan p2p.Envelope, chBuf)
    pexOutCh := make(chan p2p.Envelope, chBuf)
    pexErrCh := make(chan p2p.PeerError, chBuf)
    pexCh := p2p.NewChannel(
        p2p.ChannelID(pex.PexChannel),
        new(p2pproto.PexMessage),
        pexInCh,
        pexOutCh,
        pexErrCh,
    )

    peerCh := make(chan p2p.PeerUpdate, chBuf)
    peerUpdates := p2p.NewPeerUpdates(peerCh, chBuf)
    peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
    require.NoError(t, err)

    reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
    require.NoError(t, reactor.Start())
    t.Cleanup(func() {
        err := reactor.Stop()
        if err != nil {
            t.Fatal(err)
        }
        pexCh.Close()
        peerUpdates.Close()
    })

    return &singleTestReactor{
        reactor:  reactor,
        pexInCh:  pexInCh,
        pexOutCh: pexOutCh,
        pexErrCh: pexErrCh,
        pexCh:    pexCh,
        peerCh:   peerCh,
        manager:  peerManager,
    }
}

type reactorTestSuite struct {
    network *p2ptest.Network
    logger  log.Logger

    reactors    map[types.NodeID]*pex.Reactor
    pexChannels map[types.NodeID]*p2p.Channel

    peerChans   map[types.NodeID]chan p2p.PeerUpdate
    peerUpdates map[types.NodeID]*p2p.PeerUpdates

    nodes []types.NodeID
    mocks []types.NodeID
    total int
    opts  testOptions
}

type testOptions struct {
    MockNodes    int
    TotalNodes   int
    BufferSize   int
    MaxPeers     uint16
    MaxConnected uint16
}
// setupNetwork sets up a test suite with a network of nodes. MockNodes
// represent the hollow nodes that the test can listen and send on.
func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
    t.Helper()

    require.Greater(t, opts.TotalNodes, opts.MockNodes)
    if opts.BufferSize == 0 {
        opts.BufferSize = defaultBufferSize
    }
    networkOpts := p2ptest.NetworkOptions{
        NumNodes:   opts.TotalNodes,
        BufferSize: opts.BufferSize,
        NodeOpts: p2ptest.NodeOptions{
            MaxPeers:     opts.MaxPeers,
            MaxConnected: opts.MaxConnected,
        },
    }
    chBuf := opts.BufferSize
    realNodes := opts.TotalNodes - opts.MockNodes

    rts := &reactorTestSuite{
        logger:      log.TestingLogger().With("testCase", t.Name()),
        network:     p2ptest.MakeNetwork(t, networkOpts),
        reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
        pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
        peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
        peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
        total:       opts.TotalNodes,
        opts:        opts,
    }

    // NOTE: we don't assert that the channels get drained after stopping the
    // reactor
    rts.pexChannels = rts.network.MakeChannelsNoCleanup(t, pex.ChannelDescriptor())

    idx := 0
    for nodeID := range rts.network.Nodes {
        rts.peerChans[nodeID] = make(chan p2p.PeerUpdate, chBuf)
        rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], chBuf)
        rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID])

        // the first nodes in the array are always mock nodes
        if idx < opts.MockNodes {
            rts.mocks = append(rts.mocks, nodeID)
        } else {
            rts.reactors[nodeID] = pex.NewReactor(
                rts.logger.With("nodeID", nodeID),
                rts.network.Nodes[nodeID].PeerManager,
                rts.pexChannels[nodeID],
                rts.peerUpdates[nodeID],
            )
        }
        rts.nodes = append(rts.nodes, nodeID)
        idx++
    }

    require.Len(t, rts.reactors, realNodes)

    t.Cleanup(func() {
        for nodeID, reactor := range rts.reactors {
            if reactor.IsRunning() {
                require.NoError(t, reactor.Stop())
                require.False(t, reactor.IsRunning())
            }
            rts.pexChannels[nodeID].Close()
            rts.peerUpdates[nodeID].Close()
        }
        for _, nodeID := range rts.mocks {
            rts.pexChannels[nodeID].Close()
            rts.peerUpdates[nodeID].Close()
        }
    })

    return rts
}
// starts up the pex reactors for each node
func (r *reactorTestSuite) start(t *testing.T) {
    t.Helper()

    for _, reactor := range r.reactors {
        require.NoError(t, reactor.Start())
        require.True(t, reactor.IsRunning())
    }
}

func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
    t.Helper()

    for i := 0; i < nodes; i++ {
        node := r.network.MakeNode(t, p2ptest.NodeOptions{
            MaxPeers:     r.opts.MaxPeers,
            MaxConnected: r.opts.MaxConnected,
        })
        r.network.Nodes[node.NodeID] = node
        nodeID := node.NodeID
        r.pexChannels[nodeID] = node.MakeChannelNoCleanup(t, pex.ChannelDescriptor())
        r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
        r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
        r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID])
        r.reactors[nodeID] = pex.NewReactor(
            r.logger.With("nodeID", nodeID),
            r.network.Nodes[nodeID].PeerManager,
            r.pexChannels[nodeID],
            r.peerUpdates[nodeID],
        )
        r.nodes = append(r.nodes, nodeID)
        r.total++
    }
}

func (r *reactorTestSuite) listenFor(
    t *testing.T,
    node types.NodeID,
    conditional func(msg p2p.Envelope) bool,
    assertion func(t *testing.T, msg p2p.Envelope) bool,
    waitPeriod time.Duration,
) {
    timesUp := time.After(waitPeriod)
    for {
        select {
        case envelope := <-r.pexChannels[node].In:
            if conditional(envelope) && assertion(t, envelope) {
                return
            }
        case <-timesUp:
            require.Fail(t, "timed out waiting for message",
                "node=%v, waitPeriod=%s", node, waitPeriod)
        }
    }
}

func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int, waitPeriod time.Duration) {
    r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
    to, from := r.checkNodePair(t, toNode, fromNode)
    conditional := func(msg p2p.Envelope) bool {
        _, ok := msg.Message.(*p2pproto.PexRequest)
        return ok && msg.From == from
    }
    assertion := func(t *testing.T, msg p2p.Envelope) bool {
        require.Equal(t, &p2pproto.PexRequest{}, msg.Message)
        return true
    }
    r.listenFor(t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) pingAndlistenForNAddresses(
    t *testing.T,
    fromNode, toNode int,
    waitPeriod time.Duration,
    addresses int,
) {
    r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
    to, from := r.checkNodePair(t, toNode, fromNode)
    conditional := func(msg p2p.Envelope) bool {
        _, ok := msg.Message.(*p2pproto.PexResponse)
        return ok && msg.From == from
    }
    assertion := func(t *testing.T, msg p2p.Envelope) bool {
        m, ok := msg.Message.(*p2pproto.PexResponse)
        if !ok {
            require.Fail(t, "expected pex response v2")
            return true
        }
        // assert the same amount of addresses
        if len(m.Addresses) == addresses {
            return true
        }
        // if we didn't get the right length, we wait and send the
        // request again
        time.Sleep(300 * time.Millisecond)
        r.sendRequest(t, toNode, fromNode)
        return false
    }
    r.sendRequest(t, toNode, fromNode)
    r.listenFor(t, to, conditional, assertion, waitPeriod)
}
func (r *reactorTestSuite) listenForResponse(
    t *testing.T,
    fromNode, toNode int,
    waitPeriod time.Duration,
    addresses []p2pproto.PexAddress,
) {
    r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
    to, from := r.checkNodePair(t, toNode, fromNode)
    conditional := func(msg p2p.Envelope) bool {
        _, ok := msg.Message.(*p2pproto.PexResponse)
        r.logger.Info("message", msg, "ok", ok)
        return ok && msg.From == from
    }
    assertion := func(t *testing.T, msg p2p.Envelope) bool {
        require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
        return true
    }
    r.listenFor(t, to, conditional, assertion, waitPeriod)
}

func (r *reactorTestSuite) listenForPeerUpdate(
    t *testing.T,
    onNode, withNode int,
    status p2p.PeerStatus,
    waitPeriod time.Duration,
) {
    on, with := r.checkNodePair(t, onNode, withNode)
    sub := r.network.Nodes[on].PeerManager.Subscribe()
    defer sub.Close()
    timesUp := time.After(waitPeriod)
    for {
        select {
        case peerUpdate := <-sub.Updates():
            if peerUpdate.NodeID == with {
                require.Equal(t, status, peerUpdate.Status)
                return
            }
        case <-timesUp:
            require.Fail(t, "timed out waiting for peer status", "%v with status %v",
                with, status)
            return
        }
    }
}

func (r *reactorTestSuite) getAddressesFor(nodes []int) []p2pproto.PexAddress {
    addresses := make([]p2pproto.PexAddress, len(nodes))
    for idx, node := range nodes {
        nodeID := r.nodes[node]
        addresses[idx] = p2pproto.PexAddress{
            URL: r.network.Nodes[nodeID].NodeAddress.String(),
        }
    }
    return addresses
}

func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int) {
    to, from := r.checkNodePair(t, toNode, fromNode)
    r.pexChannels[from].Out <- p2p.Envelope{
        To:      to,
        Message: &p2pproto.PexRequest{},
    }
}

func (r *reactorTestSuite) sendResponse(
    t *testing.T,
    fromNode, toNode int,
    withNodes []int,
) {
    from, to := r.checkNodePair(t, fromNode, toNode)
    addrs := r.getAddressesFor(withNodes)
    r.pexChannels[from].Out <- p2p.Envelope{
        To: to,
        Message: &p2pproto.PexResponse{
            Addresses: addrs,
        },
    }
}

func (r *reactorTestSuite) requireNumberOfPeers(
    t *testing.T,
    nodeIndex, numPeers int,
    waitPeriod time.Duration,
) {
    t.Helper()
    require.Eventuallyf(t, func() bool {
        actualNumPeers := len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers())
        return actualNumPeers >= numPeers
    }, waitPeriod, checkFrequency, "peer failed to connect with the asserted amount of peers "+
        "index=%d, node=%q, waitPeriod=%s expected=%d actual=%d",
        nodeIndex, r.nodes[nodeIndex], waitPeriod, numPeers,
        len(r.network.Nodes[r.nodes[nodeIndex]].PeerManager.Peers()),
    )
}
func (r *reactorTestSuite) connectAll(t *testing.T) {
    r.connectN(t, r.total-1)
}

// connects all nodes with n other nodes
func (r *reactorTestSuite) connectN(t *testing.T, n int) {
    if n >= r.total {
        require.Fail(t, "connectN: n must be less than the size of the network - 1")
    }
    for i := 0; i < r.total; i++ {
        for j := 0; j < n; j++ {
            r.connectPeers(t, i, (i+j+1)%r.total)
        }
    }
}

// connects node1 to node2
func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int) {
    t.Helper()
    node1, node2 := r.checkNodePair(t, sourceNode, targetNode)
    r.logger.Info("connecting peers", "sourceNode", sourceNode, "targetNode", targetNode)

    n1 := r.network.Nodes[node1]
    if n1 == nil {
        require.Fail(t, "connectPeers: source node %v is not part of the testnet", node1)
        return
    }
    n2 := r.network.Nodes[node2]
    if n2 == nil {
        require.Fail(t, "connectPeers: target node %v is not part of the testnet", node2)
        return
    }

    sourceSub := n1.PeerManager.Subscribe()
    defer sourceSub.Close()
    targetSub := n2.PeerManager.Subscribe()
    defer targetSub.Close()

    sourceAddress := n1.NodeAddress
    r.logger.Debug("source address", "address", sourceAddress)
    targetAddress := n2.NodeAddress
    r.logger.Debug("target address", "address", targetAddress)

    added, err := n1.PeerManager.Add(targetAddress)
    require.NoError(t, err)
    if !added {
        r.logger.Debug("nodes already know about one another",
            "sourceNode", sourceNode, "targetNode", targetNode)
        return
    }

    select {
    case peerUpdate := <-targetSub.Updates():
        require.Equal(t, p2p.PeerUpdate{
            NodeID: node1,
            Status: p2p.PeerStatusUp,
        }, peerUpdate)
        r.logger.Debug("target connected with source")
    case <-time.After(2 * time.Second):
        require.Fail(t, "timed out waiting for peer", "%v accepting %v",
            targetNode, sourceNode)
    }

    select {
    case peerUpdate := <-sourceSub.Updates():
        require.Equal(t, p2p.PeerUpdate{
            NodeID: node2,
            Status: p2p.PeerStatusUp,
        }, peerUpdate)
        r.logger.Debug("source connected with target")
    case <-time.After(2 * time.Second):
        require.Fail(t, "timed out waiting for peer", "%v dialing %v",
            sourceNode, targetNode)
    }

    added, err = n2.PeerManager.Add(sourceAddress)
    require.NoError(t, err)
    require.True(t, added)
}

func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) {
    require.NotEqual(t, first, second)
    require.Less(t, first, r.total)
    require.Less(t, second, r.total)
    return r.nodes[first], r.nodes[second]
}

func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) {
    peerManager := r.network.Nodes[r.nodes[node]].PeerManager
    for _, addr := range addrs {
        require.Less(t, addr, r.total)
        address := r.network.Nodes[r.nodes[addr]].NodeAddress
        added, err := peerManager.Add(address)
        require.NoError(t, err)
        require.True(t, added)
    }
}

func newNodeID(t *testing.T, id string) types.NodeID {
    nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength))
    require.NoError(t, err)
    return nodeID
}

func randomNodeID(t *testing.T) types.NodeID {
    return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey())
}