You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

348 lines
9.7 KiB

  1. package p2ptest
  2. import (
  3. "context"
  4. "math/rand"
  5. "testing"
  6. "time"
  7. "github.com/stretchr/testify/require"
  8. dbm "github.com/tendermint/tm-db"
  9. "github.com/tendermint/tendermint/crypto"
  10. "github.com/tendermint/tendermint/crypto/ed25519"
  11. "github.com/tendermint/tendermint/internal/p2p"
  12. "github.com/tendermint/tendermint/libs/log"
  13. "github.com/tendermint/tendermint/types"
  14. )
// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
	// Nodes maps node IDs to the nodes currently in the network.
	Nodes map[types.NodeID]*Node

	logger        log.Logger         // shared logger passed to every node's router
	memoryNetwork *p2p.MemoryNetwork // in-memory transport fabric connecting the nodes
}
// NetworkOptions is an argument structure to parameterize the
// MakeNetwork function.
type NetworkOptions struct {
	NumNodes   int         // number of nodes to create and interconnect
	BufferSize int         // memory-network buffer size; 0 defaults to 1 (see setDefaults)
	NodeOpts   NodeOptions // per-node options applied to every created node
}
// NodeOptions parameterizes a single node's peer manager limits.
type NodeOptions struct {
	MaxPeers     uint16 // passed to PeerManagerOptions.MaxPeers; presumably 0 means no limit — confirm in p2p package
	MaxConnected uint16 // passed to PeerManagerOptions.MaxConnected; presumably 0 means no limit — confirm in p2p package
}
  34. func (opts *NetworkOptions) setDefaults() {
  35. if opts.BufferSize == 0 {
  36. opts.BufferSize = 1
  37. }
  38. }
  39. // MakeNetwork creates a test network with the given number of nodes and
  40. // connects them to each other.
  41. func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network {
  42. opts.setDefaults()
  43. logger := log.TestingLogger()
  44. network := &Network{
  45. Nodes: map[types.NodeID]*Node{},
  46. logger: logger,
  47. memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
  48. }
  49. for i := 0; i < opts.NumNodes; i++ {
  50. node := network.MakeNode(ctx, t, opts.NodeOpts)
  51. network.Nodes[node.NodeID] = node
  52. }
  53. return network
  54. }
// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(ctx context.Context, t *testing.T) {
	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node.
	dialQueue := []p2p.NodeAddress{}
	subs := map[types.NodeID]*p2p.PeerUpdates{}
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe(ctx)
		// Deliberate defer-in-loop: every subscription must stay open until
		// all connections below are confirmed, i.e. until Start returns.
		defer subs[node.NodeID].Close()
	}
	// For each node, dial the nodes that it still doesn't have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]
		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]
			added, err := sourceNode.PeerManager.Add(targetAddress)
			require.NoError(t, err)
			require.True(t, added)
			// Wait for the dialing side to report the target as up.
			select {
			case <-ctx.Done():
				require.Fail(t, "operation canceled")
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: targetNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(3 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}
			// Wait for the accepting side to report the source as up.
			select {
			case <-ctx.Done():
				require.Fail(t, "operation canceled")
			case peerUpdate := <-targetSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(3 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}
			// Add the address to the target as well, so it's able to dial the
			// source back if that's even necessary.
			added, err = targetNode.PeerManager.Add(sourceAddress)
			require.NoError(t, err)
			require.True(t, added)
		}
	}
}
  112. // NodeIDs returns the network's node IDs.
  113. func (n *Network) NodeIDs() []types.NodeID {
  114. ids := []types.NodeID{}
  115. for id := range n.Nodes {
  116. ids = append(ids, id)
  117. }
  118. return ids
  119. }
  120. // MakeChannels makes a channel on all nodes and returns them, automatically
  121. // doing error checks and cleanups.
  122. func (n *Network) MakeChannels(
  123. ctx context.Context,
  124. t *testing.T,
  125. chDesc *p2p.ChannelDescriptor,
  126. ) map[types.NodeID]*p2p.Channel {
  127. channels := map[types.NodeID]*p2p.Channel{}
  128. for _, node := range n.Nodes {
  129. channels[node.NodeID] = node.MakeChannel(ctx, t, chDesc)
  130. }
  131. return channels
  132. }
  133. // MakeChannelsNoCleanup makes a channel on all nodes and returns them,
  134. // automatically doing error checks. The caller must ensure proper cleanup of
  135. // all the channels.
  136. func (n *Network) MakeChannelsNoCleanup(
  137. ctx context.Context,
  138. t *testing.T,
  139. chDesc *p2p.ChannelDescriptor,
  140. ) map[types.NodeID]*p2p.Channel {
  141. channels := map[types.NodeID]*p2p.Channel{}
  142. for _, node := range n.Nodes {
  143. channels[node.NodeID] = node.MakeChannelNoCleanup(ctx, t, chDesc)
  144. }
  145. return channels
  146. }
  147. // RandomNode returns a random node.
  148. func (n *Network) RandomNode() *Node {
  149. nodes := make([]*Node, 0, len(n.Nodes))
  150. for _, node := range n.Nodes {
  151. nodes = append(nodes, node)
  152. }
  153. return nodes[rand.Intn(len(nodes))] // nolint:gosec
  154. }
  155. // Peers returns a node's peers (i.e. everyone except itself).
  156. func (n *Network) Peers(id types.NodeID) []*Node {
  157. peers := make([]*Node, 0, len(n.Nodes)-1)
  158. for _, peer := range n.Nodes {
  159. if peer.NodeID != id {
  160. peers = append(peers, peer)
  161. }
  162. }
  163. return peers
  164. }
// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(ctx context.Context, t *testing.T, id types.NodeID) {
	require.Contains(t, n.Nodes, id)
	node := n.Nodes[id]
	// Delete from the map first so the subscriptions below only cover the
	// remaining nodes.
	delete(n.Nodes, id)
	subs := []*p2p.PeerUpdates{}
	for _, peer := range n.Nodes {
		sub := peer.PeerManager.Subscribe(ctx)
		// Deliberate defer-in-loop: subscriptions stay open until all
		// PeerStatusDown updates below are observed.
		defer sub.Close()
		subs = append(subs, sub)
	}
	// Shut the node down: transport first, then the router (if still
	// running), then the peer manager. This order mirrors MakeNode's cleanup.
	require.NoError(t, node.Transport.Close())
	if node.Router.IsRunning() {
		require.NoError(t, node.Router.Stop())
	}
	node.PeerManager.Close()
	// Every remaining node must observe the removed node going down.
	for _, sub := range subs {
		RequireUpdate(t, sub, p2p.PeerUpdate{
			NodeID: node.NodeID,
			Status: p2p.PeerStatusDown,
		})
	}
}
// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	NodeID      types.NodeID        // derived from PrivKey's public key
	NodeInfo    types.NodeInfo      // node info advertised to peers
	NodeAddress p2p.NodeAddress     // address other nodes can dial (from the memory transport endpoint)
	PrivKey     crypto.PrivKey      // ed25519 key generated in MakeNode
	Router      *p2p.Router         // running router for this node
	PeerManager *p2p.PeerManager    // peer manager backing the router
	Transport   *p2p.MemoryTransport // in-memory transport created on the network's MemoryNetwork
}
// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(ctx context.Context, t *testing.T, opts NodeOptions) *Node {
	// Fresh identity: the node ID is derived from a new ed25519 key.
	privKey := ed25519.GenPrivKey()
	nodeID := types.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := types.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}
	transport := n.memoryNetwork.CreateTransport(nodeID)
	// The single endpoint is used below as this node's dialable address.
	require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")
	// Short retry times keep test reconnect behavior fast.
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    100 * time.Millisecond,
		RetryTimeJitter: time.Millisecond,
		MaxPeers:        opts.MaxPeers,
		MaxConnected:    opts.MaxConnected,
	})
	require.NoError(t, err)
	router, err := p2p.NewRouter(
		ctx,
		n.logger,
		p2p.NopMetrics(),
		nodeInfo,
		privKey,
		peerManager,
		[]p2p.Transport{transport},
		transport.Endpoints(),
		// No dial sleep: tests should dial immediately.
		p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
	)
	require.NoError(t, err)
	require.NoError(t, router.Start(ctx))
	// Teardown order: router, then peer manager, then transport.
	t.Cleanup(func() {
		if router.IsRunning() {
			require.NoError(t, router.Stop())
		}
		peerManager.Close()
		require.NoError(t, transport.Close())
	})
	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: transport.Endpoints()[0].NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
	}
}
  250. // MakeChannel opens a channel, with automatic error handling and cleanup. On
  251. // test cleanup, it also checks that the channel is empty, to make sure
  252. // all expected messages have been asserted.
  253. func (n *Node) MakeChannel(
  254. ctx context.Context,
  255. t *testing.T,
  256. chDesc *p2p.ChannelDescriptor,
  257. ) *p2p.Channel {
  258. ctx, cancel := context.WithCancel(ctx)
  259. channel, err := n.Router.OpenChannel(ctx, chDesc)
  260. require.NoError(t, err)
  261. require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID))
  262. t.Cleanup(func() {
  263. RequireEmpty(t, channel)
  264. cancel()
  265. })
  266. return channel
  267. }
  268. // MakeChannelNoCleanup opens a channel, with automatic error handling. The
  269. // caller must ensure proper cleanup of the channel.
  270. func (n *Node) MakeChannelNoCleanup(
  271. ctx context.Context,
  272. t *testing.T,
  273. chDesc *p2p.ChannelDescriptor,
  274. ) *p2p.Channel {
  275. channel, err := n.Router.OpenChannel(ctx, chDesc)
  276. require.NoError(t, err)
  277. return channel
  278. }
  279. // MakePeerUpdates opens a peer update subscription, with automatic cleanup.
  280. // It checks that all updates have been consumed during cleanup.
  281. func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdates {
  282. t.Helper()
  283. sub := n.PeerManager.Subscribe(ctx)
  284. t.Cleanup(func() {
  285. RequireNoUpdates(ctx, t, sub)
  286. sub.Close()
  287. })
  288. return sub
  289. }
  290. // MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup.
  291. // It does *not* check that all updates have been consumed, but will
  292. // close the update channel.
  293. func (n *Node) MakePeerUpdatesNoRequireEmpty(ctx context.Context, t *testing.T) *p2p.PeerUpdates {
  294. sub := n.PeerManager.Subscribe(ctx)
  295. t.Cleanup(sub.Close)
  296. return sub
  297. }
  298. func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor {
  299. return &p2p.ChannelDescriptor{
  300. ID: chID,
  301. MessageType: &Message{},
  302. Priority: 5,
  303. SendQueueCapacity: 10,
  304. RecvMessageCapacity: 10,
  305. }
  306. }