package p2ptest

import (
	"context"
	"math/rand"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/types"
)

// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
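//
// A typical test builds and starts a network along these lines (a sketch:
// the channel ID, message type, and buffer size are placeholders chosen by
// the caller, not part of this package):
//
//	network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 4})
//	network.Start(t)
//	chDesc := p2ptest.MakeChannelDesc(0x42)
//	channels := network.MakeChannels(t, chDesc, &SomeMessage{}, 1)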
type Network struct {
	Nodes map[types.NodeID]*Node

	logger        log.Logger
	memoryNetwork *p2p.MemoryNetwork
}

// NetworkOptions is an argument structure to parameterize the
// MakeNetwork function.
type NetworkOptions struct {
	NumNodes   int
	BufferSize int
	NodeOpts   NodeOptions
}

// NodeOptions parameterizes the peer manager limits of a single test node.
type NodeOptions struct {
	MaxPeers     uint16
	MaxConnected uint16
}

func (opts *NetworkOptions) setDefaults() {
	if opts.BufferSize == 0 {
		opts.BufferSize = 1
	}
}

// MakeNetwork creates a test network with the given number of nodes and
// connects them to each other.
func MakeNetwork(t *testing.T, opts NetworkOptions) *Network {
	opts.setDefaults()
	logger := log.TestingLogger()
	network := &Network{
		Nodes:         map[types.NodeID]*Node{},
		logger:        logger,
		memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
	}

	for i := 0; i < opts.NumNodes; i++ {
		node := network.MakeNode(t, opts.NodeOpts)
		network.Nodes[node.NodeID] = node
	}

	return network
}

// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(t *testing.T) {
	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node.
	dialQueue := []p2p.NodeAddress{}
	subs := map[types.NodeID]*p2p.PeerUpdates{}
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe()
		defer subs[node.NodeID].Close()
	}

	// For each node, dial the nodes that it still doesn't have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]

		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]

			added, err := sourceNode.PeerManager.Add(targetAddress)
			require.NoError(t, err)
			require.True(t, added)

			select {
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: targetNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(3 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}

			select {
			case peerUpdate := <-targetSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(3 * time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}

			// Add the address to the target as well, so it can dial the
			// source back if needed.
			added, err = targetNode.PeerManager.Add(sourceAddress)
			require.NoError(t, err)
			require.True(t, added)
		}
	}
}

// NodeIDs returns the network's node IDs.
func (n *Network) NodeIDs() []types.NodeID {
	ids := []types.NodeID{}
	for id := range n.Nodes {
		ids = append(ids, id)
	}
	return ids
}

// MakeChannels makes a channel on all nodes and returns them, automatically
// doing error checks and cleanups.
func (n *Network) MakeChannels(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) map[types.NodeID]*p2p.Channel {
	channels := map[types.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size)
	}
	return channels
}
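
// A test can then pass messages between two nodes by writing an envelope to
// one node's Out channel and reading it from the other node's In channel,
// roughly as follows (a sketch; aliceID, bobID, and SomeMessage are
// placeholders for the caller's own node IDs and protobuf message type):
//
//	channels[aliceID].Out <- p2p.Envelope{To: bobID, Message: &SomeMessage{}}
//	envelope := <-channels[bobID].In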

// MakeChannelsNoCleanup makes a channel on all nodes and returns them,
// automatically doing error checks. The caller must ensure proper cleanup of
// all the channels.
func (n *Network) MakeChannelsNoCleanup(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) map[types.NodeID]*p2p.Channel {
	channels := map[types.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size)
	}
	return channels
}

// RandomNode returns a random node.
func (n *Network) RandomNode() *Node {
	nodes := make([]*Node, 0, len(n.Nodes))
	for _, node := range n.Nodes {
		nodes = append(nodes, node)
	}
	return nodes[rand.Intn(len(nodes))] // nolint:gosec
}

// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id types.NodeID) []*Node {
	peers := make([]*Node, 0, len(n.Nodes)-1)
	for _, peer := range n.Nodes {
		if peer.NodeID != id {
			peers = append(peers, peer)
		}
	}
	return peers
}

// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(t *testing.T, id types.NodeID) {
	require.Contains(t, n.Nodes, id)
	node := n.Nodes[id]
	delete(n.Nodes, id)

	subs := []*p2p.PeerUpdates{}
	for _, peer := range n.Nodes {
		sub := peer.PeerManager.Subscribe()
		defer sub.Close()
		subs = append(subs, sub)
	}

	require.NoError(t, node.Transport.Close())
	if node.Router.IsRunning() {
		require.NoError(t, node.Router.Stop())
	}
	node.PeerManager.Close()

	for _, sub := range subs {
		RequireUpdate(t, sub, p2p.PeerUpdate{
			NodeID: node.NodeID,
			Status: p2p.PeerStatusDown,
		})
	}
}
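
// For example, a test that needs to exercise disconnects can drop a random
// node and rely on Remove to block until every remaining node has observed
// the peer going down (a sketch):
//
//	node := network.RandomNode()
//	network.Remove(t, node.NodeID)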

// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	NodeID      types.NodeID
	NodeInfo    types.NodeInfo
	NodeAddress p2p.NodeAddress
	PrivKey     crypto.PrivKey
	Router      *p2p.Router
	PeerManager *p2p.PeerManager
	Transport   *p2p.MemoryTransport
}

// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
	privKey := ed25519.GenPrivKey()
	nodeID := types.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := types.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}

	transport := n.memoryNetwork.CreateTransport(nodeID)
	require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    100 * time.Millisecond,
		RetryTimeJitter: time.Millisecond,
		MaxPeers:        opts.MaxPeers,
		MaxConnected:    opts.MaxConnected,
	})
	require.NoError(t, err)

	router, err := p2p.NewRouter(
		n.logger,
		p2p.NopMetrics(),
		nodeInfo,
		privKey,
		peerManager,
		[]p2p.Transport{transport},
		p2p.RouterOptions{DialSleep: func(_ context.Context) {}},
	)
	require.NoError(t, err)
	require.NoError(t, router.Start())

	t.Cleanup(func() {
		if router.IsRunning() {
			require.NoError(t, router.Stop())
		}
		peerManager.Close()
		require.NoError(t, transport.Close())
	})

	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: transport.Endpoints()[0].NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
	}
}
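
// Because MakeNode leaves peering to the caller, a test that adds a node to a
// running network has to register the new node's address with an existing
// peer manager itself, mirroring what Start does (a sketch):
//
//	newNode := network.MakeNode(t, p2ptest.NodeOptions{})
//	added, err := network.RandomNode().PeerManager.Add(newNode.NodeAddress)
//	require.NoError(t, err)
//	require.True(t, added)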

// MakeChannel opens a channel, with automatic error handling and cleanup. On
// test cleanup, it also checks that the channel is empty, to make sure
// all expected messages have been asserted.
func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor,
	messageType proto.Message, size int) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
	require.NoError(t, err)
	require.Contains(t, n.Router.NodeInfo().Channels, chDesc.ID)
	t.Cleanup(func() {
		RequireEmpty(t, channel)
		channel.Close()
	})
	return channel
}

// MakeChannelNoCleanup opens a channel, with automatic error handling. The
// caller must ensure proper cleanup of the channel.
func (n *Node) MakeChannelNoCleanup(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
	require.NoError(t, err)
	return channel
}

// MakePeerUpdates opens a peer update subscription, with automatic cleanup.
// It checks that all updates have been consumed during cleanup.
func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates {
	t.Helper()
	sub := n.PeerManager.Subscribe()
	t.Cleanup(func() {
		t.Helper()
		RequireNoUpdates(t, sub)
		sub.Close()
	})
	return sub
}
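
// A reactor test might combine this subscription with the Require helpers in
// this package to assert a specific update, e.g. a peer coming up (a sketch;
// peerID is a placeholder for a node ID from the test):
//
//	sub := node.MakePeerUpdates(t)
//	RequireUpdate(t, sub, p2p.PeerUpdate{NodeID: peerID, Status: p2p.PeerStatusUp})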

// MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup.
// It does *not* check that all updates have been consumed, but will
// close the update channel.
func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates {
	sub := n.PeerManager.Subscribe()
	t.Cleanup(func() {
		sub.Close()
	})
	return sub
}

// MakeChannelDesc returns a channel descriptor with reasonable test defaults
// for the given channel ID.
func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
	return p2p.ChannelDescriptor{
		ID:                  byte(chID),
		Priority:            5,
		SendQueueCapacity:   10,
		RecvMessageCapacity: 10,
		MaxSendBytes:        1000,
	}
}