package p2ptest

import (
	"math/rand"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
)

// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
	Nodes map[p2p.NodeID]*Node

	logger        log.Logger
	memoryNetwork *p2p.MemoryNetwork
}

// NetworkOptions is an argument structure to parameterize the
// MakeNetwork function.
type NetworkOptions struct {
	NumNodes   int
	BufferSize int
	NodeOpts   NodeOptions
}

type NodeOptions struct {
	MaxPeers     uint16
	MaxConnected uint16
}

func (opts *NetworkOptions) setDefaults() {
	if opts.BufferSize == 0 {
		opts.BufferSize = 1
	}
}

// MakeNetwork creates a test network with the given number of nodes and
// connects them to each other.
func MakeNetwork(t *testing.T, opts NetworkOptions) *Network {
	opts.setDefaults()
	logger := log.TestingLogger()
	network := &Network{
		Nodes:         map[p2p.NodeID]*Node{},
		logger:        logger,
		memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
	}

	for i := 0; i < opts.NumNodes; i++ {
		node := network.MakeNode(t, opts.NodeOpts)
		network.Nodes[node.NodeID] = node
	}

	return network
}
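
// Usage sketch (illustrative addition, not part of the upstream file): a
// reactor test is expected to build the network, open its channels on every
// node, and only then start it, roughly as follows, where chID is a
// hypothetical channel ID and msgType is the reactor's proto.Message type:
//
//	network := MakeNetwork(t, NetworkOptions{NumNodes: 4})
//	channels := network.MakeChannels(t, MakeChannelDesc(chID), msgType, 0)
//	network.Start(t)
//	_ = channels // exchange messages between the per-node channels here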

// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(t *testing.T) {
	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node.
	dialQueue := []p2p.NodeAddress{}
	subs := map[p2p.NodeID]*p2p.PeerUpdates{}
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe()
		defer subs[node.NodeID].Close()
	}

	// For each node, dial the nodes that it still doesn't have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]

		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]

			added, err := sourceNode.PeerManager.Add(targetAddress)
			require.NoError(t, err)
			require.True(t, added)

			select {
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: targetNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}

			select {
			case peerUpdate := <-targetSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}

			// Add the address to the target as well, so it's able to dial the
			// source back if that's even necessary.
			added, err = targetNode.PeerManager.Add(sourceAddress)
			require.NoError(t, err)
			require.True(t, added)
		}
	}
}

// NodeIDs returns the network's node IDs.
func (n *Network) NodeIDs() []p2p.NodeID {
	ids := []p2p.NodeID{}
	for id := range n.Nodes {
		ids = append(ids, id)
	}
	return ids
}

// MakeChannels makes a channel on all nodes and returns them, automatically
// doing error checks and cleanups.
func (n *Network) MakeChannels(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) map[p2p.NodeID]*p2p.Channel {
	channels := map[p2p.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size)
	}
	return channels
}

// MakeChannelsNoCleanup makes a channel on all nodes and returns them,
// automatically doing error checks. The caller must ensure proper cleanup of
// all the channels.
func (n *Network) MakeChannelsNoCleanup(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) map[p2p.NodeID]*p2p.Channel {
	channels := map[p2p.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size)
	}
	return channels
}

// RandomNode returns a random node.
func (n *Network) RandomNode() *Node {
	nodes := make([]*Node, 0, len(n.Nodes))
	for _, node := range n.Nodes {
		nodes = append(nodes, node)
	}
	return nodes[rand.Intn(len(nodes))] // nolint:gosec
}

// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id p2p.NodeID) []*Node {
	peers := make([]*Node, 0, len(n.Nodes)-1)
	for _, peer := range n.Nodes {
		if peer.NodeID != id {
			peers = append(peers, peer)
		}
	}
	return peers
}

// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(t *testing.T, id p2p.NodeID) {
	require.Contains(t, n.Nodes, id)
	node := n.Nodes[id]
	delete(n.Nodes, id)

	subs := []*p2p.PeerUpdates{}
	for _, peer := range n.Nodes {
		sub := peer.PeerManager.Subscribe()
		defer sub.Close()
		subs = append(subs, sub)
	}

	require.NoError(t, node.Transport.Close())
	if node.Router.IsRunning() {
		require.NoError(t, node.Router.Stop())
	}
	node.PeerManager.Close()

	for _, sub := range subs {
		RequireUpdate(t, sub, p2p.PeerUpdate{
			NodeID: node.NodeID,
			Status: p2p.PeerStatusDown,
		})
	}
}

// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	NodeID      p2p.NodeID
	NodeInfo    p2p.NodeInfo
	NodeAddress p2p.NodeAddress
	PrivKey     crypto.PrivKey
	Router      *p2p.Router
	PeerManager *p2p.PeerManager
	Transport   *p2p.MemoryTransport
}

// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
	privKey := ed25519.GenPrivKey()
	nodeID := p2p.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := p2p.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}

	transport := n.memoryNetwork.CreateTransport(nodeID)
	require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    100 * time.Millisecond,
		RetryTimeJitter: time.Millisecond,
		MaxPeers:        opts.MaxPeers,
		MaxConnected:    opts.MaxConnected,
	})
	require.NoError(t, err)

	router, err := p2p.NewRouter(
		n.logger,
		p2p.NopMetrics(),
		nodeInfo,
		privKey,
		peerManager,
		[]p2p.Transport{transport},
		p2p.RouterOptions{},
	)
	require.NoError(t, err)
	require.NoError(t, router.Start())

	t.Cleanup(func() {
		if router.IsRunning() {
			require.NoError(t, router.Stop())
		}
		peerManager.Close()
		require.NoError(t, transport.Close())
	})

	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: transport.Endpoints()[0].NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
	}
}
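
// Wiring sketch (illustrative addition, not part of the upstream file): since
// MakeNode neither registers the node nor dials any peers, a test that grows a
// running network has to do both itself, along the lines of:
//
//	node := network.MakeNode(t, NodeOptions{})
//	network.Nodes[node.NodeID] = node
//	for _, peer := range network.Peers(node.NodeID) {
//		added, err := peer.PeerManager.Add(node.NodeAddress)
//		require.NoError(t, err)
//		require.True(t, added)
//	}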

// MakeChannel opens a channel, with automatic error handling and cleanup. On
// test cleanup, it also checks that the channel is empty, to make sure
// all expected messages have been asserted.
func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor,
	messageType proto.Message, size int) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
	require.NoError(t, err)
	t.Cleanup(func() {
		RequireEmpty(t, channel)
		channel.Close()
	})
	return channel
}

// MakeChannelNoCleanup opens a channel, with automatic error handling. The
// caller must ensure proper cleanup of the channel.
func (n *Node) MakeChannelNoCleanup(
	t *testing.T,
	chDesc p2p.ChannelDescriptor,
	messageType proto.Message,
	size int,
) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
	require.NoError(t, err)
	return channel
}

// MakePeerUpdates opens a peer update subscription, with automatic cleanup.
// It checks that all updates have been consumed during cleanup.
func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates {
	t.Helper()
	sub := n.PeerManager.Subscribe()
	t.Cleanup(func() {
		t.Helper()
		RequireNoUpdates(t, sub)
		sub.Close()
	})
	return sub
}

// MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup.
// It does *not* check that all updates have been consumed, but will
// close the update channel.
func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates {
	sub := n.PeerManager.Subscribe()
	t.Cleanup(func() {
		sub.Close()
	})
	return sub
}

// MakeChannelDesc returns a channel descriptor with test-friendly defaults for
// the given channel ID.
func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
	return p2p.ChannelDescriptor{
		ID:                  byte(chID),
		Priority:            5,
		SendQueueCapacity:   10,
		RecvMessageCapacity: 10,
		MaxSendBytes:        1000,
	}
}