You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

283 lines
8.1 KiB

  1. package p2ptest
  2. import (
  3. "math/rand"
  4. "testing"
  5. "time"
  6. "github.com/gogo/protobuf/proto"
  7. "github.com/stretchr/testify/require"
  8. dbm "github.com/tendermint/tm-db"
  9. "github.com/tendermint/tendermint/crypto"
  10. "github.com/tendermint/tendermint/crypto/ed25519"
  11. "github.com/tendermint/tendermint/libs/log"
  12. "github.com/tendermint/tendermint/p2p"
  13. )
// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
	// Nodes holds every node in the network, keyed by node ID.
	Nodes map[p2p.NodeID]*Node

	// logger is shared by the network and every node's router.
	logger log.Logger
	// memoryNetwork is the in-memory transport fabric that connects the nodes.
	memoryNetwork *p2p.MemoryNetwork
}
  22. // MakeNetwork creates a test network with the given number of nodes and
  23. // connects them to each other.
  24. func MakeNetwork(t *testing.T, nodes int) *Network {
  25. logger := log.TestingLogger()
  26. network := &Network{
  27. Nodes: map[p2p.NodeID]*Node{},
  28. logger: logger,
  29. memoryNetwork: p2p.NewMemoryNetwork(logger),
  30. }
  31. for i := 0; i < nodes; i++ {
  32. node := network.MakeNode(t)
  33. network.Nodes[node.NodeID] = node
  34. }
  35. return network
  36. }
// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(t *testing.T) {
	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node. Subscribing before any dialing begins ensures no
	// PeerStatusUp update can be missed below.
	dialQueue := []p2p.NodeAddress{}
	subs := map[p2p.NodeID]*p2p.PeerUpdates{}
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe()
		// Deliberately deferred until Start returns: the subscriptions must
		// stay open for the duration of the connection setup below.
		defer subs[node.NodeID].Close()
	}

	// For each node, dial the nodes that it still doesn't have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]

		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]

			// Adding the target's address to the source's peer manager is
			// what triggers the dial.
			require.NoError(t, sourceNode.PeerManager.Add(targetAddress))

			// Wait for the dialing side to report the target peer as up.
			select {
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: targetNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}

			// Wait for the accepting side to report the source peer as up.
			select {
			case peerUpdate := <-targetSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}

			// Add the address to the target as well, so it's able to dial the
			// source back if that's even necessary.
			require.NoError(t, targetNode.PeerManager.Add(sourceAddress))
		}
	}
}
  86. // NodeIDs returns the network's node IDs.
  87. func (n *Network) NodeIDs() []p2p.NodeID {
  88. ids := []p2p.NodeID{}
  89. for id := range n.Nodes {
  90. ids = append(ids, id)
  91. }
  92. return ids
  93. }
  94. // MakeChannels makes a channel on all nodes and returns them, automatically
  95. // doing error checks and cleanups.
  96. func (n *Network) MakeChannels(
  97. t *testing.T,
  98. chID p2p.ChannelID,
  99. messageType proto.Message,
  100. size int,
  101. ) map[p2p.NodeID]*p2p.Channel {
  102. channels := map[p2p.NodeID]*p2p.Channel{}
  103. for _, node := range n.Nodes {
  104. channels[node.NodeID] = node.MakeChannel(t, chID, messageType, size)
  105. }
  106. return channels
  107. }
  108. // MakeChannelsNoCleanup makes a channel on all nodes and returns them,
  109. // automatically doing error checks. The caller must ensure proper cleanup of
  110. // all the channels.
  111. func (n *Network) MakeChannelsNoCleanup(
  112. t *testing.T,
  113. chID p2p.ChannelID,
  114. messageType proto.Message,
  115. size int,
  116. ) map[p2p.NodeID]*p2p.Channel {
  117. channels := map[p2p.NodeID]*p2p.Channel{}
  118. for _, node := range n.Nodes {
  119. channels[node.NodeID] = node.MakeChannelNoCleanup(t, chID, messageType, size)
  120. }
  121. return channels
  122. }
  123. // RandomNode returns a random node.
  124. func (n *Network) RandomNode() *Node {
  125. nodes := make([]*Node, 0, len(n.Nodes))
  126. for _, node := range n.Nodes {
  127. nodes = append(nodes, node)
  128. }
  129. return nodes[rand.Intn(len(nodes))] // nolint:gosec
  130. }
  131. // Peers returns a node's peers (i.e. everyone except itself).
  132. func (n *Network) Peers(id p2p.NodeID) []*Node {
  133. peers := make([]*Node, 0, len(n.Nodes)-1)
  134. for _, peer := range n.Nodes {
  135. if peer.NodeID != id {
  136. peers = append(peers, peer)
  137. }
  138. }
  139. return peers
  140. }
  141. // Remove removes a node from the network, stopping it and waiting for all other
  142. // nodes to pick up the disconnection.
  143. func (n *Network) Remove(t *testing.T, id p2p.NodeID) {
  144. require.Contains(t, n.Nodes, id)
  145. node := n.Nodes[id]
  146. delete(n.Nodes, id)
  147. subs := []*p2p.PeerUpdates{}
  148. for _, peer := range n.Nodes {
  149. sub := peer.PeerManager.Subscribe()
  150. defer sub.Close()
  151. subs = append(subs, sub)
  152. }
  153. require.NoError(t, node.Transport.Close())
  154. if node.Router.IsRunning() {
  155. require.NoError(t, node.Router.Stop())
  156. }
  157. node.PeerManager.Close()
  158. for _, sub := range subs {
  159. RequireUpdate(t, sub, p2p.PeerUpdate{
  160. NodeID: node.NodeID,
  161. Status: p2p.PeerStatusDown,
  162. })
  163. }
  164. }
// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	// NodeID is derived from the node's public key (see MakeNode).
	NodeID p2p.NodeID
	// NodeInfo describes the node; its Moniker is the string form of NodeID.
	NodeInfo p2p.NodeInfo
	// NodeAddress is the node's dialable address on the memory transport.
	NodeAddress p2p.NodeAddress
	// PrivKey is the ed25519 private key generated for this node.
	PrivKey crypto.PrivKey
	// Router is started by MakeNode and stopped on test cleanup.
	Router *p2p.Router
	// PeerManager tracks this node's peers; closed on test cleanup.
	PeerManager *p2p.PeerManager
	// Transport is the node's in-memory transport; closed on test cleanup.
	Transport *p2p.MemoryTransport
}
// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T) *Node {
	privKey := ed25519.GenPrivKey()
	nodeID := p2p.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := p2p.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}

	// The memory transport is expected to expose exactly one endpoint, used
	// below as the node's dialable address.
	transport := n.memoryNetwork.CreateTransport(nodeID)
	require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

	// Short retry times keep tests fast when the peer manager redials.
	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    100 * time.Millisecond,
		RetryTimeJitter: time.Millisecond,
	})
	require.NoError(t, err)

	router, err := p2p.NewRouter(n.logger, nodeInfo, privKey, peerManager,
		[]p2p.Transport{transport}, p2p.RouterOptions{})
	require.NoError(t, err)
	require.NoError(t, router.Start())

	// Tear down in reverse order of construction. The router may already have
	// been stopped (e.g. by Network.Remove), so only stop it if still running.
	t.Cleanup(func() {
		if router.IsRunning() {
			require.NoError(t, router.Stop())
		}
		peerManager.Close()
		require.NoError(t, transport.Close())
	})

	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: transport.Endpoints()[0].NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
	}
}
  215. // MakeChannel opens a channel, with automatic error handling and cleanup. On
  216. // test cleanup, it also checks that the channel is empty, to make sure
  217. // all expected messages have been asserted.
  218. func (n *Node) MakeChannel(t *testing.T, chID p2p.ChannelID, messageType proto.Message, size int) *p2p.Channel {
  219. channel, err := n.Router.OpenChannel(chID, messageType, size)
  220. require.NoError(t, err)
  221. t.Cleanup(func() {
  222. RequireEmpty(t, channel)
  223. channel.Close()
  224. })
  225. return channel
  226. }
  227. // MakeChannelNoCleanup opens a channel, with automatic error handling. The
  228. // caller must ensure proper cleanup of the channel.
  229. func (n *Node) MakeChannelNoCleanup(
  230. t *testing.T,
  231. chID p2p.ChannelID,
  232. messageType proto.Message,
  233. size int,
  234. ) *p2p.Channel {
  235. channel, err := n.Router.OpenChannel(chID, messageType, size)
  236. require.NoError(t, err)
  237. return channel
  238. }
  239. // MakePeerUpdates opens a peer update subscription, with automatic cleanup.
  240. // It checks that all updates have been consumed during cleanup.
  241. func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates {
  242. sub := n.PeerManager.Subscribe()
  243. t.Cleanup(func() {
  244. RequireNoUpdates(t, sub)
  245. sub.Close()
  246. })
  247. return sub
  248. }