package p2ptest

import (
	"math/rand"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
)
// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
	Nodes map[p2p.NodeID]*Node

	logger        log.Logger
	memoryNetwork *p2p.MemoryNetwork
}

// NetworkOptions is an argument structure to parameterize the
// MakeNetwork function.
type NetworkOptions struct {
	NumNodes   int
	BufferSize int
}

func (opts *NetworkOptions) setDefaults() {
	if opts.BufferSize == 0 {
		opts.BufferSize = 1
	}
}
// MakeNetwork creates a test network with the given number of nodes and
// connects them to each other.
func MakeNetwork(t *testing.T, opts NetworkOptions) *Network {
	opts.setDefaults()
	logger := log.TestingLogger()
	network := &Network{
		Nodes:         map[p2p.NodeID]*Node{},
		logger:        logger,
		memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
	}

	for i := 0; i < opts.NumNodes; i++ {
		node := network.MakeNode(t)
		network.Nodes[node.NodeID] = node
	}

	return network
}
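
// A typical test sets the network up roughly as follows. This is a sketch,
// not part of this package; TestMessage stands in for whatever proto.Message
// the test exchanges:
//
//	network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 4})
//	network.Start(t)
//	channels := network.MakeChannels(t, chID, &TestMessage{}, 0)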
// Start starts the network by setting up a list of node addresses to dial in
// addition to creating a peer update subscription for each node. Finally, all
// nodes are connected to each other.
func (n *Network) Start(t *testing.T) {
	// Set up a list of node addresses to dial, and a peer update subscription
	// for each node.
	dialQueue := []p2p.NodeAddress{}
	subs := map[p2p.NodeID]*p2p.PeerUpdates{}
	for _, node := range n.Nodes {
		dialQueue = append(dialQueue, node.NodeAddress)
		subs[node.NodeID] = node.PeerManager.Subscribe()
		defer subs[node.NodeID].Close()
	}

	// For each node, dial the nodes that it doesn't yet have a connection to
	// (either inbound or outbound), and wait for both sides to confirm the
	// connection via the subscriptions.
	for i, sourceAddress := range dialQueue {
		sourceNode := n.Nodes[sourceAddress.NodeID]
		sourceSub := subs[sourceAddress.NodeID]

		for _, targetAddress := range dialQueue[i+1:] { // nodes <i already connected
			targetNode := n.Nodes[targetAddress.NodeID]
			targetSub := subs[targetAddress.NodeID]

			require.NoError(t, sourceNode.PeerManager.Add(targetAddress))

			select {
			case peerUpdate := <-sourceSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: targetNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(time.Second):
				require.Fail(t, "timed out waiting for peer", "%v dialing %v",
					sourceNode.NodeID, targetNode.NodeID)
			}

			select {
			case peerUpdate := <-targetSub.Updates():
				require.Equal(t, p2p.PeerUpdate{
					NodeID: sourceNode.NodeID,
					Status: p2p.PeerStatusUp,
				}, peerUpdate)
			case <-time.After(time.Second):
				require.Fail(t, "timed out waiting for peer", "%v accepting %v",
					targetNode.NodeID, sourceNode.NodeID)
			}

			// Add the address to the target as well, so it is able to dial
			// the source back if it needs to.
			require.NoError(t, targetNode.PeerManager.Add(sourceAddress))
		}
	}
}
// NodeIDs returns the network's node IDs.
func (n *Network) NodeIDs() []p2p.NodeID {
	ids := []p2p.NodeID{}
	for id := range n.Nodes {
		ids = append(ids, id)
	}
	return ids
}
// MakeChannels makes a channel on all nodes and returns them, automatically
// doing error checks and cleanups.
func (n *Network) MakeChannels(
	t *testing.T,
	chID p2p.ChannelID,
	messageType proto.Message,
	size int,
) map[p2p.NodeID]*p2p.Channel {
	channels := map[p2p.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannel(t, chID, messageType, size)
	}
	return channels
}
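
// A sketch of how a test might exchange a message between two nodes using
// these channels, assuming p2p.Channel exposes In/Out envelope queues and
// with TestMessage as a hypothetical proto.Message:
//
//	ids := network.NodeIDs()
//	channels := network.MakeChannels(t, chID, &TestMessage{}, 0)
//	channels[ids[0]].Out <- p2p.Envelope{To: ids[1], Message: &TestMessage{}}
//	envelope := <-channels[ids[1]].In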
// MakeChannelsNoCleanup makes a channel on all nodes and returns them,
// automatically doing error checks. The caller must ensure proper cleanup of
// all the channels.
func (n *Network) MakeChannelsNoCleanup(
	t *testing.T,
	chID p2p.ChannelID,
	messageType proto.Message,
	size int,
) map[p2p.NodeID]*p2p.Channel {
	channels := map[p2p.NodeID]*p2p.Channel{}
	for _, node := range n.Nodes {
		channels[node.NodeID] = node.MakeChannelNoCleanup(t, chID, messageType, size)
	}
	return channels
}
// RandomNode returns a random node.
func (n *Network) RandomNode() *Node {
	nodes := make([]*Node, 0, len(n.Nodes))
	for _, node := range n.Nodes {
		nodes = append(nodes, node)
	}
	return nodes[rand.Intn(len(nodes))] // nolint:gosec
}

// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id p2p.NodeID) []*Node {
	peers := make([]*Node, 0, len(n.Nodes)-1)
	for _, peer := range n.Nodes {
		if peer.NodeID != id {
			peers = append(peers, peer)
		}
	}
	return peers
}
// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(t *testing.T, id p2p.NodeID) {
	require.Contains(t, n.Nodes, id)
	node := n.Nodes[id]
	delete(n.Nodes, id)

	subs := []*p2p.PeerUpdates{}
	for _, peer := range n.Nodes {
		sub := peer.PeerManager.Subscribe()
		defer sub.Close()
		subs = append(subs, sub)
	}

	require.NoError(t, node.Transport.Close())
	if node.Router.IsRunning() {
		require.NoError(t, node.Router.Stop())
	}
	node.PeerManager.Close()

	for _, sub := range subs {
		RequireUpdate(t, sub, p2p.PeerUpdate{
			NodeID: node.NodeID,
			Status: p2p.PeerStatusDown,
		})
	}
}
// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
	NodeID      p2p.NodeID
	NodeInfo    p2p.NodeInfo
	NodeAddress p2p.NodeAddress
	PrivKey     crypto.PrivKey
	Router      *p2p.Router
	PeerManager *p2p.PeerManager
	Transport   *p2p.MemoryTransport
}
// MakeNode creates a new Node configured for the network with a
// running peer manager, but does not add it to the existing
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T) *Node {
	privKey := ed25519.GenPrivKey()
	nodeID := p2p.NodeIDFromPubKey(privKey.PubKey())
	nodeInfo := p2p.NodeInfo{
		NodeID:     nodeID,
		ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
		Moniker:    string(nodeID),
	}

	transport := n.memoryNetwork.CreateTransport(nodeID)
	require.Len(t, transport.Endpoints(), 1, "transport not listening on 1 endpoint")

	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{
		MinRetryTime:    10 * time.Millisecond,
		MaxRetryTime:    100 * time.Millisecond,
		RetryTimeJitter: time.Millisecond,
	})
	require.NoError(t, err)

	router, err := p2p.NewRouter(
		n.logger,
		p2p.NopMetrics(),
		nodeInfo,
		privKey,
		peerManager,
		[]p2p.Transport{transport},
		p2p.RouterOptions{},
	)
	require.NoError(t, err)
	require.NoError(t, router.Start())

	t.Cleanup(func() {
		if router.IsRunning() {
			require.NoError(t, router.Stop())
		}
		peerManager.Close()
		require.NoError(t, transport.Close())
	})

	return &Node{
		NodeID:      nodeID,
		NodeInfo:    nodeInfo,
		NodeAddress: transport.Endpoints()[0].NodeAddress(nodeID),
		PrivKey:     privKey,
		Router:      router,
		PeerManager: peerManager,
		Transport:   transport,
	}
}
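
// Since MakeNode does not register the node with the network, a test that
// wants the new node to participate has to wire it up itself, roughly as
// follows (a sketch):
//
//	node := network.MakeNode(t)
//	network.Nodes[node.NodeID] = node
//	for _, peer := range network.Peers(node.NodeID) {
//		require.NoError(t, node.PeerManager.Add(peer.NodeAddress))
//	}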
// MakeChannel opens a channel, with automatic error handling and cleanup. On
// test cleanup, it also checks that the channel is empty, to make sure
// all expected messages have been asserted.
func (n *Node) MakeChannel(t *testing.T, chID p2p.ChannelID, messageType proto.Message, size int) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chID, messageType, size)
	require.NoError(t, err)
	t.Cleanup(func() {
		RequireEmpty(t, channel)
		channel.Close()
	})
	return channel
}
// MakeChannelNoCleanup opens a channel, with automatic error handling. The
// caller must ensure proper cleanup of the channel.
func (n *Node) MakeChannelNoCleanup(
	t *testing.T,
	chID p2p.ChannelID,
	messageType proto.Message,
	size int,
) *p2p.Channel {
	channel, err := n.Router.OpenChannel(chID, messageType, size)
	require.NoError(t, err)
	return channel
}
// MakePeerUpdates opens a peer update subscription, with automatic cleanup.
// It checks that all updates have been consumed during cleanup.
func (n *Node) MakePeerUpdates(t *testing.T) *p2p.PeerUpdates {
	t.Helper()
	sub := n.PeerManager.Subscribe()
	t.Cleanup(func() {
		t.Helper()
		RequireNoUpdates(t, sub)
		sub.Close()
	})
	return sub
}
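
// A sketch of how a test might assert a status change via the subscription,
// using the RequireUpdate helper defined elsewhere in this package (peerID is
// a hypothetical removed node's ID):
//
//	sub := node.MakePeerUpdates(t)
//	// ... trigger a disconnect, e.g. network.Remove(t, peerID) ...
//	RequireUpdate(t, sub, p2p.PeerUpdate{NodeID: peerID, Status: p2p.PeerStatusDown})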
// MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup.
// It does *not* check that all updates have been consumed, but will
// close the update channel.
func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates {
	sub := n.PeerManager.Subscribe()
	t.Cleanup(func() {
		sub.Close()
	})
	return sub
}