You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

598 lines
15 KiB

p2p: file descriptor leaks (#3150) * close peer's connection to avoid fd leak Fixes #2967 * rename peer#Addr to RemoteAddr * fix test * fixes after Ethan's review * bring back the check * changelog entry * write a test for switch#acceptRoutine * increase timeouts? :( * remove extra assertNPeersWithTimeout * simplify test * assert number of peers (just to be safe) * Cleanup in OnStop * run tests with verbose flag on CircleCI * spawn a reading routine to prevent connection from closing * get port from the listener random port is faster, but often results in ``` panic: listen tcp 127.0.0.1:44068: bind: address already in use [recovered] panic: listen tcp 127.0.0.1:44068: bind: address already in use goroutine 79 [running]: testing.tRunner.func1(0xc0001bd600) /usr/local/go/src/testing/testing.go:792 +0x387 panic(0x974d20, 0xc0001b0500) /usr/local/go/src/runtime/panic.go:513 +0x1b9 github.com/tendermint/tendermint/p2p.MakeSwitch(0xc0000f42a0, 0x0, 0x9fb9cc, 0x9, 0x9fc346, 0xb, 0xb42128, 0x0, 0x0, 0x0, ...) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:182 +0xa28 github.com/tendermint/tendermint/p2p.MakeConnectedSwitches(0xc0000f42a0, 0x2, 0xb42128, 0xb41eb8, 0x4f1205, 0xc0001bed80, 0x4f16ed) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:75 +0xf9 github.com/tendermint/tendermint/p2p.MakeSwitchPair(0xbb8d20, 0xc0001bd600, 0xb42128, 0x2f7, 0x4f16c0) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:94 +0x4c github.com/tendermint/tendermint/p2p.TestSwitches(0xc0001bd600) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:117 +0x58 testing.tRunner(0xc0001bd600, 0xb42038) /usr/local/go/src/testing/testing.go:827 +0xbf created by testing.(*T).Run /usr/local/go/src/testing/testing.go:878 +0x353 exit status 2 FAIL github.com/tendermint/tendermint/p2p 0.350s ```
6 years ago
p2p: file descriptor leaks (#3150) * close peer's connection to avoid fd leak Fixes #2967 * rename peer#Addr to RemoteAddr * fix test * fixes after Ethan's review * bring back the check * changelog entry * write a test for switch#acceptRoutine * increase timeouts? :( * remove extra assertNPeersWithTimeout * simplify test * assert number of peers (just to be safe) * Cleanup in OnStop * run tests with verbose flag on CircleCI * spawn a reading routine to prevent connection from closing * get port from the listener random port is faster, but often results in ``` panic: listen tcp 127.0.0.1:44068: bind: address already in use [recovered] panic: listen tcp 127.0.0.1:44068: bind: address already in use goroutine 79 [running]: testing.tRunner.func1(0xc0001bd600) /usr/local/go/src/testing/testing.go:792 +0x387 panic(0x974d20, 0xc0001b0500) /usr/local/go/src/runtime/panic.go:513 +0x1b9 github.com/tendermint/tendermint/p2p.MakeSwitch(0xc0000f42a0, 0x0, 0x9fb9cc, 0x9, 0x9fc346, 0xb, 0xb42128, 0x0, 0x0, 0x0, ...) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:182 +0xa28 github.com/tendermint/tendermint/p2p.MakeConnectedSwitches(0xc0000f42a0, 0x2, 0xb42128, 0xb41eb8, 0x4f1205, 0xc0001bed80, 0x4f16ed) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:75 +0xf9 github.com/tendermint/tendermint/p2p.MakeSwitchPair(0xbb8d20, 0xc0001bd600, 0xb42128, 0x2f7, 0x4f16c0) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:94 +0x4c github.com/tendermint/tendermint/p2p.TestSwitches(0xc0001bd600) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:117 +0x58 testing.tRunner(0xc0001bd600, 0xb42038) /usr/local/go/src/testing/testing.go:827 +0xbf created by testing.(*T).Run /usr/local/go/src/testing/testing.go:878 +0x353 exit status 2 FAIL github.com/tendermint/tendermint/p2p 0.350s ```
6 years ago
p2p: file descriptor leaks (#3150) * close peer's connection to avoid fd leak Fixes #2967 * rename peer#Addr to RemoteAddr * fix test * fixes after Ethan's review * bring back the check * changelog entry * write a test for switch#acceptRoutine * increase timeouts? :( * remove extra assertNPeersWithTimeout * simplify test * assert number of peers (just to be safe) * Cleanup in OnStop * run tests with verbose flag on CircleCI * spawn a reading routine to prevent connection from closing * get port from the listener random port is faster, but often results in ``` panic: listen tcp 127.0.0.1:44068: bind: address already in use [recovered] panic: listen tcp 127.0.0.1:44068: bind: address already in use goroutine 79 [running]: testing.tRunner.func1(0xc0001bd600) /usr/local/go/src/testing/testing.go:792 +0x387 panic(0x974d20, 0xc0001b0500) /usr/local/go/src/runtime/panic.go:513 +0x1b9 github.com/tendermint/tendermint/p2p.MakeSwitch(0xc0000f42a0, 0x0, 0x9fb9cc, 0x9, 0x9fc346, 0xb, 0xb42128, 0x0, 0x0, 0x0, ...) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:182 +0xa28 github.com/tendermint/tendermint/p2p.MakeConnectedSwitches(0xc0000f42a0, 0x2, 0xb42128, 0xb41eb8, 0x4f1205, 0xc0001bed80, 0x4f16ed) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:75 +0xf9 github.com/tendermint/tendermint/p2p.MakeSwitchPair(0xbb8d20, 0xc0001bd600, 0xb42128, 0x2f7, 0x4f16c0) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:94 +0x4c github.com/tendermint/tendermint/p2p.TestSwitches(0xc0001bd600) /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:117 +0x58 testing.tRunner(0xc0001bd600, 0xb42038) /usr/local/go/src/testing/testing.go:827 +0xbf created by testing.(*T).Run /usr/local/go/src/testing/testing.go:878 +0x353 exit status 2 FAIL github.com/tendermint/tendermint/p2p 0.350s ```
6 years ago
  1. package p2p
  2. import (
  3. "bytes"
  4. "fmt"
  5. "io"
  6. "io/ioutil"
  7. "net"
  8. "net/http"
  9. "net/http/httptest"
  10. "regexp"
  11. "strconv"
  12. "sync"
  13. "testing"
  14. "time"
  15. stdprometheus "github.com/prometheus/client_golang/prometheus"
  16. "github.com/stretchr/testify/assert"
  17. "github.com/stretchr/testify/require"
  18. "github.com/tendermint/tendermint/config"
  19. "github.com/tendermint/tendermint/crypto/ed25519"
  20. "github.com/tendermint/tendermint/libs/log"
  21. "github.com/tendermint/tendermint/p2p/conn"
  22. )
// cfg is the P2P configuration shared by every test in this file.
// NOTE(review): this is mutable package-level state — tests that modify it
// (e.g. TestSwitchAcceptRoutine sets MaxNumInboundPeers) affect any test
// that runs after them.
var (
	cfg *config.P2PConfig
)

func init() {
	cfg = config.DefaultP2PConfig()
	cfg.PexReactor = true
	// tests dial from localhost, so multiple peers share the same IP
	cfg.AllowDuplicateIP = true
}
// PeerMessage records a single message received by a TestReactor: the sending
// peer's ID, the raw payload, and the reactor-wide arrival index (Counter).
type PeerMessage struct {
	PeerID  ID
	Bytes   []byte
	Counter int
}
// TestReactor is a reactor stub that, when logMessages is set, records every
// message it receives keyed by channel ID so tests can inspect them later.
type TestReactor struct {
	BaseReactor

	mtx          sync.Mutex                // guards msgsCounter and msgsReceived
	channels     []*conn.ChannelDescriptor // channels advertised via GetChannels
	logMessages  bool                      // if true, Receive records each message
	msgsCounter  int                       // total messages recorded so far
	msgsReceived map[byte][]PeerMessage    // recorded messages, per channel ID
}
  44. func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor {
  45. tr := &TestReactor{
  46. channels: channels,
  47. logMessages: logMessages,
  48. msgsReceived: make(map[byte][]PeerMessage),
  49. }
  50. tr.BaseReactor = *NewBaseReactor("TestReactor", tr)
  51. tr.SetLogger(log.TestingLogger())
  52. return tr
  53. }
  54. func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor {
  55. return tr.channels
  56. }
// AddPeer is a no-op; TestReactor does not track peers.
func (tr *TestReactor) AddPeer(peer Peer) {}

// RemovePeer is a no-op; TestReactor does not track peers.
func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}
  59. func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
  60. if tr.logMessages {
  61. tr.mtx.Lock()
  62. defer tr.mtx.Unlock()
  63. //fmt.Printf("Received: %X, %X\n", chID, msgBytes)
  64. tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter})
  65. tr.msgsCounter++
  66. }
  67. }
  68. func (tr *TestReactor) getMsgs(chID byte) []PeerMessage {
  69. tr.mtx.Lock()
  70. defer tr.mtx.Unlock()
  71. return tr.msgsReceived[chID]
  72. }
  73. //-----------------------------------------------------------------------------
  74. // convenience method for creating two switches connected to each other.
  75. // XXX: note this uses net.Pipe and not a proper TCP conn
  76. func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) {
  77. // Create two switches that will be interconnected.
  78. switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
  79. return switches[0], switches[1]
  80. }
  81. func initSwitchFunc(i int, sw *Switch) *Switch {
  82. sw.SetAddrBook(&addrBookMock{
  83. addrs: make(map[string]struct{}),
  84. ourAddrs: make(map[string]struct{})})
  85. // Make two reactors of two channels each
  86. sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
  87. {ID: byte(0x00), Priority: 10},
  88. {ID: byte(0x01), Priority: 10},
  89. }, true))
  90. sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
  91. {ID: byte(0x02), Priority: 10},
  92. {ID: byte(0x03), Priority: 10},
  93. }, true))
  94. return sw
  95. }
  96. func TestSwitches(t *testing.T) {
  97. s1, s2 := MakeSwitchPair(t, initSwitchFunc)
  98. defer s1.Stop()
  99. defer s2.Stop()
  100. if s1.Peers().Size() != 1 {
  101. t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
  102. }
  103. if s2.Peers().Size() != 1 {
  104. t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
  105. }
  106. // Lets send some messages
  107. ch0Msg := []byte("channel zero")
  108. ch1Msg := []byte("channel foo")
  109. ch2Msg := []byte("channel bar")
  110. s1.Broadcast(byte(0x00), ch0Msg)
  111. s1.Broadcast(byte(0x01), ch1Msg)
  112. s1.Broadcast(byte(0x02), ch2Msg)
  113. assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
  114. assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
  115. assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
  116. }
  117. func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
  118. ticker := time.NewTicker(checkPeriod)
  119. for {
  120. select {
  121. case <-ticker.C:
  122. msgs := reactor.getMsgs(channel)
  123. if len(msgs) > 0 {
  124. if !bytes.Equal(msgs[0].Bytes, msgBytes) {
  125. t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
  126. }
  127. return
  128. }
  129. case <-time.After(timeout):
  130. t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
  131. }
  132. }
  133. }
  134. func TestSwitchFiltersOutItself(t *testing.T) {
  135. s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
  136. // addr := s1.NodeInfo().NetAddress()
  137. // // add ourselves like we do in node.go#427
  138. // s1.addrBook.AddOurAddress(addr)
  139. // simulate s1 having a public IP by creating a remote peer with the same ID
  140. rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
  141. rp.Start()
  142. // addr should be rejected in addPeer based on the same ID
  143. err := s1.DialPeerWithAddress(rp.Addr(), false)
  144. if assert.Error(t, err) {
  145. if err, ok := err.(ErrRejected); ok {
  146. if !err.IsSelf() {
  147. t.Errorf("expected self to be rejected")
  148. }
  149. } else {
  150. t.Errorf("expected ErrRejected")
  151. }
  152. }
  153. assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
  154. assert.False(t, s1.addrBook.HasAddress(rp.Addr()))
  155. rp.Stop()
  156. assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
  157. }
  158. func TestSwitchPeerFilter(t *testing.T) {
  159. var (
  160. filters = []PeerFilterFunc{
  161. func(_ IPeerSet, _ Peer) error { return nil },
  162. func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied!") },
  163. func(_ IPeerSet, _ Peer) error { return nil },
  164. }
  165. sw = MakeSwitch(
  166. cfg,
  167. 1,
  168. "testing",
  169. "123.123.123",
  170. initSwitchFunc,
  171. SwitchPeerFilters(filters...),
  172. )
  173. )
  174. defer sw.Stop()
  175. // simulate remote peer
  176. rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
  177. rp.Start()
  178. defer rp.Stop()
  179. p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
  180. chDescs: sw.chDescs,
  181. onPeerError: sw.StopPeerForError,
  182. reactorsByCh: sw.reactorsByCh,
  183. })
  184. if err != nil {
  185. t.Fatal(err)
  186. }
  187. err = sw.addPeer(p)
  188. if err, ok := err.(ErrRejected); ok {
  189. if !err.IsFiltered() {
  190. t.Errorf("expected peer to be filtered")
  191. }
  192. } else {
  193. t.Errorf("expected ErrRejected")
  194. }
  195. }
  196. func TestSwitchPeerFilterTimeout(t *testing.T) {
  197. var (
  198. filters = []PeerFilterFunc{
  199. func(_ IPeerSet, _ Peer) error {
  200. time.Sleep(10 * time.Millisecond)
  201. return nil
  202. },
  203. }
  204. sw = MakeSwitch(
  205. cfg,
  206. 1,
  207. "testing",
  208. "123.123.123",
  209. initSwitchFunc,
  210. SwitchFilterTimeout(5*time.Millisecond),
  211. SwitchPeerFilters(filters...),
  212. )
  213. )
  214. defer sw.Stop()
  215. // simulate remote peer
  216. rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
  217. rp.Start()
  218. defer rp.Stop()
  219. p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
  220. chDescs: sw.chDescs,
  221. onPeerError: sw.StopPeerForError,
  222. reactorsByCh: sw.reactorsByCh,
  223. })
  224. if err != nil {
  225. t.Fatal(err)
  226. }
  227. err = sw.addPeer(p)
  228. if _, ok := err.(ErrFilterTimeout); !ok {
  229. t.Errorf("expected ErrFilterTimeout")
  230. }
  231. }
  232. func TestSwitchPeerFilterDuplicate(t *testing.T) {
  233. sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
  234. sw.Start()
  235. defer sw.Stop()
  236. // simulate remote peer
  237. rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
  238. rp.Start()
  239. defer rp.Stop()
  240. p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
  241. chDescs: sw.chDescs,
  242. onPeerError: sw.StopPeerForError,
  243. reactorsByCh: sw.reactorsByCh,
  244. })
  245. if err != nil {
  246. t.Fatal(err)
  247. }
  248. if err := sw.addPeer(p); err != nil {
  249. t.Fatal(err)
  250. }
  251. err = sw.addPeer(p)
  252. if errRej, ok := err.(ErrRejected); ok {
  253. if !errRej.IsDuplicate() {
  254. t.Errorf("expected peer to be duplicate. got %v", errRej)
  255. }
  256. } else {
  257. t.Errorf("expected ErrRejected, got %v", err)
  258. }
  259. }
  260. func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
  261. time.Sleep(timeout)
  262. if sw.Peers().Size() != 0 {
  263. t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size())
  264. }
  265. }
  266. func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
  267. assert, require := assert.New(t), require.New(t)
  268. sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
  269. err := sw.Start()
  270. if err != nil {
  271. t.Error(err)
  272. }
  273. defer sw.Stop()
  274. // simulate remote peer
  275. rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
  276. rp.Start()
  277. defer rp.Stop()
  278. p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
  279. chDescs: sw.chDescs,
  280. onPeerError: sw.StopPeerForError,
  281. reactorsByCh: sw.reactorsByCh,
  282. })
  283. require.Nil(err)
  284. err = sw.addPeer(p)
  285. require.Nil(err)
  286. require.NotNil(sw.Peers().Get(rp.ID()))
  287. // simulate failure by closing connection
  288. p.(*peer).CloseConn()
  289. assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
  290. assert.False(p.IsRunning())
  291. }
  292. func TestSwitchStopPeerForError(t *testing.T) {
  293. s := httptest.NewServer(stdprometheus.UninstrumentedHandler())
  294. defer s.Close()
  295. scrapeMetrics := func() string {
  296. resp, _ := http.Get(s.URL)
  297. buf, _ := ioutil.ReadAll(resp.Body)
  298. return string(buf)
  299. }
  300. namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers"
  301. re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`)
  302. peersMetricValue := func() float64 {
  303. matches := re.FindStringSubmatch(scrapeMetrics())
  304. f, _ := strconv.ParseFloat(matches[1], 64)
  305. return f
  306. }
  307. p2pMetrics := PrometheusMetrics(namespace)
  308. // make two connected switches
  309. sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch {
  310. // set metrics on sw1
  311. if i == 0 {
  312. opt := WithMetrics(p2pMetrics)
  313. opt(sw)
  314. }
  315. return initSwitchFunc(i, sw)
  316. })
  317. assert.Equal(t, len(sw1.Peers().List()), 1)
  318. assert.EqualValues(t, 1, peersMetricValue())
  319. // send messages to the peer from sw1
  320. p := sw1.Peers().List()[0]
  321. p.Send(0x1, []byte("here's a message to send"))
  322. // stop sw2. this should cause the p to fail,
  323. // which results in calling StopPeerForError internally
  324. sw2.Stop()
  325. // now call StopPeerForError explicitly, eg. from a reactor
  326. sw1.StopPeerForError(p, fmt.Errorf("some err"))
  327. assert.Equal(t, len(sw1.Peers().List()), 0)
  328. assert.EqualValues(t, 0, peersMetricValue())
  329. }
// TestSwitchReconnectsToPersistentPeer checks two reconnect behaviors for
// persistent peers: (1) after an established connection is closed, the switch
// dials the peer again; (2) after a deliberately failed first dial
// (TestDialFail), the switch keeps retrying until the connection succeeds.
func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
	assert, require := assert.New(t), require.New(t)

	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
	err := sw.Start()
	if err != nil {
		t.Error(err)
	}
	defer sw.Stop()

	// simulate remote peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()

	// dial the remote peer as a *persistent* peer
	p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
		chDescs:      sw.chDescs,
		onPeerError:  sw.StopPeerForError,
		persistent:   true,
		reactorsByCh: sw.reactorsByCh,
	})
	require.Nil(err)
	require.Nil(sw.addPeer(p))
	require.NotNil(sw.Peers().Get(rp.ID()))

	// simulate failure by closing connection
	p.(*peer).CloseConn()

	// TODO: remove sleep, detect the disconnection, wait for reconnect
	// poll up to ~5s (20 * 250ms) for the switch to re-establish the peer
	npeers := sw.Peers().Size()
	for i := 0; i < 20; i++ {
		time.Sleep(250 * time.Millisecond)
		npeers = sw.Peers().Size()
		if npeers > 0 {
			break
		}
	}
	assert.NotZero(npeers)
	// the original peer instance must not have been revived — the switch
	// reconnects with a fresh peer object
	assert.False(p.IsRunning())

	// simulate another remote peer
	rp = &remotePeer{
		PrivKey: ed25519.GenPrivKey(),
		Config:  cfg,
		// Use different interface to prevent duplicate IP filter, this will break
		// beyond two peers.
		listenAddr: "127.0.0.1:0",
	}
	rp.Start()
	defer rp.Stop()

	// simulate first time dial failure
	conf := config.DefaultP2PConfig()
	conf.TestDialFail = true
	err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true)
	require.NotNil(err)

	// DialPeerWithAddres - sw.peerConfig resets the dialer
	// TODO: same as above — poll until the retry succeeds and a 2nd peer appears
	for i := 0; i < 20; i++ {
		time.Sleep(250 * time.Millisecond)
		npeers = sw.Peers().Size()
		if npeers > 1 {
			break
		}
	}
	assert.EqualValues(2, npeers)
}
  390. func TestSwitchFullConnectivity(t *testing.T) {
  391. switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
  392. defer func() {
  393. for _, sw := range switches {
  394. sw.Stop()
  395. }
  396. }()
  397. for i, sw := range switches {
  398. if sw.Peers().Size() != 2 {
  399. t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i)
  400. }
  401. }
  402. }
  403. func TestSwitchAcceptRoutine(t *testing.T) {
  404. cfg.MaxNumInboundPeers = 5
  405. // make switch
  406. sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
  407. err := sw.Start()
  408. require.NoError(t, err)
  409. defer sw.Stop()
  410. remotePeers := make([]*remotePeer, 0)
  411. assert.Equal(t, 0, sw.Peers().Size())
  412. // 1. check we connect up to MaxNumInboundPeers
  413. for i := 0; i < cfg.MaxNumInboundPeers; i++ {
  414. rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
  415. remotePeers = append(remotePeers, rp)
  416. rp.Start()
  417. c, err := rp.Dial(sw.NodeInfo().NetAddress())
  418. require.NoError(t, err)
  419. // spawn a reading routine to prevent connection from closing
  420. go func(c net.Conn) {
  421. for {
  422. one := make([]byte, 1)
  423. _, err := c.Read(one)
  424. if err != nil {
  425. return
  426. }
  427. }
  428. }(c)
  429. }
  430. time.Sleep(10 * time.Millisecond)
  431. assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
  432. // 2. check we close new connections if we already have MaxNumInboundPeers peers
  433. rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
  434. rp.Start()
  435. conn, err := rp.Dial(sw.NodeInfo().NetAddress())
  436. require.NoError(t, err)
  437. // check conn is closed
  438. one := make([]byte, 1)
  439. conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
  440. _, err = conn.Read(one)
  441. assert.Equal(t, io.EOF, err)
  442. assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
  443. rp.Stop()
  444. // stop remote peers
  445. for _, rp := range remotePeers {
  446. rp.Stop()
  447. }
  448. }
  449. func BenchmarkSwitchBroadcast(b *testing.B) {
  450. s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
  451. // Make bar reactors of bar channels each
  452. sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
  453. {ID: byte(0x00), Priority: 10},
  454. {ID: byte(0x01), Priority: 10},
  455. }, false))
  456. sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
  457. {ID: byte(0x02), Priority: 10},
  458. {ID: byte(0x03), Priority: 10},
  459. }, false))
  460. return sw
  461. })
  462. defer s1.Stop()
  463. defer s2.Stop()
  464. // Allow time for goroutines to boot up
  465. time.Sleep(1 * time.Second)
  466. b.ResetTimer()
  467. numSuccess, numFailure := 0, 0
  468. // Send random message from foo channel to another
  469. for i := 0; i < b.N; i++ {
  470. chID := byte(i % 4)
  471. successChan := s1.Broadcast(chID, []byte("test data"))
  472. for s := range successChan {
  473. if s {
  474. numSuccess++
  475. } else {
  476. numFailure++
  477. }
  478. }
  479. }
  480. b.Logf("success: %v, failure: %v", numSuccess, numFailure)
  481. }
  482. type addrBookMock struct {
  483. addrs map[string]struct{}
  484. ourAddrs map[string]struct{}
  485. }
  486. var _ AddrBook = (*addrBookMock)(nil)
  487. func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
  488. book.addrs[addr.String()] = struct{}{}
  489. return nil
  490. }
  491. func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }
  492. func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
  493. _, ok := book.ourAddrs[addr.String()]
  494. return ok
  495. }
  496. func (book *addrBookMock) MarkGood(*NetAddress) {}
  497. func (book *addrBookMock) HasAddress(addr *NetAddress) bool {
  498. _, ok := book.addrs[addr.String()]
  499. return ok
  500. }
  501. func (book *addrBookMock) RemoveAddress(addr *NetAddress) {
  502. delete(book.addrs, addr.String())
  503. }
  504. func (book *addrBookMock) Save() {}