
p2p: file descriptor leaks (#3150)

* close peer's connection to avoid fd leak. Fixes #2967
* rename peer#Addr to RemoteAddr
* fix test
* fixes after Ethan's review
* bring back the check
* changelog entry
* write a test for switch#acceptRoutine
* increase timeouts? :(
* remove extra assertNPeersWithTimeout
* simplify test
* assert number of peers (just to be safe)
* Cleanup in OnStop
* run tests with verbose flag on CircleCI
* spawn a reading routine to prevent connection from closing
* get port from the listener. A random port is faster, but often results in:

```
panic: listen tcp 127.0.0.1:44068: bind: address already in use [recovered]
        panic: listen tcp 127.0.0.1:44068: bind: address already in use

goroutine 79 [running]:
testing.tRunner.func1(0xc0001bd600)
        /usr/local/go/src/testing/testing.go:792 +0x387
panic(0x974d20, 0xc0001b0500)
        /usr/local/go/src/runtime/panic.go:513 +0x1b9
github.com/tendermint/tendermint/p2p.MakeSwitch(0xc0000f42a0, 0x0, 0x9fb9cc, 0x9, 0x9fc346, 0xb, 0xb42128, 0x0, 0x0, 0x0, ...)
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:182 +0xa28
github.com/tendermint/tendermint/p2p.MakeConnectedSwitches(0xc0000f42a0, 0x2, 0xb42128, 0xb41eb8, 0x4f1205, 0xc0001bed80, 0x4f16ed)
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/test_util.go:75 +0xf9
github.com/tendermint/tendermint/p2p.MakeSwitchPair(0xbb8d20, 0xc0001bd600, 0xb42128, 0x2f7, 0x4f16c0)
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:94 +0x4c
github.com/tendermint/tendermint/p2p.TestSwitches(0xc0001bd600)
        /home/vagrant/go/src/github.com/tendermint/tendermint/p2p/switch_test.go:117 +0x58
testing.tRunner(0xc0001bd600, 0xb42038)
        /usr/local/go/src/testing/testing.go:827 +0xbf
created by testing.(*T).Run
        /usr/local/go/src/testing/testing.go:878 +0x353
exit status 2
FAIL    github.com/tendermint/tendermint/p2p    0.350s
```
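The last bullet refers to binding on port 0 and reading the assigned port back from the listener, instead of picking a random port up front and hoping it is free. A minimal standalone sketch of that pattern (not the test helper itself, just the standard-library technique it relies on):

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Port 0 asks the OS to assign any free port.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer ln.Close()

	// Read the actual port back from the listener. Because the OS picked a
	// port that is guaranteed to be free, this cannot race into
	// "bind: address already in use" the way a randomly chosen port can.
	port := ln.Addr().(*net.TCPAddr).Port
	fmt.Println("listening on port", port)
}
```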
package p2p

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	"regexp"
	"strconv"
	"sync"
	"testing"
	"time"

	stdprometheus "github.com/prometheus/client_golang/prometheus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p/conn"
)
var (
	cfg *config.P2PConfig
)

func init() {
	cfg = config.DefaultP2PConfig()
	cfg.PexReactor = true
	cfg.AllowDuplicateIP = true
}
type PeerMessage struct {
	PeerID  ID
	Bytes   []byte
	Counter int
}

type TestReactor struct {
	BaseReactor

	mtx          sync.Mutex
	channels     []*conn.ChannelDescriptor
	logMessages  bool
	msgsCounter  int
	msgsReceived map[byte][]PeerMessage
}
func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor {
	tr := &TestReactor{
		channels:     channels,
		logMessages:  logMessages,
		msgsReceived: make(map[byte][]PeerMessage),
	}
	tr.BaseReactor = *NewBaseReactor("TestReactor", tr)
	tr.SetLogger(log.TestingLogger())
	return tr
}

func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor {
	return tr.channels
}

func (tr *TestReactor) AddPeer(peer Peer) {}

func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {}

func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) {
	if tr.logMessages {
		tr.mtx.Lock()
		defer tr.mtx.Unlock()
		//fmt.Printf("Received: %X, %X\n", chID, msgBytes)
		tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter})
		tr.msgsCounter++
	}
}

func (tr *TestReactor) getMsgs(chID byte) []PeerMessage {
	tr.mtx.Lock()
	defer tr.mtx.Unlock()
	return tr.msgsReceived[chID]
}
//-----------------------------------------------------------------------------

// MakeSwitchPair is a convenience method for creating two switches connected
// to each other.
// XXX: note this uses net.Pipe and not a proper TCP conn
func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) {
	// Create two switches that will be interconnected.
	switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
	return switches[0], switches[1]
}
func initSwitchFunc(i int, sw *Switch) *Switch {
	sw.SetAddrBook(&addrBookMock{
		addrs:    make(map[string]struct{}),
		ourAddrs: make(map[string]struct{}),
	})

	// Make two reactors of two channels each
	sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
		{ID: byte(0x00), Priority: 10},
		{ID: byte(0x01), Priority: 10},
	}, true))
	sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
		{ID: byte(0x02), Priority: 10},
		{ID: byte(0x03), Priority: 10},
	}, true))

	return sw
}
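
// TestSwitches connects two switches (over net.Pipe, via MakeSwitchPair),
// checks that each sees exactly one peer, and verifies that broadcasts on
// channels 0x00-0x02 arrive at the reactor registered for each channel.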
func TestSwitches(t *testing.T) {
	s1, s2 := MakeSwitchPair(t, initSwitchFunc)
	defer s1.Stop()
	defer s2.Stop()

	if s1.Peers().Size() != 1 {
		t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
	}
	if s2.Peers().Size() != 1 {
		t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
	}

	// Let's send some messages
	ch0Msg := []byte("channel zero")
	ch1Msg := []byte("channel foo")
	ch2Msg := []byte("channel bar")
	s1.Broadcast(byte(0x00), ch0Msg)
	s1.Broadcast(byte(0x01), ch1Msg)
	s1.Broadcast(byte(0x02), ch2Msg)

	assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
	assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
	assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
}
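
// assertMsgReceivedWithTimeout polls the reactor every checkPeriod until the
// first message on the given channel matches msgBytes, failing the test if
// the timeout elapses first.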
func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
	ticker := time.NewTicker(checkPeriod)
	defer ticker.Stop()
	// Create the timeout channel once, outside the loop: a time.After inside
	// the select would be re-created on every tick and could never fire.
	timeoutCh := time.After(timeout)
	for {
		select {
		case <-ticker.C:
			msgs := reactor.getMsgs(channel)
			if len(msgs) > 0 {
				if !bytes.Equal(msgs[0].Bytes, msgBytes) {
					t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
				}
				return
			}
		case <-timeoutCh:
			t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
		}
	}
}
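
// TestSwitchFiltersOutItself dials a remote peer that uses the switch's own
// node key, and expects the connection to be rejected as a self-connection
// and the address to be recorded as our own in the address book.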
func TestSwitchFiltersOutItself(t *testing.T) {
	s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)

	// addr := s1.NodeInfo().NetAddress()
	// // add ourselves like we do in node.go#427
	// s1.addrBook.AddOurAddress(addr)

	// simulate s1 having a public IP by creating a remote peer with the same ID
	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
	rp.Start()

	// addr should be rejected in addPeer based on the same ID
	err := s1.DialPeerWithAddress(rp.Addr(), false)
	if assert.Error(t, err) {
		if err, ok := err.(ErrRejected); ok {
			if !err.IsSelf() {
				t.Errorf("expected self to be rejected")
			}
		} else {
			t.Errorf("expected ErrRejected")
		}
	}

	assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
	assert.False(t, s1.addrBook.HasAddress(rp.Addr()))

	rp.Stop()

	assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
}
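
// TestSwitchPeerFilter installs a chain of peer filters, one of which denies
// every peer, and expects addPeer to fail with an ErrRejected that reports
// IsFiltered.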
func TestSwitchPeerFilter(t *testing.T) {
	var (
		filters = []PeerFilterFunc{
			func(_ IPeerSet, _ Peer) error { return nil },
			func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied!") },
			func(_ IPeerSet, _ Peer) error { return nil },
		}
		sw = MakeSwitch(
			cfg,
			1,
			"testing",
			"123.123.123",
			initSwitchFunc,
			SwitchPeerFilters(filters...),
		)
	)
	defer sw.Stop()

	// simulate remote peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()

	p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
		chDescs:      sw.chDescs,
		onPeerError:  sw.StopPeerForError,
		reactorsByCh: sw.reactorsByCh,
	})
	if err != nil {
		t.Fatal(err)
	}

	err = sw.addPeer(p)
	if err, ok := err.(ErrRejected); ok {
		if !err.IsFiltered() {
			t.Errorf("expected peer to be filtered")
		}
	} else {
		t.Errorf("expected ErrRejected")
	}
}
func TestSwitchPeerFilterTimeout(t *testing.T) {
	var (
		filters = []PeerFilterFunc{
			func(_ IPeerSet, _ Peer) error {
				time.Sleep(10 * time.Millisecond)
				return nil
			},
		}
		sw = MakeSwitch(
			cfg,
			1,
			"testing",
			"123.123.123",
			initSwitchFunc,
			SwitchFilterTimeout(5*time.Millisecond),
			SwitchPeerFilters(filters...),
		)
	)
	defer sw.Stop()

	// simulate remote peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()

	p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
		chDescs:      sw.chDescs,
		onPeerError:  sw.StopPeerForError,
		reactorsByCh: sw.reactorsByCh,
	})
	if err != nil {
		t.Fatal(err)
	}

	err = sw.addPeer(p)
	if _, ok := err.(ErrFilterTimeout); !ok {
		t.Errorf("expected ErrFilterTimeout")
	}
}
func TestSwitchPeerFilterDuplicate(t *testing.T) {
	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)

	// simulate remote peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()

	p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
		chDescs:      sw.chDescs,
		onPeerError:  sw.StopPeerForError,
		reactorsByCh: sw.reactorsByCh,
	})
	if err != nil {
		t.Fatal(err)
	}

	if err := sw.addPeer(p); err != nil {
		t.Fatal(err)
	}

	err = sw.addPeer(p)
	if err, ok := err.(ErrRejected); ok {
		if !err.IsDuplicate() {
			t.Errorf("expected peer to be duplicate")
		}
	} else {
		t.Errorf("expected ErrRejected")
	}
}
func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
	time.Sleep(timeout)
	if sw.Peers().Size() != 0 {
		t.Fatalf("Expected %v to not be connected to any peers, got %d", sw, sw.Peers().Size())
	}
}
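
// TestSwitchStopsNonPersistentPeerOnError checks that when a non-persistent
// peer's connection dies, the switch drops the peer and does not reconnect.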
func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
	assert, require := assert.New(t), require.New(t)

	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
	err := sw.Start()
	if err != nil {
		t.Error(err)
	}
	defer sw.Stop()

	// simulate remote peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()

	p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
		chDescs:      sw.chDescs,
		onPeerError:  sw.StopPeerForError,
		reactorsByCh: sw.reactorsByCh,
	})
	require.Nil(err)

	err = sw.addPeer(p)
	require.Nil(err)

	require.NotNil(sw.Peers().Get(rp.ID()))

	// simulate failure by closing connection
	p.(*peer).CloseConn()

	assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
	assert.False(p.IsRunning())
}
func TestSwitchStopPeerForError(t *testing.T) {
	s := httptest.NewServer(stdprometheus.UninstrumentedHandler())
	defer s.Close()

	scrapeMetrics := func() string {
		resp, _ := http.Get(s.URL)
		defer resp.Body.Close()
		buf, _ := ioutil.ReadAll(resp.Body)
		return string(buf)
	}

	namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers"
	re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`)
	peersMetricValue := func() float64 {
		matches := re.FindStringSubmatch(scrapeMetrics())
		f, _ := strconv.ParseFloat(matches[1], 64)
		return f
	}

	p2pMetrics := PrometheusMetrics(namespace)

	// make two connected switches
	sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch {
		// set metrics on sw1
		if i == 0 {
			opt := WithMetrics(p2pMetrics)
			opt(sw)
		}
		return initSwitchFunc(i, sw)
	})

	assert.Equal(t, len(sw1.Peers().List()), 1)
	assert.EqualValues(t, 1, peersMetricValue())

	// send messages to the peer from sw1
	p := sw1.Peers().List()[0]
	p.Send(0x1, []byte("here's a message to send"))

	// stop sw2. this should cause the p to fail,
	// which results in calling StopPeerForError internally
	sw2.Stop()

	// now call StopPeerForError explicitly, e.g. from a reactor
	sw1.StopPeerForError(p, fmt.Errorf("some err"))

	assert.Equal(t, len(sw1.Peers().List()), 0)
	assert.EqualValues(t, 0, peersMetricValue())
}
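
// TestSwitchReconnectsToPersistentPeer checks that the switch redials a
// persistent peer after its connection is closed, and keeps retrying when
// the first dial attempt to a second peer fails.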
func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
	assert, require := assert.New(t), require.New(t)

	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
	err := sw.Start()
	if err != nil {
		t.Error(err)
	}
	defer sw.Stop()

	// simulate remote peer
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	defer rp.Stop()

	p, err := sw.transport.Dial(*rp.Addr(), peerConfig{
		chDescs:      sw.chDescs,
		onPeerError:  sw.StopPeerForError,
		persistent:   true,
		reactorsByCh: sw.reactorsByCh,
	})
	require.Nil(err)

	require.Nil(sw.addPeer(p))
	require.NotNil(sw.Peers().Get(rp.ID()))

	// simulate failure by closing connection
	p.(*peer).CloseConn()

	// TODO: remove sleep, detect the disconnection, wait for reconnect
	npeers := sw.Peers().Size()
	for i := 0; i < 20; i++ {
		time.Sleep(250 * time.Millisecond)
		npeers = sw.Peers().Size()
		if npeers > 0 {
			break
		}
	}
	assert.NotZero(npeers)
	assert.False(p.IsRunning())

	// simulate another remote peer
	rp = &remotePeer{
		PrivKey: ed25519.GenPrivKey(),
		Config:  cfg,
		// Use different interface to prevent duplicate IP filter, this will break
		// beyond two peers.
		listenAddr: "127.0.0.1:0",
	}
	rp.Start()
	defer rp.Stop()

	// simulate first time dial failure
	conf := config.DefaultP2PConfig()
	conf.TestDialFail = true
	err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true)
	require.NotNil(err)

	// DialPeerWithAddress - sw.peerConfig resets the dialer
	// TODO: same as above
	for i := 0; i < 20; i++ {
		time.Sleep(250 * time.Millisecond)
		npeers = sw.Peers().Size()
		if npeers > 1 {
			break
		}
	}
	assert.EqualValues(2, npeers)
}
func TestSwitchFullConnectivity(t *testing.T) {
	switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
	defer func() {
		for _, sw := range switches {
			sw.Stop()
		}
	}()

	for i, sw := range switches {
		if sw.Peers().Size() != 2 {
			t.Fatalf("Expected each switch to be connected to 2 others, but switch %d is only connected to %d", i, sw.Peers().Size())
		}
	}
}
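
// TestSwitchAcceptRoutine fills the switch up to MaxNumInboundPeers inbound
// connections, then verifies that one additional inbound connection is
// closed (its read returns io.EOF) instead of being accepted as a peer.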
func TestSwitchAcceptRoutine(t *testing.T) {
	cfg.MaxNumInboundPeers = 5

	// make switch
	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
	err := sw.Start()
	require.NoError(t, err)
	defer sw.Stop()

	remotePeers := make([]*remotePeer, 0)
	assert.Equal(t, 0, sw.Peers().Size())

	// 1. check we connect up to MaxNumInboundPeers
	for i := 0; i < cfg.MaxNumInboundPeers; i++ {
		rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
		remotePeers = append(remotePeers, rp)
		rp.Start()
		c, err := rp.Dial(sw.NodeInfo().NetAddress())
		require.NoError(t, err)
		// spawn a reading routine to prevent connection from closing
		go func(c net.Conn) {
			for {
				one := make([]byte, 1)
				_, err := c.Read(one)
				if err != nil {
					return
				}
			}
		}(c)
	}
	time.Sleep(10 * time.Millisecond)
	assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())

	// 2. check we close new connections if we already have MaxNumInboundPeers peers
	rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg}
	rp.Start()
	conn, err := rp.Dial(sw.NodeInfo().NetAddress())
	require.NoError(t, err)
	// check conn is closed
	one := make([]byte, 1)
	conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond))
	_, err = conn.Read(one)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size())
	rp.Stop()

	// stop remote peers
	for _, rp := range remotePeers {
		rp.Stop()
	}
}
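
// BenchmarkSwitchBroadcast measures Broadcast throughput across two connected
// switches, cycling over four channels, with message logging disabled in the
// test reactors.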
func BenchmarkSwitchBroadcast(b *testing.B) {
	s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
		// Make two reactors of two channels each
		sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
			{ID: byte(0x00), Priority: 10},
			{ID: byte(0x01), Priority: 10},
		}, false))
		sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
			{ID: byte(0x02), Priority: 10},
			{ID: byte(0x03), Priority: 10},
		}, false))
		return sw
	})
	defer s1.Stop()
	defer s2.Stop()

	// Allow time for goroutines to boot up
	time.Sleep(1 * time.Second)

	b.ResetTimer()

	numSuccess, numFailure := 0, 0

	// Send random messages, cycling over the four channels
	for i := 0; i < b.N; i++ {
		chID := byte(i % 4)
		successChan := s1.Broadcast(chID, []byte("test data"))
		for s := range successChan {
			if s {
				numSuccess++
			} else {
				numFailure++
			}
		}
	}
	b.Logf("success: %v, failure: %v", numSuccess, numFailure)
}
type addrBookMock struct {
	addrs    map[string]struct{}
	ourAddrs map[string]struct{}
}

var _ AddrBook = (*addrBookMock)(nil)

func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
	book.addrs[addr.String()] = struct{}{}
	return nil
}

func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }

func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
	_, ok := book.ourAddrs[addr.String()]
	return ok
}

func (book *addrBookMock) MarkGood(*NetAddress) {}

func (book *addrBookMock) HasAddress(addr *NetAddress) bool {
	_, ok := book.addrs[addr.String()]
	return ok
}

func (book *addrBookMock) RemoveAddress(addr *NetAddress) {
	delete(book.addrs, addr.String())
}

func (book *addrBookMock) Save() {}