package p2p

import (
	"context"
	"errors"
	"fmt"
	"math"
	"math/rand"
	"sort"
	"sync"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/google/orderedcode"
	dbm "github.com/tendermint/tm-db"

	tmsync "github.com/tendermint/tendermint/libs/sync"
	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
)

const (
	// retryNever is returned by retryDelay() when retries are disabled.
	retryNever time.Duration = math.MaxInt64
)

// PeerStatus is a peer status.
//
// The peer manager has many more internal states for a peer (e.g. dialing,
// connected, evicting, and so on), which are tracked separately. PeerStatus is
// for external use outside of the peer manager.
type PeerStatus string

const (
	PeerStatusUp   PeerStatus = "up"   // connected and ready
	PeerStatusDown PeerStatus = "down" // disconnected
	PeerStatusGood PeerStatus = "good" // peer observed as good
	PeerStatusBad  PeerStatus = "bad"  // peer observed as bad
)

// PeerScore is a numeric score assigned to a peer (higher is better).
type PeerScore uint8

const (
	PeerScorePersistent PeerScore = math.MaxUint8 // persistent peers
)

// PeerUpdate is a peer update event sent via PeerUpdates.
type PeerUpdate struct {
	NodeID NodeID
	Status PeerStatus
}

// PeerUpdates is a peer update subscription with notifications about peer
// events (currently just status changes).
type PeerUpdates struct {
	routerUpdatesCh  chan PeerUpdate
	reactorUpdatesCh chan PeerUpdate
	closeCh          chan struct{}
	closeOnce        sync.Once
}

// NewPeerUpdates creates a new PeerUpdates subscription. It is primarily for
// internal use; callers should typically use PeerManager.Subscribe(). The
// subscriber must call Close() when done.
func NewPeerUpdates(updatesCh chan PeerUpdate, buf int) *PeerUpdates {
	return &PeerUpdates{
		reactorUpdatesCh: updatesCh,
		routerUpdatesCh:  make(chan PeerUpdate, buf),
		closeCh:          make(chan struct{}),
	}
}

// Updates returns a channel for consuming peer updates.
func (pu *PeerUpdates) Updates() <-chan PeerUpdate {
	return pu.reactorUpdatesCh
}

// SendUpdate pushes information about a peer into the peer manager via the
// router update channel, e.g. a subscriber reporting a peer's observed
// behavior as good or bad.
func (pu *PeerUpdates) SendUpdate(update PeerUpdate) {
	select {
	case <-pu.closeCh:
	case pu.routerUpdatesCh <- update:
	}
}

// Close closes the peer updates subscription.
func (pu *PeerUpdates) Close() {
	pu.closeOnce.Do(func() {
		// NOTE: We don't close updatesCh since multiple goroutines may be
		// sending on it. The PeerManager senders will select on closeCh as well
		// to avoid blocking on a closed subscription.
		close(pu.closeCh)
	})
}

// Done returns a channel that is closed when the subscription is closed.
func (pu *PeerUpdates) Done() <-chan struct{} {
	return pu.closeCh
}

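// Example (an illustrative sketch, not part of this package): a subscriber
// consuming peer updates and reporting observed behavior back. The stopCh
// channel is an assumption of the sketch, not part of the API.
//
//	peerUpdates := peerManager.Subscribe()
//	defer peerUpdates.Close()
//	for {
//		select {
//		case update := <-peerUpdates.Updates():
//			if update.Status == PeerStatusUp {
//				// Peer is ready; once we've observed its behavior, report it:
//				peerUpdates.SendUpdate(PeerUpdate{NodeID: update.NodeID, Status: PeerStatusGood})
//			}
//		case <-stopCh:
//			return
//		}
//	}
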
// PeerManagerOptions specifies options for a PeerManager.
type PeerManagerOptions struct {
	// PersistentPeers are peers that we want to maintain persistent connections
	// to. These will be scored higher than other peers, and if
	// MaxConnectedUpgrade is non-zero any lower-scored peers will be evicted if
	// necessary to make room for these.
	PersistentPeers []NodeID

	// MaxPeers is the maximum number of peers to track information about, i.e.
	// store in the peer store. When exceeded, the lowest-scored unconnected peers
	// will be deleted. 0 means no limit.
	MaxPeers uint16

	// MaxConnected is the maximum number of connected peers (inbound and
	// outbound). 0 means no limit.
	MaxConnected uint16

	// MaxConnectedUpgrade is the maximum number of additional connections to
	// use for probing any better-scored peers to upgrade to when all connection
	// slots are full. 0 disables peer upgrading.
	//
	// For example, if we are already connected to MaxConnected peers, but we
	// know or learn about better-scored peers (e.g. configured persistent
	// peers) that we are not connected to, then we can probe these peers by
	// using up to MaxConnectedUpgrade connections, and once connected evict the
	// lowest-scored connected peers. This also works for inbound connections,
	// i.e. if a higher-scored peer attempts to connect to us, we can accept
	// the connection and evict a lower-scored peer.
	MaxConnectedUpgrade uint16

	// MinRetryTime is the minimum time to wait between retries. Retry times
	// double for each retry, up to MaxRetryTime. 0 disables retries.
	MinRetryTime time.Duration

	// MaxRetryTime is the maximum time to wait between retries. 0 means
	// no maximum, in which case the retry time will keep doubling.
	MaxRetryTime time.Duration

	// MaxRetryTimePersistent is the maximum time to wait between retries for
	// peers listed in PersistentPeers. 0 uses MaxRetryTime instead.
	MaxRetryTimePersistent time.Duration

	// RetryTimeJitter is the upper bound of a random interval added to
	// retry times, to avoid thundering herds. 0 disables jitter.
	RetryTimeJitter time.Duration

	// PeerScores sets fixed scores for specific peers. It is mainly used
	// for testing. A score of 0 is ignored.
	PeerScores map[NodeID]PeerScore

	// PrivatePeers defines a set of NodeIDs which the PEX reactor will
	// consider private and never gossip.
	PrivatePeers map[NodeID]struct{}

	// persistentPeers provides fast PersistentPeers lookups. It is built
	// by optimize().
	persistentPeers map[NodeID]bool
}

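// A minimal configuration sketch (the values here are illustrative
// assumptions, not recommended defaults):
//
//	opts := PeerManagerOptions{
//		MaxPeers:            1000,
//		MaxConnected:        64,
//		MaxConnectedUpgrade: 4,
//		MinRetryTime:        500 * time.Millisecond,
//		MaxRetryTime:        30 * time.Minute,
//		RetryTimeJitter:     5 * time.Second,
//	}
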
// Validate validates the options.
func (o *PeerManagerOptions) Validate() error {
	for _, id := range o.PersistentPeers {
		if err := id.Validate(); err != nil {
			return fmt.Errorf("invalid PersistentPeer ID %q: %w", id, err)
		}
	}

	for id := range o.PrivatePeers {
		if err := id.Validate(); err != nil {
			return fmt.Errorf("invalid private peer ID %q: %w", id, err)
		}
	}

	if o.MaxConnected > 0 && len(o.PersistentPeers) > int(o.MaxConnected) {
		return fmt.Errorf("number of persistent peers %v can't exceed MaxConnected %v",
			len(o.PersistentPeers), o.MaxConnected)
	}

	if o.MaxPeers > 0 {
		if o.MaxConnected == 0 || o.MaxConnected+o.MaxConnectedUpgrade > o.MaxPeers {
			return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", // nolint
				o.MaxConnected, o.MaxConnectedUpgrade, o.MaxPeers)
		}
	}

	if o.MaxRetryTime > 0 {
		if o.MinRetryTime == 0 {
			return errors.New("can't set MaxRetryTime without MinRetryTime")
		}
		if o.MinRetryTime > o.MaxRetryTime {
			return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v", // nolint
				o.MinRetryTime, o.MaxRetryTime)
		}
	}

	if o.MaxRetryTimePersistent > 0 {
		if o.MinRetryTime == 0 {
			return errors.New("can't set MaxRetryTimePersistent without MinRetryTime")
		}
		if o.MinRetryTime > o.MaxRetryTimePersistent {
			return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", // nolint
				o.MinRetryTime, o.MaxRetryTimePersistent)
		}
	}

	return nil
}

// isPersistent checks if a peer is in PersistentPeers. It will panic
// if called before optimize().
func (o *PeerManagerOptions) isPersistent(id NodeID) bool {
	if o.persistentPeers == nil {
		panic("isPersistent() called before optimize()")
	}
	return o.persistentPeers[id]
}

// optimize optimizes operations by pregenerating lookup structures. It's a
// separate method instead of memoizing during calls to avoid dealing with
// concurrency and mutex overhead.
func (o *PeerManagerOptions) optimize() {
	o.persistentPeers = make(map[NodeID]bool, len(o.PersistentPeers))
	for _, p := range o.PersistentPeers {
		o.persistentPeers[p] = true
	}
}

// PeerManager manages peer lifecycle information, using a peerStore for
// underlying storage. Its primary purpose is to determine which peer to connect
// to next (including retry timers), make sure a peer only has a single active
// connection (either inbound or outbound), and evict peers to make room for
// higher-scored peers. It does not manage actual connections (this is handled
// by the Router), only the peer lifecycle state.
//
// For an outbound connection, the flow is as follows:
//   - DialNext: return a peer address to dial, mark peer as dialing.
//   - DialFailed: report a dial failure, unmark as dialing.
//   - Dialed: report a dial success, unmark as dialing and mark as connected
//     (errors if already connected, e.g. by Accepted).
//   - Ready: report routing is ready, mark as ready and broadcast PeerStatusUp.
//   - Disconnected: report peer disconnect, unmark as connected and broadcast
//     PeerStatusDown.
//
// For an inbound connection, the flow is as follows:
//   - Accepted: report inbound connection success, mark as connected (errors if
//     already connected, e.g. by Dialed).
//   - Ready: report routing is ready, mark as ready and broadcast PeerStatusUp.
//   - Disconnected: report peer disconnect, unmark as connected and broadcast
//     PeerStatusDown.
//
// When evicting peers, either because peers are explicitly scheduled for
// eviction or we are connected to too many peers, the flow is as follows:
//   - EvictNext: if marked evict and connected, unmark evict and mark evicting.
//     If beyond MaxConnected, pick lowest-scored peer and mark evicting.
//   - Disconnected: unmark connected, evicting, evict, and broadcast a
//     PeerStatusDown peer update.
//
// If all connection slots are full (at MaxConnected), we can use up to
// MaxConnectedUpgrade additional connections to probe any higher-scored
// unconnected peers, and if we reach them (or they reach us) we allow the
// connection and evict a lower-scored peer. We mark the lower-scored peer as
// upgrading[from]=to to make sure no other higher-scored peers can claim the
// same one for an upgrade. The flow is as follows:
//   - Accepted: if upgrade is possible, mark connected and add lower-scored to evict.
//   - DialNext: if upgrade is possible, mark upgrading[from]=to and dialing.
//   - DialFailed: unmark upgrading[from]=to and dialing.
//   - Dialed: unmark upgrading[from]=to and dialing, mark as connected, add
//     lower-scored to evict.
//   - EvictNext: pick peer from evict, mark as evicting.
//   - Disconnected: unmark connected, upgrading[from]=to, evict, evicting.
type PeerManager struct {
	selfID     NodeID
	options    PeerManagerOptions
	rand       *rand.Rand
	dialWaker  *tmsync.Waker // wakes up DialNext() on relevant peer changes
	evictWaker *tmsync.Waker // wakes up EvictNext() on relevant peer changes
	closeCh    chan struct{} // signal channel for Close()
	closeOnce  sync.Once

	mtx           sync.Mutex
	store         *peerStore
	subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address)
	dialing       map[NodeID]bool               // peers being dialed (DialNext → Dialed/DialFail)
	upgrading     map[NodeID]NodeID             // peers claimed for upgrade (DialNext → Dialed/DialFail)
	connected     map[NodeID]bool               // connected peers (Dialed/Accepted → Disconnected)
	ready         map[NodeID]bool               // ready peers (Ready → Disconnected)
	evict         map[NodeID]bool               // peers scheduled for eviction (Connected → EvictNext)
	evicting      map[NodeID]bool               // peers being evicted (EvictNext → Disconnected)
}

// NewPeerManager creates a new peer manager.
func NewPeerManager(selfID NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) {
	if selfID == "" {
		return nil, errors.New("self ID not given")
	}
	if err := options.Validate(); err != nil {
		return nil, err
	}

	options.optimize()

	store, err := newPeerStore(peerDB)
	if err != nil {
		return nil, err
	}

	peerManager := &PeerManager{
		selfID:     selfID,
		options:    options,
		rand:       rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec
		dialWaker:  tmsync.NewWaker(),
		evictWaker: tmsync.NewWaker(),
		closeCh:    make(chan struct{}),

		store:         store,
		dialing:       map[NodeID]bool{},
		upgrading:     map[NodeID]NodeID{},
		connected:     map[NodeID]bool{},
		ready:         map[NodeID]bool{},
		evict:         map[NodeID]bool{},
		evicting:      map[NodeID]bool{},
		subscriptions: map[*PeerUpdates]*PeerUpdates{},
	}

	if err = peerManager.configurePeers(); err != nil {
		return nil, err
	}
	if err = peerManager.prunePeers(); err != nil {
		return nil, err
	}

	return peerManager, nil
}

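// Construction sketch, using the in-memory backend from the imported tm-db
// package (selfID and opts are assumed to be defined by the caller):
//
//	peerManager, err := NewPeerManager(selfID, dbm.NewMemDB(), opts)
//	if err != nil {
//		return err
//	}
//	defer peerManager.Close()
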
// configurePeers configures peers in the peer store with ephemeral runtime
// configuration, e.g. PersistentPeers. It also removes ourselves, if we're in
// the peer store. The caller must hold the mutex lock.
func (m *PeerManager) configurePeers() error {
	if err := m.store.Delete(m.selfID); err != nil {
		return err
	}

	configure := map[NodeID]bool{}
	for _, id := range m.options.PersistentPeers {
		configure[id] = true
	}
	for id := range m.options.PeerScores {
		configure[id] = true
	}
	for id := range configure {
		if peer, ok := m.store.Get(id); ok {
			if err := m.store.Set(m.configurePeer(peer)); err != nil {
				return err
			}
		}
	}
	return nil
}

// configurePeer configures a peer with ephemeral runtime configuration.
func (m *PeerManager) configurePeer(peer peerInfo) peerInfo {
	peer.Persistent = m.options.isPersistent(peer.ID)
	peer.FixedScore = m.options.PeerScores[peer.ID]
	return peer
}

// newPeerInfo creates a peerInfo for a new peer.
func (m *PeerManager) newPeerInfo(id NodeID) peerInfo {
	peerInfo := peerInfo{
		ID:          id,
		AddressInfo: map[NodeAddress]*peerAddressInfo{},
	}
	return m.configurePeer(peerInfo)
}

// prunePeers removes low-scored peers from the peer store if it contains more
// than MaxPeers peers. The caller must hold the mutex lock.
func (m *PeerManager) prunePeers() error {
	if m.options.MaxPeers == 0 || m.store.Size() <= int(m.options.MaxPeers) {
		return nil
	}

	ranked := m.store.Ranked()
	for i := len(ranked) - 1; i >= 0; i-- {
		peerID := ranked[i].ID
		switch {
		case m.store.Size() <= int(m.options.MaxPeers):
			// We're within the limit again; no further pruning is needed.
			// (A bare break here would only exit the switch, not the loop.)
			return nil
		case m.dialing[peerID]:
		case m.connected[peerID]:
		default:
			if err := m.store.Delete(peerID); err != nil {
				return err
			}
		}
	}
	return nil
}

// Add adds a peer to the manager, given as an address. If the peer already
// exists, the address is added to it if it isn't already present. This will
// push low-scoring peers out of the address book if it exceeds the maximum
// size.
func (m *PeerManager) Add(address NodeAddress) (bool, error) {
	if err := address.Validate(); err != nil {
		return false, err
	}
	if address.NodeID == m.selfID {
		return false, fmt.Errorf("can't add self (%v) to peer store", m.selfID)
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()

	peer, ok := m.store.Get(address.NodeID)
	if !ok {
		peer = m.newPeerInfo(address.NodeID)
	}

	// If we already have the peer address, there's no need to continue.
	if _, ok = peer.AddressInfo[address]; ok {
		return false, nil
	}

	// Otherwise, add the new address.
	peer.AddressInfo[address] = &peerAddressInfo{Address: address}
	if err := m.store.Set(peer); err != nil {
		return false, err
	}
	if err := m.prunePeers(); err != nil {
		return true, err
	}
	m.dialWaker.Wake()
	return true, nil
}

// PeerRatio returns the ratio of peer addresses stored to the maximum size.
func (m *PeerManager) PeerRatio() float64 {
	if m.options.MaxPeers == 0 {
		return 0
	}

	m.mtx.Lock()
	defer m.mtx.Unlock()

	return float64(m.store.Size()) / float64(m.options.MaxPeers)
}

// DialNext finds an appropriate peer address to dial, and marks it as dialing.
// If no peer is found, or all connection slots are full, it blocks until one
// becomes available. The caller must call Dialed() or DialFailed() for the
// returned peer.
func (m *PeerManager) DialNext(ctx context.Context) (NodeAddress, error) {
	for {
		address, err := m.TryDialNext()
		if err != nil || (address != NodeAddress{}) {
			return address, err
		}
		select {
		case <-m.dialWaker.Sleep():
		case <-ctx.Done():
			return NodeAddress{}, ctx.Err()
		}
	}
}

// TryDialNext is equivalent to DialNext(), but immediately returns an empty
// address if no peers or connection slots are available.
func (m *PeerManager) TryDialNext() (NodeAddress, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// We allow dialing MaxConnected+MaxConnectedUpgrade peers. Including
	// MaxConnectedUpgrade allows us to probe additional peers that have a
	// higher score than our current peers, and evict a lower-scored peer once
	// such a dial succeeds.
	if m.options.MaxConnected > 0 && len(m.connected)+len(m.dialing) >=
		int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
		return NodeAddress{}, nil
	}

	for _, peer := range m.store.Ranked() {
		if m.dialing[peer.ID] || m.connected[peer.ID] {
			continue
		}

		for _, addressInfo := range peer.AddressInfo {
			if time.Since(addressInfo.LastDialFailure) < m.retryDelay(addressInfo.DialFailures, peer.Persistent) {
				continue
			}

			// We now have an eligible address to dial. If we're full but have
			// upgrade capacity (as checked above), we find a lower-scored peer
			// we can replace and mark it as upgrading so no one else claims it.
			//
			// If we don't find one, there is no point in trying additional
			// peers, since they will all have the same or lower score than this
			// peer (since they're ordered by score via peerStore.Ranked).
			if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
				upgradeFromPeer := m.findUpgradeCandidate(peer.ID, peer.Score())
				if upgradeFromPeer == "" {
					return NodeAddress{}, nil
				}
				m.upgrading[upgradeFromPeer] = peer.ID
			}

			m.dialing[peer.ID] = true
			return addressInfo.Address, nil
		}
	}
	return NodeAddress{}, nil
}

// DialFailed reports a failed dial attempt. This will make the peer available
// for dialing again when appropriate (possibly after a retry timeout).
//
// FIXME: This should probably delete or mark bad addresses/peers after some time.
func (m *PeerManager) DialFailed(address NodeAddress) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	delete(m.dialing, address.NodeID)
	for from, to := range m.upgrading {
		if to == address.NodeID {
			delete(m.upgrading, from) // Unmark failed upgrade attempt.
		}
	}

	peer, ok := m.store.Get(address.NodeID)
	if !ok { // Peer may have been removed while dialing, ignore.
		return nil
	}
	addressInfo, ok := peer.AddressInfo[address]
	if !ok {
		return nil // Assume the address has been removed, ignore.
	}
	addressInfo.LastDialFailure = time.Now().UTC()
	addressInfo.DialFailures++
	if err := m.store.Set(peer); err != nil {
		return err
	}

	// We spawn a goroutine that notifies DialNext() again when the retry
	// timeout has elapsed, so that we can consider dialing it again. We
	// calculate the retry delay outside the goroutine, since retryDelay()
	// requires the mutex lock, which we hold here but the goroutine won't.
	if d := m.retryDelay(addressInfo.DialFailures, peer.Persistent); d != retryNever {
		go func() {
			// Use an explicit timer with deferred cleanup instead of
			// time.After(), to avoid leaking goroutines on PeerManager.Close().
			timer := time.NewTimer(d)
			defer timer.Stop()
			select {
			case <-timer.C:
				m.dialWaker.Wake()
			case <-m.closeCh:
			}
		}()
	}

	m.dialWaker.Wake()
	return nil
}

// Dialed marks a peer as successfully dialed. Any further connections will be
// rejected, and once disconnected the peer may be dialed again.
func (m *PeerManager) Dialed(address NodeAddress) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	delete(m.dialing, address.NodeID)

	var upgradeFromPeer NodeID
	for from, to := range m.upgrading {
		if to == address.NodeID {
			delete(m.upgrading, from)
			upgradeFromPeer = from
			// Don't break, just in case this peer was marked as upgrading for
			// multiple lower-scored peers (shouldn't really happen).
		}
	}

	if address.NodeID == m.selfID {
		return fmt.Errorf("rejecting connection to self (%v)", address.NodeID)
	}
	if m.connected[address.NodeID] {
		return fmt.Errorf("peer %v is already connected", address.NodeID)
	}
	if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
		if upgradeFromPeer == "" || len(m.connected) >=
			int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
			return errors.New("already connected to maximum number of peers")
		}
	}

	peer, ok := m.store.Get(address.NodeID)
	if !ok {
		return fmt.Errorf("peer %q was removed while dialing", address.NodeID)
	}
	now := time.Now().UTC()
	peer.LastConnected = now
	if addressInfo, ok := peer.AddressInfo[address]; ok {
		addressInfo.DialFailures = 0
		addressInfo.LastDialSuccess = now
		// If not found, assume the address has been removed.
	}
	if err := m.store.Set(peer); err != nil {
		return err
	}

	if upgradeFromPeer != "" && m.options.MaxConnected > 0 &&
		len(m.connected) >= int(m.options.MaxConnected) {
		// Look for an even lower-scored peer that may have appeared since we
		// started the upgrade.
		if p, ok := m.store.Get(upgradeFromPeer); ok {
			if u := m.findUpgradeCandidate(p.ID, p.Score()); u != "" {
				upgradeFromPeer = u
			}
		}
		m.evict[upgradeFromPeer] = true
	}
	m.connected[peer.ID] = true
	m.evictWaker.Wake()

	return nil
}

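// Dial-loop sketch, as a router might drive it (connectPeer is a hypothetical
// transport-level dial, not part of this package):
//
//	for {
//		address, err := peerManager.DialNext(ctx)
//		if err != nil {
//			return err
//		}
//		if err := connectPeer(ctx, address); err != nil {
//			_ = peerManager.DialFailed(address)
//			continue
//		}
//		if err := peerManager.Dialed(address); err != nil {
//			// E.g. the peer connected to us first via Accepted; drop the
//			// connection and move on.
//			continue
//		}
//		_ = peerManager.Ready(address.NodeID)
//	}
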
// Accepted marks an incoming peer connection successfully accepted. If the peer
// is already connected or we don't allow additional connections then this will
// return an error.
//
// If full but MaxConnectedUpgrade is non-zero and the incoming peer is
// better-scored than any existing peers, then we accept it and evict a
// lower-scored peer.
//
// NOTE: We can't take an address here, since e.g. TCP uses a different port
// number for outbound traffic than inbound traffic, so the peer's endpoint
// wouldn't necessarily be an appropriate address to dial.
//
// FIXME: When we accept a connection from a peer, we should register that
// peer's address in the peer store so that we can dial it later. In order to do
// that, we'll need to get the remote address after all, but as noted above that
// can't be the remote endpoint since that will usually have the wrong port
// number.
func (m *PeerManager) Accepted(peerID NodeID) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if peerID == m.selfID {
		return fmt.Errorf("rejecting connection from self (%v)", peerID)
	}
	if m.connected[peerID] {
		return fmt.Errorf("peer %q is already connected", peerID)
	}
	if m.options.MaxConnected > 0 &&
		len(m.connected) >= int(m.options.MaxConnected)+int(m.options.MaxConnectedUpgrade) {
		return errors.New("already connected to maximum number of peers")
	}

	peer, ok := m.store.Get(peerID)
	if !ok {
		peer = m.newPeerInfo(peerID)
	}

	// If all connection slots are full, but we allow upgrades (and we checked
	// above that we have upgrade capacity), then we can look for a lower-scored
	// peer to replace and if found accept the connection anyway and evict it.
	var upgradeFromPeer NodeID
	if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) {
		upgradeFromPeer = m.findUpgradeCandidate(peer.ID, peer.Score())
		if upgradeFromPeer == "" {
			return errors.New("already connected to maximum number of peers")
		}
	}

	peer.LastConnected = time.Now().UTC()
	if err := m.store.Set(peer); err != nil {
		return err
	}

	m.connected[peerID] = true
	if upgradeFromPeer != "" {
		m.evict[upgradeFromPeer] = true
	}
	m.evictWaker.Wake()

	return nil
}

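// Accept-loop sketch (acceptConnection is a hypothetical transport-level
// accept that yields the authenticated peer ID):
//
//	for {
//		peerID, err := acceptConnection(ctx)
//		if err != nil {
//			return err
//		}
//		if err := peerManager.Accepted(peerID); err != nil {
//			// Full, or a duplicate connection; close it.
//			continue
//		}
//		_ = peerManager.Ready(peerID)
//	}
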
// Ready marks a peer as ready, broadcasting status updates to subscribers. The
// peer must already be marked as connected. This is separate from Dialed() and
// Accepted() to allow the router to set up its internal queues before reactors
// start sending messages.
func (m *PeerManager) Ready(peerID NodeID) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if m.connected[peerID] {
		m.ready[peerID] = true
		m.broadcast(PeerUpdate{
			NodeID: peerID,
			Status: PeerStatusUp,
		})
	}
	return nil
}

// EvictNext returns the next peer to evict (i.e. disconnect). If no evictable
// peers are found, the call will block until one becomes available.
func (m *PeerManager) EvictNext(ctx context.Context) (NodeID, error) {
	for {
		id, err := m.TryEvictNext()
		if err != nil || id != "" {
			return id, err
		}
		select {
		case <-m.evictWaker.Sleep():
		case <-ctx.Done():
			return "", ctx.Err()
		}
	}
}

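// Eviction-loop sketch (disconnectPeer is a hypothetical transport hook; note
// that Disconnected() must be reported for the eviction state to be cleared):
//
//	for {
//		peerID, err := peerManager.EvictNext(ctx)
//		if err != nil {
//			return err
//		}
//		disconnectPeer(peerID)
//		_ = peerManager.Disconnected(peerID)
//	}
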
// TryEvictNext is equivalent to EvictNext, but immediately returns an empty
// node ID if no evictable peers are found.
func (m *PeerManager) TryEvictNext() (NodeID, error) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	// If any connected peers are explicitly scheduled for eviction, we return a
	// random one.
	for peerID := range m.evict {
		delete(m.evict, peerID)
		if m.connected[peerID] && !m.evicting[peerID] {
			m.evicting[peerID] = true
			return peerID, nil
		}
	}

	// If we're below capacity, we don't need to evict anything.
	if m.options.MaxConnected == 0 ||
		len(m.connected)-len(m.evicting) <= int(m.options.MaxConnected) {
		return "", nil
	}

	// If we're above capacity (shouldn't really happen), just pick the
	// lowest-ranked peer to evict.
	ranked := m.store.Ranked()
	for i := len(ranked) - 1; i >= 0; i-- {
		peer := ranked[i]
		if m.connected[peer.ID] && !m.evicting[peer.ID] {
			m.evicting[peer.ID] = true
			return peer.ID, nil
		}
	}

	return "", nil
}

// Disconnected unmarks a peer as connected, allowing it to be dialed or
// accepted again as appropriate.
func (m *PeerManager) Disconnected(peerID NodeID) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	ready := m.ready[peerID]

	delete(m.connected, peerID)
	delete(m.upgrading, peerID)
	delete(m.evict, peerID)
	delete(m.evicting, peerID)
	delete(m.ready, peerID)

	if ready {
		m.broadcast(PeerUpdate{
			NodeID: peerID,
			Status: PeerStatusDown,
		})
	}

	m.dialWaker.Wake()
	return nil
}

// Errored reports a peer error, causing the peer to be evicted if it's
// currently connected.
//
// FIXME: This should probably be replaced with a peer behavior API, see
// PeerError comments for more details.
//
// FIXME: This will cause the peer manager to immediately try to reconnect to
// the peer, which is probably not always what we want.
func (m *PeerManager) Errored(peerID NodeID, err error) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if m.connected[peerID] {
		m.evict[peerID] = true
	}

	m.evictWaker.Wake()
	return nil
}

// Advertise returns a list of peer addresses to advertise to a peer.
//
// FIXME: This is fairly naïve and only returns the addresses of the
// highest-ranked peers.
func (m *PeerManager) Advertise(peerID NodeID, limit uint16) []NodeAddress {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	addresses := make([]NodeAddress, 0, limit)
	for _, peer := range m.store.Ranked() {
		if peer.ID == peerID {
			continue
		}

		for nodeAddr, addressInfo := range peer.AddressInfo {
			if len(addresses) >= int(limit) {
				return addresses
			}

			// Only add non-private NodeIDs.
			if _, ok := m.options.PrivatePeers[nodeAddr.NodeID]; !ok {
				addresses = append(addresses, addressInfo.Address)
			}
		}
	}
	return addresses
}

// Subscribe subscribes to peer updates. The caller must consume the peer
// updates in a timely fashion and close the subscription when done, otherwise
// the PeerManager will halt.
func (m *PeerManager) Subscribe() *PeerUpdates {
	// FIXME: We use a size-1 buffer here. When we broadcast a peer update
	// we have to loop over all of the subscriptions, and we want to avoid
	// having to block and wait for a context switch before continuing on
	// to the next subscription. This also prevents tail latencies from
	// compounding. Limiting it to 1 means that the subscribers are still
	// reasonably in sync. However, this should probably be benchmarked.
	peerUpdates := NewPeerUpdates(make(chan PeerUpdate, 1), 1)
	m.Register(peerUpdates)
	return peerUpdates
}

// Register allows you to inject a custom PeerUpdates instance into the
// PeerManager, rather than relying on the instance constructed by the
// Subscribe method, which wraps the functionality of the Register method.
//
// The caller must consume the peer updates from this PeerUpdates instance in
// a timely fashion and close the subscription when done, otherwise the
// PeerManager will halt.
func (m *PeerManager) Register(peerUpdates *PeerUpdates) {
	m.mtx.Lock()
	m.subscriptions[peerUpdates] = peerUpdates
	m.mtx.Unlock()

	go func() {
		for {
			select {
			case <-peerUpdates.closeCh:
				return
			case <-m.closeCh:
				return
			case pu := <-peerUpdates.routerUpdatesCh:
				m.processPeerEvent(pu)
			}
		}
	}()

	go func() {
		select {
		case <-peerUpdates.Done():
			m.mtx.Lock()
			delete(m.subscriptions, peerUpdates)
			m.mtx.Unlock()
		case <-m.closeCh:
		}
	}()
}

// processPeerEvent adjusts a peer's mutable score based on a reported status
// update, creating a bare peer entry if the peer is not yet known.
func (m *PeerManager) processPeerEvent(pu PeerUpdate) {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	if _, ok := m.store.peers[pu.NodeID]; !ok {
		m.store.peers[pu.NodeID] = &peerInfo{}
	}

	switch pu.Status {
	case PeerStatusBad:
		m.store.peers[pu.NodeID].MutableScore--
	case PeerStatusGood:
		m.store.peers[pu.NodeID].MutableScore++
	}
}

// broadcast broadcasts a peer update to all subscriptions. The caller must
// already hold the mutex lock, to make sure updates are sent in the same order
// as the PeerManager processes them, but this means subscribers must be
// responsive at all times or the entire PeerManager will halt.
//
// FIXME: Consider using an internal channel to buffer updates while also
// maintaining order if this is a problem.
func (m *PeerManager) broadcast(peerUpdate PeerUpdate) {
	for _, sub := range m.subscriptions {
		// We have to check closeCh separately first, otherwise there's a 50%
		// chance the second select will send on a closed subscription.
		select {
		case <-sub.closeCh:
			continue
		default:
		}

		select {
		case sub.reactorUpdatesCh <- peerUpdate:
		case <-sub.closeCh:
		}
	}
}

// Close closes the peer manager, releasing resources (i.e. goroutines).
func (m *PeerManager) Close() {
	m.closeOnce.Do(func() {
		close(m.closeCh)
	})
}

// Addresses returns all known addresses for a peer, primarily for testing.
// The order is arbitrary.
func (m *PeerManager) Addresses(peerID NodeID) []NodeAddress {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	addresses := []NodeAddress{}
	if peer, ok := m.store.Get(peerID); ok {
		for _, addressInfo := range peer.AddressInfo {
			addresses = append(addresses, addressInfo.Address)
		}
	}
	return addresses
}

// Peers returns all known peers, primarily for testing. The order is arbitrary.
func (m *PeerManager) Peers() []NodeID {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	peers := []NodeID{}
	for _, peer := range m.store.Ranked() {
		peers = append(peers, peer.ID)
	}
	return peers
}

// Scores returns the peer scores for all known peers, primarily for testing.
func (m *PeerManager) Scores() map[NodeID]PeerScore {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	scores := map[NodeID]PeerScore{}
	for _, peer := range m.store.Ranked() {
		scores[peer.ID] = peer.Score()
	}
	return scores
}

// Status returns the status for a peer, primarily for testing.
func (m *PeerManager) Status(id NodeID) PeerStatus {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	switch {
	case m.ready[id]:
		return PeerStatusUp
	default:
		return PeerStatusDown
	}
}

// findUpgradeCandidate looks for a lower-scored peer that we could evict
// to make room for the given peer. Returns an empty ID if none is found.
// If the peer is already being upgraded to, we return that same upgrade.
// The caller must hold the mutex lock.
func (m *PeerManager) findUpgradeCandidate(id NodeID, score PeerScore) NodeID {
	for from, to := range m.upgrading {
		if to == id {
			return from
		}
	}

	ranked := m.store.Ranked()
	for i := len(ranked) - 1; i >= 0; i-- {
		candidate := ranked[i]
		// The empty switch cases skip candidates that aren't connected or are
		// already claimed by an eviction or upgrade.
		switch {
		case candidate.Score() >= score:
			return "" // no further peers can be scored lower, due to sorting
		case !m.connected[candidate.ID]:
		case m.evict[candidate.ID]:
		case m.evicting[candidate.ID]:
		case m.upgrading[candidate.ID] != "":
		default:
			return candidate.ID
		}
	}
	return ""
}

// retryDelay calculates a dial retry delay using exponential backoff, based on
// retry settings in PeerManagerOptions. If retries are disabled (i.e.
// MinRetryTime is 0), this returns retryNever (i.e. an infinite retry delay).
// The caller must hold the mutex lock (for m.rand which is not thread-safe).
func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration {
	if failures == 0 {
		return 0
	}
	if m.options.MinRetryTime == 0 {
		return retryNever
	}
	maxDelay := m.options.MaxRetryTime
	if persistent && m.options.MaxRetryTimePersistent > 0 {
		maxDelay = m.options.MaxRetryTimePersistent
	}

	delay := m.options.MinRetryTime * time.Duration(math.Pow(2, float64(failures-1)))
	if maxDelay > 0 && delay > maxDelay {
		delay = maxDelay
	}
	if m.options.RetryTimeJitter > 0 {
		delay += time.Duration(m.rand.Int63n(int64(m.options.RetryTimeJitter)))
	}
	return delay
}

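// For example, with MinRetryTime of 500ms and MaxRetryTime of 30m, the delays
// before jitter are 500ms, 1s, 2s, 4s, and so on, reaching the 30m cap from
// the 13th consecutive failure onwards, since 500ms * 2^12 is about 34m.
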
// GetHeight returns a peer's height, as reported via SetHeight, or 0 if the
// peer or height is unknown.
//
// FIXME: This is a temporary workaround to share state between the consensus
// and mempool reactors, carried over from the legacy P2P stack. Reactors should
// not have dependencies on each other, instead tracking this themselves.
func (m *PeerManager) GetHeight(peerID NodeID) int64 {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	peer, _ := m.store.Get(peerID)
	return peer.Height
}

// SetHeight stores a peer's height, making it available via GetHeight.
//
// FIXME: This is a temporary workaround to share state between the consensus
// and mempool reactors, carried over from the legacy P2P stack. Reactors should
// not have dependencies on each other, instead tracking this themselves.
func (m *PeerManager) SetHeight(peerID NodeID, height int64) error {
	m.mtx.Lock()
	defer m.mtx.Unlock()

	peer, ok := m.store.Get(peerID)
	if !ok {
		peer = m.newPeerInfo(peerID)
	}
	peer.Height = height
	return m.store.Set(peer)
}

// peerStore stores information about peers. It is not thread-safe, assuming it
// is only used by PeerManager which handles concurrency control. This allows
// the manager to execute multiple operations atomically via its own mutex.
//
// The entire set of peers is kept in memory, for performance. It is loaded
// from disk on initialization, and any changes are written back to disk
// (without fsync, since we can afford to lose recent writes).
type peerStore struct {
	db     dbm.DB
	peers  map[NodeID]*peerInfo
	ranked []*peerInfo // cache for Ranked(), nil invalidates cache
}

// newPeerStore creates a new peer store, loading all persisted peers from the
// database into memory.
func newPeerStore(db dbm.DB) (*peerStore, error) {
	if db == nil {
		return nil, errors.New("no database provided")
	}
	store := &peerStore{db: db}
	if err := store.loadPeers(); err != nil {
		return nil, err
	}
	return store, nil
}

// loadPeers loads all peers from the database into memory.
func (s *peerStore) loadPeers() error {
	peers := map[NodeID]*peerInfo{}

	start, end := keyPeerInfoRange()
	iter, err := s.db.Iterator(start, end)
	if err != nil {
		return err
	}
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		// FIXME: We may want to tolerate failures here, by simply logging
		// the errors and ignoring the faulty peer entries.
		msg := new(p2pproto.PeerInfo)
		if err := proto.Unmarshal(iter.Value(), msg); err != nil {
			return fmt.Errorf("invalid peer Protobuf data: %w", err)
		}
		peer, err := peerInfoFromProto(msg)
		if err != nil {
			return fmt.Errorf("invalid peer data: %w", err)
		}
		peers[peer.ID] = peer
	}
	if iter.Error() != nil {
		return iter.Error()
	}
	s.peers = peers
	s.ranked = nil // invalidate cache if populated
	return nil
}

// Get fetches a peer. The boolean indicates whether the peer existed or not.
// The returned peer info is a copy, and can be mutated at will.
func (s *peerStore) Get(id NodeID) (peerInfo, bool) {
	peer, ok := s.peers[id]
	return peer.Copy(), ok
}

// Set stores peer data. The input data will be copied, and can safely be reused
// by the caller.
func (s *peerStore) Set(peer peerInfo) error {
	if err := peer.Validate(); err != nil {
		return err
	}
	peer = peer.Copy()

	// FIXME: We may want to optimize this by avoiding saving to the database
	// if there haven't been any changes to persisted fields.
	bz, err := peer.ToProto().Marshal()
	if err != nil {
		return err
	}
	if err = s.db.Set(keyPeerInfo(peer.ID), bz); err != nil {
		return err
	}

	if current, ok := s.peers[peer.ID]; !ok || current.Score() != peer.Score() {
		// If the peer is new, or its score changes, we invalidate the Ranked() cache.
		s.peers[peer.ID] = &peer
		s.ranked = nil
	} else {
		// Otherwise, since s.ranked contains pointers to the old data and we
		// want those pointers to remain valid with the new data, we have to
		// update the existing pointer address.
		*current = peer
	}
	return nil
}

// Delete deletes a peer, or does nothing if it does not exist.
func (s *peerStore) Delete(id NodeID) error {
	if _, ok := s.peers[id]; !ok {
		return nil
	}
	if err := s.db.Delete(keyPeerInfo(id)); err != nil {
		return err
	}
	delete(s.peers, id)
	s.ranked = nil
	return nil
}

// List retrieves all peers in an arbitrary order. The returned data is a copy,
// and can be mutated at will.
func (s *peerStore) List() []peerInfo {
	peers := make([]peerInfo, 0, len(s.peers))
	for _, peer := range s.peers {
		peers = append(peers, peer.Copy())
	}
	return peers
}

// Ranked returns a list of peers ordered by score (better peers first). Peers
// with equal scores are returned in an arbitrary order. The returned list must
// not be mutated or accessed concurrently by the caller, since it returns
// pointers to internal peerStore data for performance.
//
// Ranked is used to determine which peers to dial, which ones to evict, and
// which ones to delete completely.
//
// FIXME: For now, we simply maintain a cache in s.ranked which is invalidated
// by setting it to nil, but if necessary we should use a better data structure
// for this (e.g. a heap or ordered map).
//
// FIXME: The scoring logic is currently very naïve, see peerInfo.Score().
func (s *peerStore) Ranked() []*peerInfo {
	if s.ranked != nil {
		return s.ranked
	}
	s.ranked = make([]*peerInfo, 0, len(s.peers))
	for _, peer := range s.peers {
		s.ranked = append(s.ranked, peer)
	}
	sort.Slice(s.ranked, func(i, j int) bool {
		// FIXME: If necessary, consider precomputing scores before sorting,
		// to reduce the number of Score() calls.
		return s.ranked[i].Score() > s.ranked[j].Score()
	})
	return s.ranked
}

// Size returns the number of peers in the peer store.
func (s *peerStore) Size() int {
	return len(s.peers)
}

// peerInfo contains peer information stored in a peerStore.
type peerInfo struct {
	ID            NodeID
	AddressInfo   map[NodeAddress]*peerAddressInfo
	LastConnected time.Time

	// These fields are ephemeral, i.e. not persisted to the database.
	Persistent   bool
	Height       int64
	FixedScore   PeerScore // mainly for tests
	MutableScore int64     // updated by router
}

// peerInfoFromProto converts a Protobuf PeerInfo message to a peerInfo,
// erroring if the data is invalid.
func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) {
	p := &peerInfo{
		ID:          NodeID(msg.ID),
		AddressInfo: map[NodeAddress]*peerAddressInfo{},
	}
	if msg.LastConnected != nil {
		p.LastConnected = *msg.LastConnected
	}
	for _, a := range msg.AddressInfo {
		addressInfo, err := peerAddressInfoFromProto(a)
		if err != nil {
			return nil, err
		}
		p.AddressInfo[addressInfo.Address] = addressInfo
	}
	return p, p.Validate()
}

// ToProto converts the peerInfo to p2pproto.PeerInfo for database storage. The
// Protobuf type only contains persisted fields, while ephemeral fields are
// discarded. The returned message may contain pointers to original data, since
// it is expected to be serialized immediately.
func (p *peerInfo) ToProto() *p2pproto.PeerInfo {
	msg := &p2pproto.PeerInfo{
		ID:            string(p.ID),
		LastConnected: &p.LastConnected,
	}
	for _, addressInfo := range p.AddressInfo {
		msg.AddressInfo = append(msg.AddressInfo, addressInfo.ToProto())
	}
	if msg.LastConnected.IsZero() {
		msg.LastConnected = nil
	}
	return msg
}

// Copy returns a deep copy of the peer info.
func (p *peerInfo) Copy() peerInfo {
	if p == nil {
		return peerInfo{}
	}
	c := *p
	// Allocate a fresh map: c is a shallow copy, so without this the copy
	// would share (and mutate) the original's AddressInfo map.
	c.AddressInfo = make(map[NodeAddress]*peerAddressInfo, len(p.AddressInfo))
	for addr, addressInfo := range p.AddressInfo {
		addressInfoCopy := addressInfo.Copy()
		c.AddressInfo[addr] = &addressInfoCopy
	}
	return c
}

// Score calculates a score for the peer. Higher-scored peers will be
// preferred over lower-scored ones.
func (p *peerInfo) Score() PeerScore {
	if p.FixedScore > 0 {
		return p.FixedScore
	}
	if p.Persistent {
		return PeerScorePersistent
	}
	if p.MutableScore <= 0 {
		return 0
	}
	if p.MutableScore >= math.MaxUint8 {
		return PeerScore(math.MaxUint8)
	}
	return PeerScore(p.MutableScore)
}

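// For example, a peer with MutableScore 3 scores 3; a persistent peer with no
// FixedScore always scores PeerScorePersistent (255); and a peer whose
// MutableScore has gone negative scores 0.
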
// Validate validates the peer info.
func (p *peerInfo) Validate() error {
	if p.ID == "" {
		return errors.New("no peer ID")
	}
	return nil
}

// peerAddressInfo contains information and statistics about a peer address.
type peerAddressInfo struct {
	Address         NodeAddress
	LastDialSuccess time.Time
	LastDialFailure time.Time
	DialFailures    uint32 // since last successful dial
}

// peerAddressInfoFromProto converts a Protobuf PeerAddressInfo message
// to a peerAddressInfo.
func peerAddressInfoFromProto(msg *p2pproto.PeerAddressInfo) (*peerAddressInfo, error) {
	address, err := ParseNodeAddress(msg.Address)
	if err != nil {
		// Report the raw input, since address is zero-valued on parse failure.
		return nil, fmt.Errorf("invalid address %q: %w", msg.Address, err)
	}
	addressInfo := &peerAddressInfo{
		Address:      address,
		DialFailures: msg.DialFailures,
	}
	if msg.LastDialSuccess != nil {
		addressInfo.LastDialSuccess = *msg.LastDialSuccess
	}
	if msg.LastDialFailure != nil {
		addressInfo.LastDialFailure = *msg.LastDialFailure
	}
	return addressInfo, addressInfo.Validate()
}

// ToProto converts the address info into a Protobuf message for serialization.
func (a *peerAddressInfo) ToProto() *p2pproto.PeerAddressInfo {
	msg := &p2pproto.PeerAddressInfo{
		Address:         a.Address.String(),
		LastDialSuccess: &a.LastDialSuccess,
		LastDialFailure: &a.LastDialFailure,
		DialFailures:    a.DialFailures,
	}
	if msg.LastDialSuccess.IsZero() {
		msg.LastDialSuccess = nil
	}
	if msg.LastDialFailure.IsZero() {
		msg.LastDialFailure = nil
	}
	return msg
}

// Copy returns a copy of the address info.
func (a *peerAddressInfo) Copy() peerAddressInfo {
	return *a
}

// Validate validates the address info.
func (a *peerAddressInfo) Validate() error {
	return a.Address.Validate()
}

// Database key prefixes.
const (
	prefixPeerInfo int64 = 1
)

// keyPeerInfo generates a peerInfo database key.
func keyPeerInfo(id NodeID) []byte {
	key, err := orderedcode.Append(nil, prefixPeerInfo, string(id))
	if err != nil {
		panic(err)
	}
	return key
}

// keyPeerInfoRange generates start/end keys for the entire peerInfo key range.
func keyPeerInfoRange() ([]byte, []byte) {
	start, err := orderedcode.Append(nil, prefixPeerInfo, "")
	if err != nil {
		panic(err)
	}
	end, err := orderedcode.Append(nil, prefixPeerInfo, orderedcode.Infinity)
	if err != nil {
		panic(err)
	}
	return start, end
}