You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

906 lines
23 KiB

9 years ago
7 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
8 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
7 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
7 years ago
9 years ago
7 years ago
9 years ago
7 years ago
9 years ago
7 years ago
9 years ago
7 years ago
9 years ago
7 years ago
  1. // Modified for Tendermint
  2. // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
  3. // https://github.com/conformal/btcd/blob/master/LICENSE
  4. package p2p
  5. import (
  6. "crypto/sha256"
  7. "encoding/binary"
  8. "encoding/json"
  9. "fmt"
  10. "math"
  11. "math/rand"
  12. "net"
  13. "os"
  14. "sync"
  15. "time"
  16. crypto "github.com/tendermint/go-crypto"
  17. cmn "github.com/tendermint/tmlibs/common"
  18. )
const (
	// needAddressThreshold is the number of addresses under which the
	// address manager will claim to need more addresses.
	needAddressThreshold = 1000

	// dumpAddressInterval is the interval used to dump the address
	// cache to disk for future use.
	dumpAddressInterval = time.Minute * 2

	// oldBucketSize is the max number of addresses in each old address bucket.
	oldBucketSize = 64

	// oldBucketCount is the number of buckets we split old addresses over.
	oldBucketCount = 64

	// newBucketSize is the max number of addresses in each new address bucket.
	newBucketSize = 64

	// newBucketCount is the number of buckets that we spread new addresses over.
	newBucketCount = 256

	// oldBucketsPerGroup is the number of old buckets over which an
	// address group will be spread.
	oldBucketsPerGroup = 4

	// newBucketsPerGroup is the number of new buckets over which a
	// source address group will be spread.
	newBucketsPerGroup = 32

	// maxNewBucketsPerAddress is the max number of new buckets a frequently
	// seen address may end up in.
	maxNewBucketsPerAddress = 4

	// numMissingDays is the number of days before which we assume an address
	// has vanished if we have not seen it announced in that long.
	numMissingDays = 30

	// numRetries is the number of tries without a single success before
	// we assume an address is bad.
	numRetries = 3

	// maxFailures is the max number of failures we will accept without a
	// success before considering an address bad.
	maxFailures = 10

	// minBadDays is the number of days since the last success before we
	// will consider evicting an address.
	minBadDays = 7

	// getSelectionPercent is the % of total addresses known that
	// GetSelection returns.
	getSelectionPercent = 23

	// minGetSelection is the min number of addresses that must be returned
	// by GetSelection. Useful for bootstrapping.
	minGetSelection = 32

	// maxGetSelection is the max number of addresses returned by GetSelection.
	// NOTE: this must match "maxPexMessageSize"
	maxGetSelection = 250
)
const (
	// An address lives either in the "new" buckets (heard about, not yet
	// proven) or the "old" buckets (promoted via MarkGood/moveToOld).
	// knownAddress.BucketType holds one of these two values.
	bucketTypeNew = 0x01
	bucketTypeOld = 0x02
)
// AddrBook - concurrency safe peer address manager.
type AddrBook struct {
	cmn.BaseService

	// immutable after creation
	filePath          string // where the book is persisted (see saveToFile/loadFromFile)
	routabilityStrict bool   // if true, reject non-routable addresses in addAddress
	key               string // random hex key mixed into calcNewBucket/calcOldBucket hashes

	// accessed concurrently; guarded by mtx
	mtx        sync.Mutex
	rand       *rand.Rand                 // seeded source used for all random picks
	ourAddrs   map[string]*NetAddress     // our own addresses, ignored by addAddress
	addrLookup map[ID]*knownAddress       // new & old
	bucketsOld []map[string]*knownAddress // addr.String() -> knownAddress, per old bucket
	bucketsNew []map[string]*knownAddress // addr.String() -> knownAddress, per new bucket
	nOld       int                        // count of addresses with BucketType == bucketTypeOld
	nNew       int                        // count of addresses with BucketType == bucketTypeNew

	wg sync.WaitGroup // tracks the saveRoutine goroutine (see OnStart/Wait)
}
  77. // NewAddrBook creates a new address book.
  78. // Use Start to begin processing asynchronous address updates.
  79. func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
  80. am := &AddrBook{
  81. rand: rand.New(rand.NewSource(time.Now().UnixNano())),
  82. ourAddrs: make(map[string]*NetAddress),
  83. addrLookup: make(map[ID]*knownAddress),
  84. filePath: filePath,
  85. routabilityStrict: routabilityStrict,
  86. }
  87. am.init()
  88. am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am)
  89. return am
  90. }
  91. // When modifying this, don't forget to update loadFromFile()
  92. func (a *AddrBook) init() {
  93. a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
  94. // New addr buckets
  95. a.bucketsNew = make([]map[string]*knownAddress, newBucketCount)
  96. for i := range a.bucketsNew {
  97. a.bucketsNew[i] = make(map[string]*knownAddress)
  98. }
  99. // Old addr buckets
  100. a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount)
  101. for i := range a.bucketsOld {
  102. a.bucketsOld[i] = make(map[string]*knownAddress)
  103. }
  104. }
// OnStart implements Service. It restores the book from disk (if the
// file exists) and launches the background save routine.
func (a *AddrBook) OnStart() error {
	if err := a.BaseService.OnStart(); err != nil {
		return err
	}
	a.loadFromFile(a.filePath)

	// wg.Add to ensure that any invocation of .Wait()
	// later on will wait for saveRoutine to terminate.
	a.wg.Add(1)
	go a.saveRoutine()

	return nil
}
// OnStop implements Service. BaseService.OnStop presumably closes a.Quit
// (saveRoutine watches that channel to exit) — use Wait() to block until
// the routine has flushed the book and returned.
func (a *AddrBook) OnStop() {
	a.BaseService.OnStop()
}
// Wait blocks until the background save routine has exited
// (the WaitGroup is incremented in OnStart).
func (a *AddrBook) Wait() {
	a.wg.Wait()
}
  124. // AddOurAddress adds another one of our addresses.
  125. func (a *AddrBook) AddOurAddress(addr *NetAddress) {
  126. a.mtx.Lock()
  127. defer a.mtx.Unlock()
  128. a.Logger.Info("Add our address to book", "addr", addr)
  129. a.ourAddrs[addr.String()] = addr
  130. }
  131. // OurAddresses returns a list of our addresses.
  132. func (a *AddrBook) OurAddresses() []*NetAddress {
  133. addrs := []*NetAddress{}
  134. for _, addr := range a.ourAddrs {
  135. addrs = append(addrs, addr)
  136. }
  137. return addrs
  138. }
// AddAddress adds the given address as received from the given source.
// NOTE: addr must not be nil
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) error {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	// addAddress does the real work; it must be called with the mutex held.
	return a.addAddress(addr, src)
}
  146. // NeedMoreAddrs returns true if there are not have enough addresses in the book.
  147. func (a *AddrBook) NeedMoreAddrs() bool {
  148. return a.Size() < needAddressThreshold
  149. }
// Size returns the number of addresses in the book.
func (a *AddrBook) Size() int {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	return a.size()
}
  156. func (a *AddrBook) size() int {
  157. return a.nNew + a.nOld
  158. }
// PickAddress picks an address to connect to.
// The address is picked randomly from an old or new bucket according
// to the newBias argument, which must be between [0, 100] (or else is truncated to that range)
// and determines how biased we are to pick an address from a new bucket.
// PickAddress returns nil if the AddrBook is empty or if we try to pick
// from an empty bucket.
func (a *AddrBook) PickAddress(newBias int) *NetAddress {
	a.mtx.Lock()
	defer a.mtx.Unlock()

	if a.size() == 0 {
		return nil
	}
	// Clamp the bias into [0, 100].
	if newBias > 100 {
		newBias = 100
	}
	if newBias < 0 {
		newBias = 0
	}

	// Bias between new and old addresses.
	// sqrt dampens the influence of the raw counts so the bias argument
	// still matters when one side vastly outnumbers the other.
	oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
	newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)

	// pick a random peer from a random bucket
	var bucket map[string]*knownAddress
	pickFromOldBucket := (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation
	// Guard against the chosen side being empty — the loop below would
	// otherwise spin forever over empty buckets.
	if (pickFromOldBucket && a.nOld == 0) ||
		(!pickFromOldBucket && a.nNew == 0) {
		return nil
	}
	// loop until we pick a random non-empty bucket
	for len(bucket) == 0 {
		if pickFromOldBucket {
			bucket = a.bucketsOld[a.rand.Intn(len(a.bucketsOld))]
		} else {
			bucket = a.bucketsNew[a.rand.Intn(len(a.bucketsNew))]
		}
	}
	// pick a random index and loop over the map to return that index
	// (map iteration order is random, but this makes the pick uniform)
	randIndex := a.rand.Intn(len(bucket))
	for _, ka := range bucket {
		if randIndex == 0 {
			return ka.Addr
		}
		randIndex--
	}
	return nil
}
  205. // MarkGood marks the peer as good and moves it into an "old" bucket.
  206. // TODO: call this from somewhere
  207. func (a *AddrBook) MarkGood(addr *NetAddress) {
  208. a.mtx.Lock()
  209. defer a.mtx.Unlock()
  210. ka := a.addrLookup[addr.ID]
  211. if ka == nil {
  212. return
  213. }
  214. ka.markGood()
  215. if ka.isNew() {
  216. a.moveToOld(ka)
  217. }
  218. }
  219. // MarkAttempt marks that an attempt was made to connect to the address.
  220. func (a *AddrBook) MarkAttempt(addr *NetAddress) {
  221. a.mtx.Lock()
  222. defer a.mtx.Unlock()
  223. ka := a.addrLookup[addr.ID]
  224. if ka == nil {
  225. return
  226. }
  227. ka.markAttempt()
  228. }
// MarkBad currently just ejects the address. In the future, consider
// blacklisting.
func (a *AddrBook) MarkBad(addr *NetAddress) {
	// RemoveAddress acquires the mutex itself; do not hold it here.
	a.RemoveAddress(addr)
}
  234. // RemoveAddress removes the address from the book.
  235. func (a *AddrBook) RemoveAddress(addr *NetAddress) {
  236. a.mtx.Lock()
  237. defer a.mtx.Unlock()
  238. ka := a.addrLookup[addr.ID]
  239. if ka == nil {
  240. return
  241. }
  242. a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID)
  243. a.removeFromAllBuckets(ka)
  244. }
  245. /* Peer exchange */
  246. // GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
  247. func (a *AddrBook) GetSelection() []*NetAddress {
  248. a.mtx.Lock()
  249. defer a.mtx.Unlock()
  250. if a.size() == 0 {
  251. return nil
  252. }
  253. allAddr := make([]*NetAddress, a.size())
  254. i := 0
  255. for _, ka := range a.addrLookup {
  256. allAddr[i] = ka.Addr
  257. i++
  258. }
  259. numAddresses := cmn.MaxInt(
  260. cmn.MinInt(minGetSelection, len(allAddr)),
  261. len(allAddr)*getSelectionPercent/100)
  262. numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
  263. // Fisher-Yates shuffle the array. We only need to do the first
  264. // `numAddresses' since we are throwing the rest.
  265. // XXX: What's the point of this if we already loop randomly through addrLookup ?
  266. for i := 0; i < numAddresses; i++ {
  267. // pick a number between current index and the end
  268. j := rand.Intn(len(allAddr)-i) + i
  269. allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
  270. }
  271. // slice off the limit we are willing to share.
  272. return allAddr[:numAddresses]
  273. }
  274. // ListOfKnownAddresses returns the new and old addresses.
  275. func (a *AddrBook) ListOfKnownAddresses() []*knownAddress {
  276. a.mtx.Lock()
  277. defer a.mtx.Unlock()
  278. addrs := []*knownAddress{}
  279. for _, addr := range a.addrLookup {
  280. addrs = append(addrs, addr.copy())
  281. }
  282. return addrs
  283. }
  284. func (ka *knownAddress) copy() *knownAddress {
  285. return &knownAddress{
  286. Addr: ka.Addr,
  287. Src: ka.Src,
  288. Attempts: ka.Attempts,
  289. LastAttempt: ka.LastAttempt,
  290. LastSuccess: ka.LastSuccess,
  291. BucketType: ka.BucketType,
  292. Buckets: ka.Buckets,
  293. }
  294. }
/* Loading & Saving */

// addrBookJSON is the on-disk representation of the book: the hashing
// key plus every known address (see saveToFile / loadFromFile).
type addrBookJSON struct {
	Key   string
	Addrs []*knownAddress
}
// saveToFile persists the book (key + all known addresses) as indented
// JSON at filePath. Errors are logged, not returned.
func (a *AddrBook) saveToFile(filePath string) {
	a.Logger.Info("Saving AddrBook to file", "size", a.Size())

	a.mtx.Lock()
	defer a.mtx.Unlock()
	// Compile Addrs
	addrs := []*knownAddress{}
	for _, ka := range a.addrLookup {
		addrs = append(addrs, ka)
	}

	aJSON := &addrBookJSON{
		Key:   a.key,
		Addrs: addrs,
	}

	jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
	if err != nil {
		a.Logger.Error("Failed to save AddrBook to file", "err", err)
		return
	}
	// Atomic write so a crash mid-save cannot corrupt an existing file.
	err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
	if err != nil {
		a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err)
	}
}
// Returns false if file does not exist.
// cmn.Panics if file is corrupt.
// On success, restores the hashing key, both bucket sets, addrLookup,
// and the nNew/nOld counters from the JSON written by saveToFile.
func (a *AddrBook) loadFromFile(filePath string) bool {
	// If doesn't exist, do nothing.
	_, err := os.Stat(filePath)
	if os.IsNotExist(err) {
		return false
	}

	// Load addrBookJSON{}
	r, err := os.Open(filePath)
	if err != nil {
		cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
	}
	defer r.Close() // nolint: errcheck
	aJSON := &addrBookJSON{}
	dec := json.NewDecoder(r)
	err = dec.Decode(aJSON)
	if err != nil {
		cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
	}

	// Restore all the fields...
	// Restore the key
	a.key = aJSON.Key
	// Restore .bucketsNew & .bucketsOld
	for _, ka := range aJSON.Addrs {
		// An address may be referenced by several buckets; re-insert it
		// into each one recorded in ka.Buckets.
		for _, bucketIndex := range ka.Buckets {
			bucket := a.getBucket(ka.BucketType, bucketIndex)
			bucket[ka.Addr.String()] = ka
		}
		a.addrLookup[ka.ID()] = ka
		if ka.BucketType == bucketTypeNew {
			a.nNew++
		} else {
			a.nOld++
		}
	}
	return true
}
// Save saves the book.
// NOTE(review): saveToFile logs the same "Saving AddrBook to file"
// message again, so this call logs twice — harmless duplication.
func (a *AddrBook) Save() {
	a.Logger.Info("Saving AddrBook to file", "size", a.Size())
	a.saveToFile(a.filePath)
}
  366. /* Private methods */
  367. func (a *AddrBook) saveRoutine() {
  368. defer a.wg.Done()
  369. saveFileTicker := time.NewTicker(dumpAddressInterval)
  370. out:
  371. for {
  372. select {
  373. case <-saveFileTicker.C:
  374. a.saveToFile(a.filePath)
  375. case <-a.Quit:
  376. break out
  377. }
  378. }
  379. saveFileTicker.Stop()
  380. a.saveToFile(a.filePath)
  381. a.Logger.Info("Address handler done")
  382. }
  383. func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
  384. switch bucketType {
  385. case bucketTypeNew:
  386. return a.bucketsNew[bucketIdx]
  387. case bucketTypeOld:
  388. return a.bucketsOld[bucketIdx]
  389. default:
  390. cmn.PanicSanity("Should not happen")
  391. return nil
  392. }
  393. }
// Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
// NOTE: currently it always returns true.
func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
	// Sanity check: old addresses never go back into new buckets.
	if ka.isOld() {
		a.Logger.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
		return false
	}

	addrStr := ka.Addr.String()
	bucket := a.getBucket(bucketTypeNew, bucketIdx)

	// Already exists?
	if _, ok := bucket[addrStr]; ok {
		return true
	}

	// Enforce max addresses.
	// NOTE(review): `>` lets the bucket reach newBucketSize+1 entries before
	// expiring one; `>=` would enforce the documented max — confirm intent.
	if len(bucket) > newBucketSize {
		a.Logger.Info("new bucket is full, expiring old ")
		a.expireNew(bucketIdx)
	}

	// Add to bucket.
	bucket[addrStr] = ka
	// The first bucket reference is what counts the address as "new".
	if ka.addBucketRef(bucketIdx) == 1 {
		a.nNew++
	}

	// Ensure in addrLookup
	a.addrLookup[ka.ID()] = ka

	return true
}
// Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
// Unlike new addresses, an old address lives in at most one bucket, and a
// full old bucket rejects the add rather than expiring an entry.
func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
	// Sanity check: only promoted (old) addresses belong here.
	if ka.isNew() {
		a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
		return false
	}
	if len(ka.Buckets) != 0 {
		a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
		return false
	}

	addrStr := ka.Addr.String()
	bucket := a.getBucket(bucketTypeOld, bucketIdx)

	// Already exists?
	if _, ok := bucket[addrStr]; ok {
		return true
	}

	// Enforce max addresses.
	if len(bucket) > oldBucketSize {
		return false
	}

	// Add to bucket.
	bucket[addrStr] = ka
	// The first bucket reference is what counts the address as "old".
	if ka.addBucketRef(bucketIdx) == 1 {
		a.nOld++
	}

	// Ensure in addrLookup
	a.addrLookup[ka.ID()] = ka

	return true
}
  452. func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
  453. if ka.BucketType != bucketType {
  454. a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
  455. return
  456. }
  457. bucket := a.getBucket(bucketType, bucketIdx)
  458. delete(bucket, ka.Addr.String())
  459. if ka.removeBucketRef(bucketIdx) == 0 {
  460. if bucketType == bucketTypeNew {
  461. a.nNew--
  462. } else {
  463. a.nOld--
  464. }
  465. delete(a.addrLookup, ka.ID())
  466. }
  467. }
// removeFromAllBuckets removes ka from every bucket referencing it,
// decrements the matching counter, and drops it from addrLookup.
// NOTE(review): decrements the counter unconditionally — callers are
// expected to only pass addresses that live in at least one bucket.
func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
	for _, bucketIdx := range ka.Buckets {
		bucket := a.getBucket(ka.BucketType, bucketIdx)
		delete(bucket, ka.Addr.String())
	}
	ka.Buckets = nil
	if ka.BucketType == bucketTypeNew {
		a.nNew--
	} else {
		a.nOld--
	}
	delete(a.addrLookup, ka.ID())
}
  481. func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
  482. bucket := a.getBucket(bucketType, bucketIdx)
  483. var oldest *knownAddress
  484. for _, ka := range bucket {
  485. if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
  486. oldest = ka
  487. }
  488. }
  489. return oldest
  490. }
// addAddress adds addr (as reported by src) to a new bucket.
// Must be called with the mutex held. Returns an error for
// non-routable addresses (when routabilityStrict) and for our own
// addresses; known addresses are probabilistically re-added to
// additional new buckets up to maxNewBucketsPerAddress.
func (a *AddrBook) addAddress(addr, src *NetAddress) error {
	if a.routabilityStrict && !addr.Routable() {
		return fmt.Errorf("Cannot add non-routable address %v", addr)
	}
	if _, ok := a.ourAddrs[addr.String()]; ok {
		// Ignore our own listener address.
		return fmt.Errorf("Cannot add ourselves with address %v", addr)
	}

	ka := a.addrLookup[addr.ID]

	if ka != nil {
		// Already old.
		if ka.isOld() {
			return nil
		}
		// Already in max new buckets.
		if len(ka.Buckets) == maxNewBucketsPerAddress {
			return nil
		}
		// The more entries we have, the less likely we are to add more.
		// NOTE(review): assumes any ka in addrLookup has >= 1 bucket;
		// Int31n panics if factor were 0 — confirm the invariant holds.
		factor := int32(2 * len(ka.Buckets))
		if a.rand.Int31n(factor) != 0 {
			return nil
		}
	} else {
		ka = newKnownAddress(addr, src)
	}

	bucket := a.calcNewBucket(addr, src)
	a.addToNewBucket(ka, bucket)

	a.Logger.Info("Added new address", "address", addr, "total", a.size())
	return nil
}
// Make space in the new buckets by expiring the really bad entries.
// If no bad entries are available we remove the oldest.
// NOTE(review): pickOldest returns nil for an empty bucket, which would
// make removeFromBucket panic — callers only invoke this on full buckets.
func (a *AddrBook) expireNew(bucketIdx int) {
	for addrStr, ka := range a.bucketsNew[bucketIdx] {
		// If an entry is bad, throw it away
		if ka.isBad() {
			a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr))
			a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
			return
		}
	}

	// If we haven't thrown out a bad entry, throw out the oldest entry
	oldest := a.pickOldest(bucketTypeNew, bucketIdx)
	a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
}
// Promotes an address from new to old.
// TODO: Move to old probabilistically.
// The better a node is, the less likely it should be evicted from an old bucket.
func (a *AddrBook) moveToOld(ka *knownAddress) {
	// Sanity check
	if ka.isOld() {
		a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
		return
	}
	if len(ka.Buckets) == 0 {
		a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka))
		return
	}

	// Remember one of the buckets in which ka is in.
	freedBucket := ka.Buckets[0]
	// Remove from all (new) buckets.
	a.removeFromAllBuckets(ka)
	// It's officially old now.
	ka.BucketType = bucketTypeOld

	// Try to add it to its oldBucket destination.
	oldBucketIdx := a.calcOldBucket(ka.Addr)
	added := a.addToOldBucket(ka, oldBucketIdx)
	if !added {
		// No room; evict the oldest entry from the destination bucket,
		// demote that entry back into the new buckets, then retry.
		oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
		a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
		// Find new bucket to put oldest in
		newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
		added := a.addToNewBucket(oldest, newBucketIdx)
		// No space in newBucket either, just put it in freedBucket from above.
		if !added {
			added := a.addToNewBucket(oldest, freedBucket)
			if !added {
				a.Logger.Error(cmn.Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
			}
		}
		// Finally, add to bucket again.
		added = a.addToOldBucket(ka, oldBucketIdx)
		if !added {
			a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
		}
	}
}
  580. // doublesha256( key + sourcegroup +
  581. // int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
  582. func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
  583. data1 := []byte{}
  584. data1 = append(data1, []byte(a.key)...)
  585. data1 = append(data1, []byte(a.groupKey(addr))...)
  586. data1 = append(data1, []byte(a.groupKey(src))...)
  587. hash1 := doubleSha256(data1)
  588. hash64 := binary.BigEndian.Uint64(hash1)
  589. hash64 %= newBucketsPerGroup
  590. var hashbuf [8]byte
  591. binary.BigEndian.PutUint64(hashbuf[:], hash64)
  592. data2 := []byte{}
  593. data2 = append(data2, []byte(a.key)...)
  594. data2 = append(data2, a.groupKey(src)...)
  595. data2 = append(data2, hashbuf[:]...)
  596. hash2 := doubleSha256(data2)
  597. return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
  598. }
  599. // doublesha256( key + group +
  600. // int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
  601. func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
  602. data1 := []byte{}
  603. data1 = append(data1, []byte(a.key)...)
  604. data1 = append(data1, []byte(addr.String())...)
  605. hash1 := doubleSha256(data1)
  606. hash64 := binary.BigEndian.Uint64(hash1)
  607. hash64 %= oldBucketsPerGroup
  608. var hashbuf [8]byte
  609. binary.BigEndian.PutUint64(hashbuf[:], hash64)
  610. data2 := []byte{}
  611. data2 = append(data2, []byte(a.key)...)
  612. data2 = append(data2, a.groupKey(addr)...)
  613. data2 = append(data2, hashbuf[:]...)
  614. hash2 := doubleSha256(data2)
  615. return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
  616. }
  617. // Return a string representing the network group of this address.
  618. // This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
  619. // "local" for a local address and the string "unroutable" for an unroutable
  620. // address.
  621. func (a *AddrBook) groupKey(na *NetAddress) string {
  622. if a.routabilityStrict && na.Local() {
  623. return "local"
  624. }
  625. if a.routabilityStrict && !na.Routable() {
  626. return "unroutable"
  627. }
  628. if ipv4 := na.IP.To4(); ipv4 != nil {
  629. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
  630. }
  631. if na.RFC6145() || na.RFC6052() {
  632. // last four bytes are the ip address
  633. ip := net.IP(na.IP[12:16])
  634. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  635. }
  636. if na.RFC3964() {
  637. ip := net.IP(na.IP[2:7])
  638. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  639. }
  640. if na.RFC4380() {
  641. // teredo tunnels have the last 4 bytes as the v4 address XOR
  642. // 0xff.
  643. ip := net.IP(make([]byte, 4))
  644. for i, byte := range na.IP[12:16] {
  645. ip[i] = byte ^ 0xff
  646. }
  647. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  648. }
  649. // OK, so now we know ourselves to be a IPv6 address.
  650. // bitcoind uses /32 for everything, except for Hurricane Electric's
  651. // (he.net) IP range, which it uses /36 for.
  652. bits := 32
  653. heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
  654. Mask: net.CIDRMask(32, 128)}
  655. if heNet.Contains(na.IP) {
  656. bits = 36
  657. }
  658. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
  659. }
  660. //-----------------------------------------------------------------------------
/*
   knownAddress

   tracks information about a known network address that is used
   to determine how viable an address is.
*/
type knownAddress struct {
	Addr        *NetAddress // the address itself
	Src         *NetAddress // the address that told us about Addr
	Attempts    int32       // connection attempts since the last success (see markAttempt/markGood)
	LastAttempt time.Time   // time of the most recent connection attempt
	LastSuccess time.Time   // time of the most recent success; zero if never succeeded
	BucketType  byte        // bucketTypeNew or bucketTypeOld
	Buckets     []int       // indexes of the buckets currently holding this address
}
  675. func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
  676. return &knownAddress{
  677. Addr: addr,
  678. Src: src,
  679. Attempts: 0,
  680. LastAttempt: time.Now(),
  681. BucketType: bucketTypeNew,
  682. Buckets: nil,
  683. }
  684. }
// ID returns the ID of the underlying address; used as the addrLookup key.
func (ka *knownAddress) ID() ID {
	return ka.Addr.ID
}
// isOld reports whether the address lives in the old (proven) buckets.
func (ka *knownAddress) isOld() bool {
	return ka.BucketType == bucketTypeOld
}
// isNew reports whether the address lives in the new (unproven) buckets.
func (ka *knownAddress) isNew() bool {
	return ka.BucketType == bucketTypeNew
}
  694. func (ka *knownAddress) markAttempt() {
  695. now := time.Now()
  696. ka.LastAttempt = now
  697. ka.Attempts += 1
  698. }
  699. func (ka *knownAddress) markGood() {
  700. now := time.Now()
  701. ka.LastAttempt = now
  702. ka.Attempts = 0
  703. ka.LastSuccess = now
  704. }
  705. func (ka *knownAddress) addBucketRef(bucketIdx int) int {
  706. for _, bucket := range ka.Buckets {
  707. if bucket == bucketIdx {
  708. // TODO refactor to return error?
  709. // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
  710. return -1
  711. }
  712. }
  713. ka.Buckets = append(ka.Buckets, bucketIdx)
  714. return len(ka.Buckets)
  715. }
  716. func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
  717. buckets := []int{}
  718. for _, bucket := range ka.Buckets {
  719. if bucket != bucketIdx {
  720. buckets = append(buckets, bucket)
  721. }
  722. }
  723. if len(buckets) != len(ka.Buckets)-1 {
  724. // TODO refactor to return error?
  725. // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
  726. return -1
  727. }
  728. ka.Buckets = buckets
  729. return len(ka.Buckets)
  730. }
  731. /*
  732. An address is bad if the address in question is a New address, has not been tried in the last
  733. minute, and meets one of the following criteria:
  734. 1) It claims to be from the future
  735. 2) It hasn't been seen in over a month
  736. 3) It has failed at least three times and never succeeded
  737. 4) It has failed ten times in the last week
  738. All addresses that meet these criteria are assumed to be worthless and not
  739. worth keeping hold of.
  740. XXX: so a good peer needs us to call MarkGood before the conditions above are reached!
  741. */
  742. func (ka *knownAddress) isBad() bool {
  743. // Is Old --> good
  744. if ka.BucketType == bucketTypeOld {
  745. return false
  746. }
  747. // Has been attempted in the last minute --> good
  748. if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
  749. return false
  750. }
  751. // Too old?
  752. // XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
  753. // and shouldn't it be .Before ?
  754. if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
  755. return true
  756. }
  757. // Never succeeded?
  758. if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
  759. return true
  760. }
  761. // Hasn't succeeded in too long?
  762. // XXX: does this mean if we've kept a connection up for this long we'll disconnect?!
  763. if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
  764. ka.Attempts >= maxFailures {
  765. return true
  766. }
  767. return false
  768. }
  769. //-----------------------------------------------------------------------------
  770. // doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
  771. func doubleSha256(b []byte) []byte {
  772. hasher := sha256.New()
  773. hasher.Write(b) // nolint: errcheck, gas
  774. sum := hasher.Sum(nil)
  775. hasher.Reset()
  776. hasher.Write(sum) // nolint: errcheck, gas
  777. return hasher.Sum(nil)
  778. }