You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

841 lines
21 KiB

9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
8 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
8 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
9 years ago
8 years ago
9 years ago
8 years ago
9 years ago
  1. // Modified for Tendermint
  2. // Originally Copyright (c) 2013-2014 Conformal Systems LLC.
  3. // https://github.com/conformal/btcd/blob/master/LICENSE
  4. package p2p
  5. import (
  6. "encoding/binary"
  7. "encoding/json"
  8. "math"
  9. "math/rand"
  10. "net"
  11. "os"
  12. "sync"
  13. "time"
  14. crypto "github.com/tendermint/go-crypto"
  15. cmn "github.com/tendermint/tmlibs/common"
  16. )
const (
	// needAddressThreshold: below this many known addresses the book
	// reports that it needs more addresses (see NeedMoreAddrs).
	needAddressThreshold = 1000
	// dumpAddressInterval: interval used to dump the address cache to
	// disk for future use.
	dumpAddressInterval = time.Minute * 2
	// oldBucketSize: max addresses in each old address bucket.
	oldBucketSize = 64
	// oldBucketCount: buckets we split old addresses over.
	oldBucketCount = 64
	// newBucketSize: max addresses in each new address bucket.
	newBucketSize = 64
	// newBucketCount: buckets that we spread new addresses over.
	newBucketCount = 256
	// oldBucketsPerGroup: old buckets over which an address group will be spread.
	oldBucketsPerGroup = 4
	// newBucketsPerGroup: new buckets over which a source address group will be spread.
	newBucketsPerGroup = 32
	// maxNewBucketsPerAddress: buckets a frequently seen new address may end up in.
	maxNewBucketsPerAddress = 4
	// numMissingDays: days before which we assume an address has vanished
	// if we have not seen it announced in that long.
	numMissingDays = 30
	// numRetries: tries without a single success before we assume an address is bad.
	numRetries = 3
	// maxFailures: max failures we will accept without a success before
	// considering an address bad.
	maxFailures = 10
	// minBadDays: days since the last success before we will consider
	// evicting an address.
	minBadDays = 7
	// getSelectionPercent: % of total addresses known returned by GetSelection.
	getSelectionPercent = 23
	// minGetSelection: min addresses that must be returned by GetSelection.
	// Useful for bootstrapping.
	minGetSelection = 32
	// maxGetSelection: max addresses returned by GetSelection.
	// NOTE: this must match "maxPexMessageSize"
	maxGetSelection = 250
	// serializationVersion: current version of the on-disk format.
	serializationVersion = 1
)

// Bucket types: an address lives either in "new" buckets (recently learned,
// not yet proven good) or "old" buckets (marked good via MarkGood).
const (
	bucketTypeNew = 0x01
	bucketTypeOld = 0x02
)
// AddrBook - concurrency safe peer address manager.
type AddrBook struct {
	cmn.BaseService
	mtx               sync.Mutex                 // guards all fields below
	filePath          string                     // where the book is persisted on disk
	routabilityStrict bool                       // if true, refuse to add non-routable addresses
	rand              *rand.Rand                 // book-local randomness for bucket/address picks
	key               string                     // random hex string mixed into the bucket hashes
	ourAddrs          map[string]*NetAddress     // our own listener addresses; never handed out or stored
	addrLookup        map[string]*knownAddress   // addr.String() -> entry; covers new & old
	addrNew           []map[string]*knownAddress // "new" buckets, indexed by calcNewBucket
	addrOld           []map[string]*knownAddress // "old" buckets, indexed by calcOldBucket
	wg                sync.WaitGroup             // tracks the saveRoutine goroutine
	nOld              int                        // count of addresses in old buckets
	nNew              int                        // count of addresses in new buckets
}
  75. // NewAddrBook creates a new address book.
  76. // Use Start to begin processing asynchronous address updates.
  77. func NewAddrBook(filePath string, routabilityStrict bool) *AddrBook {
  78. am := &AddrBook{
  79. rand: rand.New(rand.NewSource(time.Now().UnixNano())),
  80. ourAddrs: make(map[string]*NetAddress),
  81. addrLookup: make(map[string]*knownAddress),
  82. filePath: filePath,
  83. routabilityStrict: routabilityStrict,
  84. }
  85. am.init()
  86. am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am)
  87. return am
  88. }
  89. // When modifying this, don't forget to update loadFromFile()
  90. func (a *AddrBook) init() {
  91. a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits
  92. // New addr buckets
  93. a.addrNew = make([]map[string]*knownAddress, newBucketCount)
  94. for i := range a.addrNew {
  95. a.addrNew[i] = make(map[string]*knownAddress)
  96. }
  97. // Old addr buckets
  98. a.addrOld = make([]map[string]*knownAddress, oldBucketCount)
  99. for i := range a.addrOld {
  100. a.addrOld[i] = make(map[string]*knownAddress)
  101. }
  102. }
// OnStart implements Service. It restores any previously saved addresses
// from disk and launches the periodic save routine.
func (a *AddrBook) OnStart() error {
	a.BaseService.OnStart() // NOTE(review): returned error is ignored — confirm intended
	a.loadFromFile(a.filePath)
	// Account for saveRoutine before spawning it so Wait() is reliable.
	a.wg.Add(1)
	go a.saveRoutine()
	return nil
}
// OnStop implements Service.
// NOTE: it does not wait for saveRoutine to exit; use Wait() for that.
func (a *AddrBook) OnStop() {
	a.BaseService.OnStop()
}
// Wait blocks until the saveRoutine goroutine has finished (it exits after
// Quit is closed and a final save completes).
func (a *AddrBook) Wait() {
	a.wg.Wait()
}
  118. func (a *AddrBook) AddOurAddress(addr *NetAddress) {
  119. a.mtx.Lock()
  120. defer a.mtx.Unlock()
  121. a.Logger.Info("Add our address to book", "addr", addr)
  122. a.ourAddrs[addr.String()] = addr
  123. }
  124. func (a *AddrBook) OurAddresses() []*NetAddress {
  125. addrs := []*NetAddress{}
  126. for _, addr := range a.ourAddrs {
  127. addrs = append(addrs, addr)
  128. }
  129. return addrs
  130. }
// AddAddress adds addr to the book, recording src as the peer that told us
// about it (src influences which "new" bucket addr lands in).
// NOTE: addr must not be nil
func (a *AddrBook) AddAddress(addr *NetAddress, src *NetAddress) {
	a.mtx.Lock()
	defer a.mtx.Unlock()
	a.Logger.Info("Add address to book", "addr", addr, "src", src)
	a.addAddress(addr, src)
}
  138. func (a *AddrBook) NeedMoreAddrs() bool {
  139. return a.Size() < needAddressThreshold
  140. }
  141. func (a *AddrBook) Size() int {
  142. a.mtx.Lock()
  143. defer a.mtx.Unlock()
  144. return a.size()
  145. }
  146. func (a *AddrBook) size() int {
  147. return a.nNew + a.nOld
  148. }
  149. // Pick an address to connect to with new/old bias.
  150. func (a *AddrBook) PickAddress(newBias int) *NetAddress {
  151. a.mtx.Lock()
  152. defer a.mtx.Unlock()
  153. if a.size() == 0 {
  154. return nil
  155. }
  156. if newBias > 100 {
  157. newBias = 100
  158. }
  159. if newBias < 0 {
  160. newBias = 0
  161. }
  162. // Bias between new and old addresses.
  163. oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(newBias))
  164. newCorrelation := math.Sqrt(float64(a.nNew)) * float64(newBias)
  165. if (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation {
  166. // pick random Old bucket.
  167. var bucket map[string]*knownAddress = nil
  168. for len(bucket) == 0 {
  169. bucket = a.addrOld[a.rand.Intn(len(a.addrOld))]
  170. }
  171. // pick a random ka from bucket.
  172. randIndex := a.rand.Intn(len(bucket))
  173. for _, ka := range bucket {
  174. if randIndex == 0 {
  175. return ka.Addr
  176. }
  177. randIndex--
  178. }
  179. cmn.PanicSanity("Should not happen")
  180. } else {
  181. // pick random New bucket.
  182. var bucket map[string]*knownAddress = nil
  183. for len(bucket) == 0 {
  184. bucket = a.addrNew[a.rand.Intn(len(a.addrNew))]
  185. }
  186. // pick a random ka from bucket.
  187. randIndex := a.rand.Intn(len(bucket))
  188. for _, ka := range bucket {
  189. if randIndex == 0 {
  190. return ka.Addr
  191. }
  192. randIndex--
  193. }
  194. cmn.PanicSanity("Should not happen")
  195. }
  196. return nil
  197. }
  198. func (a *AddrBook) MarkGood(addr *NetAddress) {
  199. a.mtx.Lock()
  200. defer a.mtx.Unlock()
  201. ka := a.addrLookup[addr.String()]
  202. if ka == nil {
  203. return
  204. }
  205. ka.markGood()
  206. if ka.isNew() {
  207. a.moveToOld(ka)
  208. }
  209. }
  210. func (a *AddrBook) MarkAttempt(addr *NetAddress) {
  211. a.mtx.Lock()
  212. defer a.mtx.Unlock()
  213. ka := a.addrLookup[addr.String()]
  214. if ka == nil {
  215. return
  216. }
  217. ka.markAttempt()
  218. }
// MarkBad currently just ejects the address. In the future, consider
// blacklisting.
func (a *AddrBook) MarkBad(addr *NetAddress) {
	a.RemoveAddress(addr)
}
  224. // RemoveAddress removes the address from the book.
  225. func (a *AddrBook) RemoveAddress(addr *NetAddress) {
  226. a.mtx.Lock()
  227. defer a.mtx.Unlock()
  228. ka := a.addrLookup[addr.String()]
  229. if ka == nil {
  230. return
  231. }
  232. a.Logger.Info("Remove address from book", "addr", addr)
  233. a.removeFromAllBuckets(ka)
  234. }
  235. /* Peer exchange */
  236. // GetSelection randomly selects some addresses (old & new). Suitable for peer-exchange protocols.
  237. func (a *AddrBook) GetSelection() []*NetAddress {
  238. a.mtx.Lock()
  239. defer a.mtx.Unlock()
  240. if a.size() == 0 {
  241. return nil
  242. }
  243. allAddr := make([]*NetAddress, a.size())
  244. i := 0
  245. for _, v := range a.addrLookup {
  246. allAddr[i] = v.Addr
  247. i++
  248. }
  249. numAddresses := cmn.MaxInt(
  250. cmn.MinInt(minGetSelection, len(allAddr)),
  251. len(allAddr)*getSelectionPercent/100)
  252. numAddresses = cmn.MinInt(maxGetSelection, numAddresses)
  253. // Fisher-Yates shuffle the array. We only need to do the first
  254. // `numAddresses' since we are throwing the rest.
  255. for i := 0; i < numAddresses; i++ {
  256. // pick a number between current index and the end
  257. j := rand.Intn(len(allAddr)-i) + i
  258. allAddr[i], allAddr[j] = allAddr[j], allAddr[i]
  259. }
  260. // slice off the limit we are willing to share.
  261. return allAddr[:numAddresses]
  262. }
/* Loading & Saving */

// addrBookJSON is the on-disk representation of the address book
// (see saveToFile / loadFromFile).
type addrBookJSON struct {
	Key   string           // the bucket-hashing key
	Addrs []*knownAddress  // every known address, with its bucket refs
}
  268. func (a *AddrBook) saveToFile(filePath string) {
  269. a.Logger.Info("Saving AddrBook to file", "size", a.Size())
  270. a.mtx.Lock()
  271. defer a.mtx.Unlock()
  272. // Compile Addrs
  273. addrs := []*knownAddress{}
  274. for _, ka := range a.addrLookup {
  275. addrs = append(addrs, ka)
  276. }
  277. aJSON := &addrBookJSON{
  278. Key: a.key,
  279. Addrs: addrs,
  280. }
  281. jsonBytes, err := json.MarshalIndent(aJSON, "", "\t")
  282. if err != nil {
  283. a.Logger.Error("Failed to save AddrBook to file", "err", err)
  284. return
  285. }
  286. err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644)
  287. if err != nil {
  288. a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err)
  289. }
  290. }
  291. // Returns false if file does not exist.
  292. // cmn.Panics if file is corrupt.
  293. func (a *AddrBook) loadFromFile(filePath string) bool {
  294. // If doesn't exist, do nothing.
  295. _, err := os.Stat(filePath)
  296. if os.IsNotExist(err) {
  297. return false
  298. }
  299. // Load addrBookJSON{}
  300. r, err := os.Open(filePath)
  301. if err != nil {
  302. cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err))
  303. }
  304. defer r.Close()
  305. aJSON := &addrBookJSON{}
  306. dec := json.NewDecoder(r)
  307. err = dec.Decode(aJSON)
  308. if err != nil {
  309. cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err))
  310. }
  311. // Restore all the fields...
  312. // Restore the key
  313. a.key = aJSON.Key
  314. // Restore .addrNew & .addrOld
  315. for _, ka := range aJSON.Addrs {
  316. for _, bucketIndex := range ka.Buckets {
  317. bucket := a.getBucket(ka.BucketType, bucketIndex)
  318. bucket[ka.Addr.String()] = ka
  319. }
  320. a.addrLookup[ka.Addr.String()] = ka
  321. if ka.BucketType == bucketTypeNew {
  322. a.nNew++
  323. } else {
  324. a.nOld++
  325. }
  326. }
  327. return true
  328. }
// Save saves the book.
// NOTE: saveToFile logs the same "Saving AddrBook to file" line, so a
// manual Save emits it twice.
func (a *AddrBook) Save() {
	a.Logger.Info("Saving AddrBook to file", "size", a.Size())
	a.saveToFile(a.filePath)
}
  334. /* Private methods */
  335. func (a *AddrBook) saveRoutine() {
  336. dumpAddressTicker := time.NewTicker(dumpAddressInterval)
  337. out:
  338. for {
  339. select {
  340. case <-dumpAddressTicker.C:
  341. a.saveToFile(a.filePath)
  342. case <-a.Quit:
  343. break out
  344. }
  345. }
  346. dumpAddressTicker.Stop()
  347. a.saveToFile(a.filePath)
  348. a.wg.Done()
  349. a.Logger.Info("Address handler done")
  350. }
  351. func (a *AddrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
  352. switch bucketType {
  353. case bucketTypeNew:
  354. return a.addrNew[bucketIdx]
  355. case bucketTypeOld:
  356. return a.addrOld[bucketIdx]
  357. default:
  358. cmn.PanicSanity("Should not happen")
  359. return nil
  360. }
  361. }
  362. // Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full.
  363. // NOTE: currently it always returns true.
  364. func (a *AddrBook) addToNewBucket(ka *knownAddress, bucketIdx int) bool {
  365. // Sanity check
  366. if ka.isOld() {
  367. a.Logger.Error(cmn.Fmt("Cannot add address already in old bucket to a new bucket: %v", ka))
  368. return false
  369. }
  370. addrStr := ka.Addr.String()
  371. bucket := a.getBucket(bucketTypeNew, bucketIdx)
  372. // Already exists?
  373. if _, ok := bucket[addrStr]; ok {
  374. return true
  375. }
  376. // Enforce max addresses.
  377. if len(bucket) > newBucketSize {
  378. a.Logger.Info("new bucket is full, expiring old ")
  379. a.expireNew(bucketIdx)
  380. }
  381. // Add to bucket.
  382. bucket[addrStr] = ka
  383. if ka.addBucketRef(bucketIdx) == 1 {
  384. a.nNew++
  385. }
  386. // Ensure in addrLookup
  387. a.addrLookup[addrStr] = ka
  388. return true
  389. }
  390. // Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full.
  391. func (a *AddrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
  392. // Sanity check
  393. if ka.isNew() {
  394. a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
  395. return false
  396. }
  397. if len(ka.Buckets) != 0 {
  398. a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
  399. return false
  400. }
  401. addrStr := ka.Addr.String()
  402. bucket := a.getBucket(bucketTypeNew, bucketIdx)
  403. // Already exists?
  404. if _, ok := bucket[addrStr]; ok {
  405. return true
  406. }
  407. // Enforce max addresses.
  408. if len(bucket) > oldBucketSize {
  409. return false
  410. }
  411. // Add to bucket.
  412. bucket[addrStr] = ka
  413. if ka.addBucketRef(bucketIdx) == 1 {
  414. a.nOld++
  415. }
  416. // Ensure in addrLookup
  417. a.addrLookup[addrStr] = ka
  418. return true
  419. }
  420. func (a *AddrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) {
  421. if ka.BucketType != bucketType {
  422. a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka))
  423. return
  424. }
  425. bucket := a.getBucket(bucketType, bucketIdx)
  426. delete(bucket, ka.Addr.String())
  427. if ka.removeBucketRef(bucketIdx) == 0 {
  428. if bucketType == bucketTypeNew {
  429. a.nNew--
  430. } else {
  431. a.nOld--
  432. }
  433. delete(a.addrLookup, ka.Addr.String())
  434. }
  435. }
  436. func (a *AddrBook) removeFromAllBuckets(ka *knownAddress) {
  437. for _, bucketIdx := range ka.Buckets {
  438. bucket := a.getBucket(ka.BucketType, bucketIdx)
  439. delete(bucket, ka.Addr.String())
  440. }
  441. ka.Buckets = nil
  442. if ka.BucketType == bucketTypeNew {
  443. a.nNew--
  444. } else {
  445. a.nOld--
  446. }
  447. delete(a.addrLookup, ka.Addr.String())
  448. }
  449. func (a *AddrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress {
  450. bucket := a.getBucket(bucketType, bucketIdx)
  451. var oldest *knownAddress
  452. for _, ka := range bucket {
  453. if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) {
  454. oldest = ka
  455. }
  456. }
  457. return oldest
  458. }
// addAddress inserts addr into a "new" bucket; the caller must hold a.mtx.
// Our own addresses and (in strict mode) non-routable addresses are ignored;
// already-known addresses are re-added probabilistically, up to
// maxNewBucketsPerAddress buckets.
func (a *AddrBook) addAddress(addr, src *NetAddress) {
	if a.routabilityStrict && !addr.Routable() {
		a.Logger.Error(cmn.Fmt("Cannot add non-routable address %v", addr))
		return
	}
	if _, ok := a.ourAddrs[addr.String()]; ok {
		// Ignore our own listener address.
		return
	}
	ka := a.addrLookup[addr.String()]
	if ka != nil {
		// Already old.
		if ka.isOld() {
			return
		}
		// Already in max new buckets.
		if len(ka.Buckets) == maxNewBucketsPerAddress {
			return
		}
		// The more entries we have, the less likely we are to add more.
		// NOTE(review): assumes every entry in addrLookup has at least one
		// bucket ref — rand.Int31n panics if factor is 0; confirm invariant.
		factor := int32(2 * len(ka.Buckets))
		if a.rand.Int31n(factor) != 0 {
			return
		}
	} else {
		ka = newKnownAddress(addr, src)
	}
	// Hash addr+src into a new-bucket index and store it there.
	bucket := a.calcNewBucket(addr, src)
	a.addToNewBucket(ka, bucket)
	a.Logger.Info("Added new address", "address", addr, "total", a.size())
}
  490. // Make space in the new buckets by expiring the really bad entries.
  491. // If no bad entries are available we remove the oldest.
  492. func (a *AddrBook) expireNew(bucketIdx int) {
  493. for addrStr, ka := range a.addrNew[bucketIdx] {
  494. // If an entry is bad, throw it away
  495. if ka.isBad() {
  496. a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr))
  497. a.removeFromBucket(ka, bucketTypeNew, bucketIdx)
  498. return
  499. }
  500. }
  501. // If we haven't thrown out a bad entry, throw out the oldest entry
  502. oldest := a.pickOldest(bucketTypeNew, bucketIdx)
  503. a.removeFromBucket(oldest, bucketTypeNew, bucketIdx)
  504. }
// Promotes an address from new to old.
// TODO: Move to old probabilistically.
// The better a node is, the less likely it should be evicted from an old bucket.
func (a *AddrBook) moveToOld(ka *knownAddress) {
	// Sanity check
	if ka.isOld() {
		a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka))
		return
	}
	if len(ka.Buckets) == 0 {
		a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka))
		return
	}

	// Remember one of the buckets in which ka is in.
	freedBucket := ka.Buckets[0]
	// Remove from all (new) buckets.
	a.removeFromAllBuckets(ka)
	// It's officially old now.
	ka.BucketType = bucketTypeOld

	// Try to add it to its oldBucket destination.
	oldBucketIdx := a.calcOldBucket(ka.Addr)
	added := a.addToOldBucket(ka, oldBucketIdx)
	if !added {
		// No room, must evict something
		oldest := a.pickOldest(bucketTypeOld, oldBucketIdx)
		a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx)
		// Find new bucket to put oldest in
		// NOTE(review): oldest still has BucketType == bucketTypeOld here,
		// and addToNewBucket rejects old-type addresses, so both demotion
		// attempts below appear to always fail and the evicted address is
		// silently dropped — confirm and fix upstream.
		newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src)
		added := a.addToNewBucket(oldest, newBucketIdx)
		// No space in newBucket either, just put it in freedBucket from above.
		if !added {
			added := a.addToNewBucket(oldest, freedBucket)
			if !added {
				a.Logger.Error(cmn.Fmt("Could not migrate oldest %v to freedBucket %v", oldest, freedBucket))
			}
		}
		// Finally, add to bucket again.
		added = a.addToOldBucket(ka, oldBucketIdx)
		if !added {
			a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx))
		}
	}
}
  548. // doublesha256( key + sourcegroup +
  549. // int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets
  550. func (a *AddrBook) calcNewBucket(addr, src *NetAddress) int {
  551. data1 := []byte{}
  552. data1 = append(data1, []byte(a.key)...)
  553. data1 = append(data1, []byte(a.groupKey(addr))...)
  554. data1 = append(data1, []byte(a.groupKey(src))...)
  555. hash1 := doubleSha256(data1)
  556. hash64 := binary.BigEndian.Uint64(hash1)
  557. hash64 %= newBucketsPerGroup
  558. var hashbuf [8]byte
  559. binary.BigEndian.PutUint64(hashbuf[:], hash64)
  560. data2 := []byte{}
  561. data2 = append(data2, []byte(a.key)...)
  562. data2 = append(data2, a.groupKey(src)...)
  563. data2 = append(data2, hashbuf[:]...)
  564. hash2 := doubleSha256(data2)
  565. return int(binary.BigEndian.Uint64(hash2) % newBucketCount)
  566. }
  567. // doublesha256( key + group +
  568. // int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets
  569. func (a *AddrBook) calcOldBucket(addr *NetAddress) int {
  570. data1 := []byte{}
  571. data1 = append(data1, []byte(a.key)...)
  572. data1 = append(data1, []byte(addr.String())...)
  573. hash1 := doubleSha256(data1)
  574. hash64 := binary.BigEndian.Uint64(hash1)
  575. hash64 %= oldBucketsPerGroup
  576. var hashbuf [8]byte
  577. binary.BigEndian.PutUint64(hashbuf[:], hash64)
  578. data2 := []byte{}
  579. data2 = append(data2, []byte(a.key)...)
  580. data2 = append(data2, a.groupKey(addr)...)
  581. data2 = append(data2, hashbuf[:]...)
  582. hash2 := doubleSha256(data2)
  583. return int(binary.BigEndian.Uint64(hash2) % oldBucketCount)
  584. }
  585. // Return a string representing the network group of this address.
  586. // This is the /16 for IPv6, the /32 (/36 for he.net) for IPv6, the string
  587. // "local" for a local address and the string "unroutable for an unroutable
  588. // address.
  589. func (a *AddrBook) groupKey(na *NetAddress) string {
  590. if a.routabilityStrict && na.Local() {
  591. return "local"
  592. }
  593. if a.routabilityStrict && !na.Routable() {
  594. return "unroutable"
  595. }
  596. if ipv4 := na.IP.To4(); ipv4 != nil {
  597. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
  598. }
  599. if na.RFC6145() || na.RFC6052() {
  600. // last four bytes are the ip address
  601. ip := net.IP(na.IP[12:16])
  602. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  603. }
  604. if na.RFC3964() {
  605. ip := net.IP(na.IP[2:7])
  606. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  607. }
  608. if na.RFC4380() {
  609. // teredo tunnels have the last 4 bytes as the v4 address XOR
  610. // 0xff.
  611. ip := net.IP(make([]byte, 4))
  612. for i, byte := range na.IP[12:16] {
  613. ip[i] = byte ^ 0xff
  614. }
  615. return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
  616. }
  617. // OK, so now we know ourselves to be a IPv6 address.
  618. // bitcoind uses /32 for everything, except for Hurricane Electric's
  619. // (he.net) IP range, which it uses /36 for.
  620. bits := 32
  621. heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
  622. Mask: net.CIDRMask(32, 128)}
  623. if heNet.Contains(na.IP) {
  624. bits = 36
  625. }
  626. return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
  627. }
//-----------------------------------------------------------------------------

/*
   knownAddress

   tracks information about a known network address that is used
   to determine how viable an address is.
*/
type knownAddress struct {
	Addr        *NetAddress // the address itself
	Src         *NetAddress // the peer that told us about this address
	Attempts    int32       // connection attempts since the last success
	LastAttempt time.Time   // time of the most recent connection attempt
	LastSuccess time.Time   // time of the most recent success (zero if never)
	BucketType  byte        // bucketTypeNew or bucketTypeOld
	Buckets     []int       // indices of the buckets this address lives in
}
  643. func newKnownAddress(addr *NetAddress, src *NetAddress) *knownAddress {
  644. return &knownAddress{
  645. Addr: addr,
  646. Src: src,
  647. Attempts: 0,
  648. LastAttempt: time.Now(),
  649. BucketType: bucketTypeNew,
  650. Buckets: nil,
  651. }
  652. }
// isOld reports whether the address lives in the "old" buckets.
func (ka *knownAddress) isOld() bool {
	return ka.BucketType == bucketTypeOld
}

// isNew reports whether the address lives in the "new" buckets.
func (ka *knownAddress) isNew() bool {
	return ka.BucketType == bucketTypeNew
}
  659. func (ka *knownAddress) markAttempt() {
  660. now := time.Now()
  661. ka.LastAttempt = now
  662. ka.Attempts += 1
  663. }
  664. func (ka *knownAddress) markGood() {
  665. now := time.Now()
  666. ka.LastAttempt = now
  667. ka.Attempts = 0
  668. ka.LastSuccess = now
  669. }
  670. func (ka *knownAddress) addBucketRef(bucketIdx int) int {
  671. for _, bucket := range ka.Buckets {
  672. if bucket == bucketIdx {
  673. // TODO refactor to return error?
  674. // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka))
  675. return -1
  676. }
  677. }
  678. ka.Buckets = append(ka.Buckets, bucketIdx)
  679. return len(ka.Buckets)
  680. }
  681. func (ka *knownAddress) removeBucketRef(bucketIdx int) int {
  682. buckets := []int{}
  683. for _, bucket := range ka.Buckets {
  684. if bucket != bucketIdx {
  685. buckets = append(buckets, bucket)
  686. }
  687. }
  688. if len(buckets) != len(ka.Buckets)-1 {
  689. // TODO refactor to return error?
  690. // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka))
  691. return -1
  692. }
  693. ka.Buckets = buckets
  694. return len(ka.Buckets)
  695. }
  696. /*
  697. An address is bad if the address in question has not been tried in the last
  698. minute and meets one of the following criteria:
  699. 1) It claims to be from the future
  700. 2) It hasn't been seen in over a month
  701. 3) It has failed at least three times and never succeeded
  702. 4) It has failed ten times in the last week
  703. All addresses that meet these criteria are assumed to be worthless and not
  704. worth keeping hold of.
  705. */
  706. func (ka *knownAddress) isBad() bool {
  707. // Has been attempted in the last minute --> good
  708. if ka.LastAttempt.Before(time.Now().Add(-1 * time.Minute)) {
  709. return false
  710. }
  711. // Over a month old?
  712. if ka.LastAttempt.After(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) {
  713. return true
  714. }
  715. // Never succeeded?
  716. if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries {
  717. return true
  718. }
  719. // Hasn't succeeded in too long?
  720. if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) &&
  721. ka.Attempts >= maxFailures {
  722. return true
  723. }
  724. return false
  725. }